// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RPC-IF core driver
 *
 * Copyright (C) 2018-2019 Renesas Solutions Corp.
 * Copyright (C) 2019 Macronix International Co., Ltd.
 * Copyright (C) 2019-2020 Cogent Embedded, Inc.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include <memory/renesas-rpc-if.h>

#include "renesas-rpc-if-regs.h"
#include "renesas-xspi-if-regs.h"

static const struct regmap_range rpcif_volatile_ranges[] = {
	regmap_reg_range(RPCIF_SMRDR0, RPCIF_SMRDR1),
	regmap_reg_range(RPCIF_SMWDR0, RPCIF_SMWDR1),
	regmap_reg_range(RPCIF_CMNSR, RPCIF_CMNSR),
};

static const struct regmap_access_table rpcif_volatile_table = {
	.yes_ranges = rpcif_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(rpcif_volatile_ranges),
};

static const struct regmap_range xspi_volatile_ranges[] = {
	regmap_reg_range(XSPI_CDD0BUF0, XSPI_CDD0BUF0),
};

static const struct regmap_access_table xspi_volatile_table = {
	.yes_ranges = xspi_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(xspi_volatile_ranges),
};

struct rpcif_priv;

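/*
 * Per-variant operations: the classic RPC-IF blocks (R-Car Gen3/Gen4,
 * RZ/G2L) and the RZ/G3E xSPI block differ in register layout and
 * transfer sequencing, so each variant provides its own hw_init,
 * prepare, manual_xfer and dirmap_read hooks plus the status register
 * and bit that wait_msg_xfer_end() polls.
 */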
struct rpcif_impl {
	int (*hw_init)(struct rpcif_priv *rpc, bool hyperflash);
	void (*prepare)(struct rpcif_priv *rpc, const struct rpcif_op *op,
			u64 *offs, size_t *len);
	int (*manual_xfer)(struct rpcif_priv *rpc);
	size_t (*dirmap_read)(struct rpcif_priv *rpc, u64 offs, size_t len,
			      void *buf);
	u32 status_reg;
	u32 status_mask;
};

struct rpcif_info {
	const struct regmap_config *regmap_config;
	const struct rpcif_impl *impl;
	enum rpcif_type type;
	u8 strtim;
};

struct rpcif_priv {
	struct device *dev;
	void __iomem *base;
	void __iomem *dirmap;
	struct regmap *regmap;
	struct reset_control *rstc;
	struct clk *spi_clk;
	struct clk *spix2_clk;
	struct platform_device *vdev;
	size_t size;
	const struct rpcif_info *info;
	enum rpcif_data_dir dir;
	u8 bus_size;
	u8 xfer_size;
	u8 addr_nbytes;		/* Specified for xSPI */
	u32 proto;		/* Specified for xSPI */
	void *buffer;
	u32 xferlen;
	u32 smcr;
	u32 smadr;
	u32 command;		/* DRCMR or SMCMR */
	u32 option;		/* DROPR or SMOPR */
	u32 enable;		/* DRENR or SMENR */
	u32 dummy;		/* DRDMCR or SMDMCR */
	u32 ddr;		/* DRDRENR or SMDRENR */
};

/*
 * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with
 * proper width. Requires rpcif_priv.xfer_size to be correctly set before!
 */
static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	struct rpcif_priv *rpc = context;

	switch (reg) {
	case RPCIF_SMRDR0:
	case RPCIF_SMWDR0:
		switch (rpc->xfer_size) {
		case 1:
			*val = readb(rpc->base + reg);
			return 0;

		case 2:
			*val = readw(rpc->base + reg);
			return 0;

		case 4:
		case 8:
			*val = readl(rpc->base + reg);
			return 0;

		default:
			return -EILSEQ;
		}

	case RPCIF_SMRDR1:
	case RPCIF_SMWDR1:
		if (rpc->xfer_size != 8)
			return -EILSEQ;
		break;
	}

	*val = readl(rpc->base + reg);
	return 0;
}

static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val)
{
	struct rpcif_priv *rpc = context;

	switch (reg) {
	case RPCIF_SMWDR0:
		switch (rpc->xfer_size) {
		case 1:
			writeb(val, rpc->base + reg);
			return 0;

		case 2:
			writew(val, rpc->base + reg);
			return 0;

		case 4:
		case 8:
			writel(val, rpc->base + reg);
			return 0;

		default:
			return -EILSEQ;
		}

	case RPCIF_SMWDR1:
		if (rpc->xfer_size != 8)
			return -EILSEQ;
		break;

	case RPCIF_SMRDR0:
	case RPCIF_SMRDR1:
		return -EPERM;
	}

	writel(val, rpc->base + reg);
	return 0;
}

static const struct regmap_config rpcif_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.reg_read = rpcif_reg_read,
	.reg_write = rpcif_reg_write,
	.fast_io = true,
	.max_register = RPCIF_PHYINT,
	.volatile_table = &rpcif_volatile_table,
};

static int xspi_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	struct rpcif_priv *xspi = context;

	*val = readl(xspi->base + reg);
	return 0;
}

static int xspi_reg_write(void *context, unsigned int reg, unsigned int val)
{
	struct rpcif_priv *xspi = context;

	writel(val, xspi->base + reg);
	return 0;
}

static const struct regmap_config xspi_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.reg_read = xspi_reg_read,
	.reg_write = xspi_reg_write,
	.fast_io = true,
	.max_register = XSPI_INTE,
	.volatile_table = &xspi_volatile_table,
};

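/*
 * Typical, simplified call sequence from a client driver bound to the
 * child platform device (illustrative only, error handling omitted):
 *
 *	struct rpcif rpc;
 *	struct rpcif_op op = { ... };
 *
 *	rpcif_sw_init(&rpc, pdev->dev.parent);
 *	pm_runtime_enable(rpc.dev);
 *	rpcif_hw_init(rpc.dev, false);
 *	rpcif_prepare(rpc.dev, &op, NULL, NULL);
 *	rpcif_manual_xfer(rpc.dev);
 */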
int rpcif_sw_init(struct rpcif *rpcif, struct device *dev)
{
	struct rpcif_priv *rpc = dev_get_drvdata(dev);

	rpcif->dev = dev;
	rpcif->dirmap = rpc->dirmap;
	rpcif->size = rpc->size;
	rpcif->xspi = rpc->info->type == XSPI_RZ_G3E;
	return 0;
}
EXPORT_SYMBOL(rpcif_sw_init);

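/*
 * SDR-mode PHY timing adjustment for RZ/G2L.  The PHYWR/PHYADD value
 * pairs below are opaque calibration settings (presumably the
 * vendor-recommended ones) and are written verbatim.
 */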
static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif_priv *rpc)
{
	regmap_write(rpc->regmap, RPCIF_PHYWR, 0xa5390000);
	regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000000);
	regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080);
	regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000022);
	regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080);
	regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000024);
	regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_CKSEL(3),
			   RPCIF_PHYCNT_CKSEL(3));
	regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00000030);
	regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032);
}

static int rpcif_hw_init_impl(struct rpcif_priv *rpc, bool hyperflash)
{
	u32 dummy;
	int ret;

	if (rpc->info->type == RPCIF_RZ_G2L) {
		ret = reset_control_reset(rpc->rstc);
		if (ret)
			return ret;
		usleep_range(200, 300);
		rpcif_rzg2l_timing_adjust_sdr(rpc);
	}

	regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_PHYMEM_MASK,
			   RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0));

	/* DMA Transfer is not supported */
	regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_HS, 0);

	regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
			   /* create mask with all affected bits set */
			   RPCIF_PHYCNT_STRTIM(BIT(fls(rpc->info->strtim)) - 1),
			   RPCIF_PHYCNT_STRTIM(rpc->info->strtim));

	regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET1, RPCIF_PHYOFFSET1_DDRTMG(3),
			   RPCIF_PHYOFFSET1_DDRTMG(3));
	regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET2, RPCIF_PHYOFFSET2_OCTTMG(7),
			   RPCIF_PHYOFFSET2_OCTTMG(4));

	if (hyperflash)
		regmap_update_bits(rpc->regmap, RPCIF_PHYINT,
				   RPCIF_PHYINT_WPVAL, 0);

	if (rpc->info->type == RPCIF_RZ_G2L)
		regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
				   RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_IOFV(3) |
				   RPCIF_CMNCR_BSZ(3),
				   RPCIF_CMNCR_MOIIO(1) | RPCIF_CMNCR_IOFV(3) |
				   RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0));
	else
		regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
				   RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_BSZ(3),
				   RPCIF_CMNCR_MOIIO(3) |
				   RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0));

	/* Set RCF after BSZ update */
	regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF);
	/* Dummy read according to spec */
	regmap_read(rpc->regmap, RPCIF_DRCR, &dummy);
	regmap_write(rpc->regmap, RPCIF_SSLDR, RPCIF_SSLDR_SPNDL(7) |
		     RPCIF_SSLDR_SLNDL(7) | RPCIF_SSLDR_SCKDL(7));

	rpc->bus_size = hyperflash ? 2 : 1;

	return 0;
}

static int xspi_hw_init_impl(struct rpcif_priv *xspi, bool hyperflash)
{
	int ret;

	ret = reset_control_reset(xspi->rstc);
	if (ret)
		return ret;

	regmap_write(xspi->regmap, XSPI_WRAPCFG, 0x0);

	regmap_update_bits(xspi->regmap, XSPI_LIOCFGCS0,
			   XSPI_LIOCFG_PRTMD(0x3ff) | XSPI_LIOCFG_CSMIN(0xf) |
			   XSPI_LIOCFG_CSASTEX | XSPI_LIOCFG_CSNEGEX,
			   XSPI_LIOCFG_PRTMD(0) | XSPI_LIOCFG_CSMIN(0) |
			   XSPI_LIOCFG_CSASTEX | XSPI_LIOCFG_CSNEGEX);

	regmap_update_bits(xspi->regmap, XSPI_CCCTL0CS0, XSPI_CCCTL0_CAEN, 0);

	regmap_update_bits(xspi->regmap, XSPI_CDCTL0,
			   XSPI_CDCTL0_TRREQ | XSPI_CDCTL0_CSSEL, 0);

	regmap_update_bits(xspi->regmap, XSPI_INTE, XSPI_INTE_CMDCMPE,
			   XSPI_INTE_CMDCMPE);

	return 0;
}

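/*
 * rpcif_hw_init() - one-time controller setup on behalf of a client driver.
 * @dev is the RPC-IF/xSPI device (the parent passed to rpcif_sw_init()),
 * @hyperflash selects the 16-bit HyperFlash bus instead of serial flash.
 * The device is runtime-resumed around the variant-specific init.
 */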
int rpcif_hw_init(struct device *dev, bool hyperflash)
{
	struct rpcif_priv *rpc = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	ret = rpc->info->impl->hw_init(rpc, hyperflash);

	pm_runtime_put(dev);

	return ret;
}
EXPORT_SYMBOL(rpcif_hw_init);

static int wait_msg_xfer_end(struct rpcif_priv *rpc)
{
	u32 sts;

	return regmap_read_poll_timeout(rpc->regmap, rpc->info->impl->status_reg,
					sts, sts & rpc->info->impl->status_mask,
					0, USEC_PER_SEC);
}

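/*
 * Convert a transfer size in bytes into the 4-bit SPIDE/OPDE field
 * value: bit 3 is always set and one extra low bit is added per
 * additional byte, e.g. 1 byte -> 0x8, 2 bytes -> 0xC, 4 bytes -> 0xF.
 * On the 16-bit (HyperFlash) bus the size is first counted in 16-bit
 * units.
 */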
static u8 rpcif_bits_set(struct rpcif_priv *rpc, u32 nbytes)
{
	if (rpc->bus_size == 2)
		nbytes /= 2;
	nbytes = clamp(nbytes, 1U, 4U);
	return GENMASK(3, 4 - nbytes);
}

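/*
 * Encode a bus width (1, 2, 4 or 8 lines) as the 2-bit value used by
 * the SMENR/DRENR bit-width fields: 1 -> 0, 2 -> 1, 4 -> 2, anything
 * wider capped at 2.
 */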
static u8 rpcif_bit_size(u8 buswidth)
{
	return buswidth > 4 ? 2 : ilog2(buswidth);
}

static void rpcif_prepare_impl(struct rpcif_priv *rpc, const struct rpcif_op *op,
			       u64 *offs, size_t *len)
{
	rpc->smcr = 0;
	rpc->smadr = 0;
	rpc->enable = 0;
	rpc->command = 0;
	rpc->option = 0;
	rpc->dummy = 0;
	rpc->ddr = 0;
	rpc->xferlen = 0;

	if (op->cmd.buswidth) {
		rpc->enable = RPCIF_SMENR_CDE |
			RPCIF_SMENR_CDB(rpcif_bit_size(op->cmd.buswidth));
		rpc->command = RPCIF_SMCMR_CMD(op->cmd.opcode);
		if (op->cmd.ddr)
			rpc->ddr = RPCIF_SMDRENR_HYPE(0x5);
	}
	if (op->ocmd.buswidth) {
		rpc->enable |= RPCIF_SMENR_OCDE |
			RPCIF_SMENR_OCDB(rpcif_bit_size(op->ocmd.buswidth));
		rpc->command |= RPCIF_SMCMR_OCMD(op->ocmd.opcode);
	}

	if (op->addr.buswidth) {
		rpc->enable |=
			RPCIF_SMENR_ADB(rpcif_bit_size(op->addr.buswidth));
		if (op->addr.nbytes == 4)
			rpc->enable |= RPCIF_SMENR_ADE(0xF);
		else
			rpc->enable |= RPCIF_SMENR_ADE(GENMASK(
						2, 3 - op->addr.nbytes));
		if (op->addr.ddr)
			rpc->ddr |= RPCIF_SMDRENR_ADDRE;

		if (offs && len)
			rpc->smadr = *offs;
		else
			rpc->smadr = op->addr.val;
	}

	if (op->dummy.buswidth) {
		rpc->enable |= RPCIF_SMENR_DME;
		rpc->dummy = RPCIF_SMDMCR_DMCYC(op->dummy.ncycles);
	}

	if (op->option.buswidth) {
		rpc->enable |= RPCIF_SMENR_OPDE(
				rpcif_bits_set(rpc, op->option.nbytes)) |
			RPCIF_SMENR_OPDB(rpcif_bit_size(op->option.buswidth));
		if (op->option.ddr)
			rpc->ddr |= RPCIF_SMDRENR_OPDRE;
		rpc->option = op->option.val;
	}

	rpc->dir = op->data.dir;
	if (op->data.buswidth) {
		u32 nbytes;

		rpc->buffer = op->data.buf.in;
		switch (op->data.dir) {
		case RPCIF_DATA_IN:
			rpc->smcr = RPCIF_SMCR_SPIRE;
			break;
		case RPCIF_DATA_OUT:
			rpc->smcr = RPCIF_SMCR_SPIWE;
			break;
		default:
			break;
		}
		if (op->data.ddr)
			rpc->ddr |= RPCIF_SMDRENR_SPIDRE;

		if (offs && len)
			nbytes = *len;
		else
			nbytes = op->data.nbytes;
		rpc->xferlen = nbytes;

		rpc->enable |= RPCIF_SMENR_SPIDB(rpcif_bit_size(op->data.buswidth));
	}
}

static void xspi_prepare_impl(struct rpcif_priv *xspi, const struct rpcif_op *op,
			      u64 *offs, size_t *len)
{
	xspi->smadr = 0;
	xspi->addr_nbytes = 0;
	xspi->command = 0;
	xspi->option = 0;
	xspi->dummy = 0;
	xspi->xferlen = 0;
	xspi->proto = 0;

	if (op->cmd.buswidth)
		xspi->command = op->cmd.opcode;

	if (op->ocmd.buswidth)
		xspi->command = (xspi->command << 8) | op->ocmd.opcode;

	if (op->addr.buswidth) {
		xspi->addr_nbytes = op->addr.nbytes;
		if (offs && len)
			xspi->smadr = *offs;
		else
			xspi->smadr = op->addr.val;
	}

	if (op->dummy.buswidth)
		xspi->dummy = op->dummy.ncycles;

	xspi->dir = op->data.dir;
	if (op->data.buswidth) {
		u32 nbytes;

		xspi->buffer = op->data.buf.in;

		if (offs && len)
			nbytes = *len;
		else
			nbytes = op->data.nbytes;
		xspi->xferlen = nbytes;
	}

	if (op->cmd.buswidth == 1) {
		if (op->addr.buswidth == 2 || op->data.buswidth == 2)
			xspi->proto = PROTO_1S_2S_2S;
		else if (op->addr.buswidth == 4 || op->data.buswidth == 4)
			xspi->proto = PROTO_1S_4S_4S;
	} else if (op->cmd.buswidth == 2 &&
		   (op->addr.buswidth == 2 || op->data.buswidth == 2)) {
		xspi->proto = PROTO_2S_2S_2S;
	} else if (op->cmd.buswidth == 4 &&
		   (op->addr.buswidth == 4 || op->data.buswidth == 4)) {
		xspi->proto = PROTO_4S_4S_4S;
	}
}

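/*
 * rpcif_prepare() - translate a generic struct rpcif_op into the shadow
 * register values (command, address, dummy cycles, data length, ...)
 * cached in rpcif_priv.  The transfer itself is started later by
 * rpcif_manual_xfer() or rpcif_dirmap_read().  When @offs/@len are
 * non-NULL they override the address and length taken from @op.
 */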
void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs,
		   size_t *len)
{
	struct rpcif_priv *rpc = dev_get_drvdata(dev);

	rpc->info->impl->prepare(rpc, op, offs, len);
}
EXPORT_SYMBOL(rpcif_prepare);

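/*
 * Manual-mode transfer for the classic RPC-IF: data is moved through
 * the SMWDR/SMRDR registers in chunks of at most 4 bytes (8 on the
 * 16-bit HyperFlash bus), keeping SSL asserted between write chunks
 * via RPCIF_SMCR_SSLKP.  On error the controller is reset and
 * re-initialized.
 */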
static int rpcif_manual_xfer_impl(struct rpcif_priv *rpc)
{
	u32 smenr, smcr, pos = 0, max = rpc->bus_size == 2 ? 8 : 4;
	int ret = 0;

	regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
			   RPCIF_PHYCNT_CAL, RPCIF_PHYCNT_CAL);
	regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
			   RPCIF_CMNCR_MD, RPCIF_CMNCR_MD);
	regmap_write(rpc->regmap, RPCIF_SMCMR, rpc->command);
	regmap_write(rpc->regmap, RPCIF_SMOPR, rpc->option);
	regmap_write(rpc->regmap, RPCIF_SMDMCR, rpc->dummy);
	regmap_write(rpc->regmap, RPCIF_SMDRENR, rpc->ddr);
	regmap_write(rpc->regmap, RPCIF_SMADR, rpc->smadr);
	smenr = rpc->enable;

	switch (rpc->dir) {
	case RPCIF_DATA_OUT:
		while (pos < rpc->xferlen) {
			u32 bytes_left = rpc->xferlen - pos;
			u32 nbytes, data[2], *p = data;

			smcr = rpc->smcr | RPCIF_SMCR_SPIE;

			/* nbytes may only be 1, 2, 4, or 8 */
			nbytes = bytes_left >= max ? max : (1 << ilog2(bytes_left));
			if (bytes_left > nbytes)
				smcr |= RPCIF_SMCR_SSLKP;

			smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes));
			regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
			rpc->xfer_size = nbytes;

			memcpy(data, rpc->buffer + pos, nbytes);
			if (nbytes == 8)
				regmap_write(rpc->regmap, RPCIF_SMWDR1, *p++);
			regmap_write(rpc->regmap, RPCIF_SMWDR0, *p);

			regmap_write(rpc->regmap, RPCIF_SMCR, smcr);
			ret = wait_msg_xfer_end(rpc);
			if (ret)
				goto err_out;

			pos += nbytes;
			smenr = rpc->enable &
				~RPCIF_SMENR_CDE & ~RPCIF_SMENR_ADE(0xF);
		}
		break;
	case RPCIF_DATA_IN:
		/*
		 * RPC-IF spoils the data for the commands without an address
		 * phase (like RDID) in the manual mode, so we'll have to work
		 * around this issue by using the external address space read
		 * mode instead.
		 */
		if (!(smenr & RPCIF_SMENR_ADE(0xF)) && rpc->dirmap) {
			u32 dummy;

			regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
					   RPCIF_CMNCR_MD, 0);
			regmap_write(rpc->regmap, RPCIF_DRCR,
				     RPCIF_DRCR_RBURST(32) | RPCIF_DRCR_RBE);
			regmap_write(rpc->regmap, RPCIF_DRCMR, rpc->command);
			regmap_write(rpc->regmap, RPCIF_DREAR,
				     RPCIF_DREAR_EAC(1));
			regmap_write(rpc->regmap, RPCIF_DROPR, rpc->option);
			regmap_write(rpc->regmap, RPCIF_DRENR,
				     smenr & ~RPCIF_SMENR_SPIDE(0xF));
			regmap_write(rpc->regmap, RPCIF_DRDMCR, rpc->dummy);
			regmap_write(rpc->regmap, RPCIF_DRDRENR, rpc->ddr);
			memcpy_fromio(rpc->buffer, rpc->dirmap, rpc->xferlen);
			regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF);
			/* Dummy read according to spec */
			regmap_read(rpc->regmap, RPCIF_DRCR, &dummy);
			break;
		}
		while (pos < rpc->xferlen) {
			u32 bytes_left = rpc->xferlen - pos;
			u32 nbytes, data[2], *p = data;

			/* nbytes may only be 1, 2, 4, or 8 */
			nbytes = bytes_left >= max ? max : (1 << ilog2(bytes_left));

			regmap_write(rpc->regmap, RPCIF_SMADR,
				     rpc->smadr + pos);
			smenr &= ~RPCIF_SMENR_SPIDE(0xF);
			smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes));
			regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
			regmap_write(rpc->regmap, RPCIF_SMCR,
				     rpc->smcr | RPCIF_SMCR_SPIE);
			rpc->xfer_size = nbytes;
			ret = wait_msg_xfer_end(rpc);
			if (ret)
				goto err_out;

			if (nbytes == 8)
				regmap_read(rpc->regmap, RPCIF_SMRDR1, p++);
			regmap_read(rpc->regmap, RPCIF_SMRDR0, p);
			memcpy(rpc->buffer + pos, data, nbytes);

			pos += nbytes;
		}
		break;
	default:
		regmap_write(rpc->regmap, RPCIF_SMENR, rpc->enable);
		regmap_write(rpc->regmap, RPCIF_SMCR,
			     rpc->smcr | RPCIF_SMCR_SPIE);
		ret = wait_msg_xfer_end(rpc);
		if (ret)
			goto err_out;
	}

	return ret;

err_out:
	if (reset_control_reset(rpc->rstc))
		dev_err(rpc->dev, "Failed to reset HW\n");
	rpcif_hw_init_impl(rpc, rpc->bus_size == 2);
	return ret;
}

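/*
 * Manual-mode transfer for the xSPI block: each chunk of up to 8 bytes
 * is described in the CDTBUF0/CDABUF0/CDD[01]BUF0 command buffer,
 * started with XSPI_CDCTL0_TRREQ, and completion is signalled by the
 * CMDCMP status polled in wait_msg_xfer_end() and acknowledged via
 * XSPI_INTC.
 */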
static int xspi_manual_xfer_impl(struct rpcif_priv *xspi)
{
	u32 pos = 0, max = 8;
	int ret = 0;

	regmap_update_bits(xspi->regmap, XSPI_CDCTL0, XSPI_CDCTL0_TRNUM(0x3),
			   XSPI_CDCTL0_TRNUM(0));

	regmap_update_bits(xspi->regmap, XSPI_CDCTL0, XSPI_CDCTL0_TRREQ, 0);

	regmap_write(xspi->regmap, XSPI_CDTBUF0,
		     XSPI_CDTBUF_CMDSIZE(0x1) | XSPI_CDTBUF_CMD_FIELD(xspi->command));

	regmap_write(xspi->regmap, XSPI_CDABUF0, 0);

	regmap_update_bits(xspi->regmap, XSPI_CDTBUF0, XSPI_CDTBUF_ADDSIZE(0x7),
			   XSPI_CDTBUF_ADDSIZE(xspi->addr_nbytes));

	regmap_write(xspi->regmap, XSPI_CDABUF0, xspi->smadr);

	regmap_update_bits(xspi->regmap, XSPI_LIOCFGCS0, XSPI_LIOCFG_PRTMD(0x3ff),
			   XSPI_LIOCFG_PRTMD(xspi->proto));

	switch (xspi->dir) {
	case RPCIF_DATA_OUT:
		while (pos < xspi->xferlen) {
			u32 bytes_left = xspi->xferlen - pos;
			u32 nbytes, data[2], *p = data;

			regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
					   XSPI_CDTBUF_TRTYPE, XSPI_CDTBUF_TRTYPE);

			nbytes = bytes_left >= max ? max : bytes_left;

			regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
					   XSPI_CDTBUF_DATASIZE(0xf),
					   XSPI_CDTBUF_DATASIZE(nbytes));

			regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
					   XSPI_CDTBUF_ADDSIZE(0x7),
					   XSPI_CDTBUF_ADDSIZE(xspi->addr_nbytes));

			memcpy(data, xspi->buffer + pos, nbytes);

			if (nbytes > 4) {
				regmap_write(xspi->regmap, XSPI_CDD0BUF0, *p++);
				regmap_write(xspi->regmap, XSPI_CDD1BUF0, *p);
			} else {
				regmap_write(xspi->regmap, XSPI_CDD0BUF0, *p);
			}

			regmap_write(xspi->regmap, XSPI_CDABUF0, xspi->smadr + pos);

			regmap_update_bits(xspi->regmap, XSPI_CDCTL0,
					   XSPI_CDCTL0_TRREQ, XSPI_CDCTL0_TRREQ);

			ret = wait_msg_xfer_end(xspi);
			if (ret)
				goto err_out;

			regmap_update_bits(xspi->regmap, XSPI_INTC,
					   XSPI_INTC_CMDCMPC, XSPI_INTC_CMDCMPC);

			pos += nbytes;
		}
		regmap_update_bits(xspi->regmap, XSPI_CDCTL0, XSPI_CDCTL0_TRREQ, 0);
		break;
	case RPCIF_DATA_IN:
		while (pos < xspi->xferlen) {
			u32 bytes_left = xspi->xferlen - pos;
			u32 nbytes, data[2], *p = data;

			regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
					   XSPI_CDTBUF_TRTYPE,
					   ~(u32)XSPI_CDTBUF_TRTYPE);

			/* nbytes can be up to 8 bytes */
			nbytes = bytes_left >= max ? max : bytes_left;

			regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
					   XSPI_CDTBUF_DATASIZE(0xf),
					   XSPI_CDTBUF_DATASIZE(nbytes));

			regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
					   XSPI_CDTBUF_ADDSIZE(0x7),
					   XSPI_CDTBUF_ADDSIZE(xspi->addr_nbytes));

			if (xspi->addr_nbytes)
				regmap_write(xspi->regmap, XSPI_CDABUF0,
					     xspi->smadr + pos);

			regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
					   XSPI_CDTBUF_LATE(0x1f),
					   XSPI_CDTBUF_LATE(xspi->dummy));

			regmap_update_bits(xspi->regmap, XSPI_CDCTL0,
					   XSPI_CDCTL0_TRREQ, XSPI_CDCTL0_TRREQ);

			ret = wait_msg_xfer_end(xspi);
			if (ret)
				goto err_out;

			if (nbytes > 4) {
				regmap_read(xspi->regmap, XSPI_CDD0BUF0, p++);
				regmap_read(xspi->regmap, XSPI_CDD1BUF0, p);
			} else {
				regmap_read(xspi->regmap, XSPI_CDD0BUF0, p);
			}

			memcpy(xspi->buffer + pos, data, nbytes);

			regmap_update_bits(xspi->regmap, XSPI_INTC,
					   XSPI_INTC_CMDCMPC, XSPI_INTC_CMDCMPC);

			pos += nbytes;
		}
		regmap_update_bits(xspi->regmap, XSPI_CDCTL0,
				   XSPI_CDCTL0_TRREQ, 0);
		break;
	default:
		regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
				   XSPI_CDTBUF_TRTYPE, XSPI_CDTBUF_TRTYPE);
		regmap_update_bits(xspi->regmap, XSPI_CDCTL0,
				   XSPI_CDCTL0_TRREQ, XSPI_CDCTL0_TRREQ);

		ret = wait_msg_xfer_end(xspi);
		if (ret)
			goto err_out;

		regmap_update_bits(xspi->regmap, XSPI_INTC,
				   XSPI_INTC_CMDCMPC, XSPI_INTC_CMDCMPC);
	}

	return ret;

err_out:
	xspi_hw_init_impl(xspi, false);
	return ret;
}

int rpcif_manual_xfer(struct device *dev)
{
	struct rpcif_priv *rpc = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	ret = rpc->info->impl->manual_xfer(rpc);

	pm_runtime_put(dev);

	return ret;
}
EXPORT_SYMBOL(rpcif_manual_xfer);

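/*
 * Variant of memcpy_fromio() that never issues 8-bit bus accesses:
 * leading/trailing odd bytes are extracted from 16-bit reads and the
 * aligned middle part is copied with word-sized accesses.  Used for
 * dirmap reads on the 16-bit HyperFlash bus.
 */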
static void memcpy_fromio_readw(void *to,
				const void __iomem *from,
				size_t count)
{
	const int maxw = (IS_ENABLED(CONFIG_64BIT)) ? 8 : 4;
	u8 buf[2];

	if (count && ((unsigned long)from & 1)) {
		*(u16 *)buf = __raw_readw((void __iomem *)((unsigned long)from & ~1));
		*(u8 *)to = buf[1];
		from++;
		to++;
		count--;
	}
	while (count >= 2 && !IS_ALIGNED((unsigned long)from, maxw)) {
		*(u16 *)to = __raw_readw(from);
		from += 2;
		to += 2;
		count -= 2;
	}
	while (count >= maxw) {
#ifdef CONFIG_64BIT
		*(u64 *)to = __raw_readq(from);
#else
		*(u32 *)to = __raw_readl(from);
#endif
		from += maxw;
		to += maxw;
		count -= maxw;
	}
	while (count >= 2) {
		*(u16 *)to = __raw_readw(from);
		from += 2;
		to += 2;
		count -= 2;
	}
	if (count) {
		*(u16 *)buf = __raw_readw(from);
		*(u8 *)to = buf[0];
	}
}

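/*
 * Direct-mapped (external address space) read: program the DR* read
 * registers from the values cached by rpcif_prepare() and let the
 * controller fetch the data through the memory-mapped dirmap window.
 */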
static size_t rpcif_dirmap_read_impl(struct rpcif_priv *rpc, u64 offs,
				     size_t len, void *buf)
{
	loff_t from = offs & (rpc->size - 1);
	size_t size = rpc->size - from;

	if (len > size)
		len = size;

	regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MD, 0);
	regmap_write(rpc->regmap, RPCIF_DRCR, 0);
	regmap_write(rpc->regmap, RPCIF_DRCMR, rpc->command);
	regmap_write(rpc->regmap, RPCIF_DREAR,
		     RPCIF_DREAR_EAV(offs >> 25) | RPCIF_DREAR_EAC(1));
	regmap_write(rpc->regmap, RPCIF_DROPR, rpc->option);
	regmap_write(rpc->regmap, RPCIF_DRENR,
		     rpc->enable & ~RPCIF_SMENR_SPIDE(0xF));
	regmap_write(rpc->regmap, RPCIF_DRDMCR, rpc->dummy);
	regmap_write(rpc->regmap, RPCIF_DRDRENR, rpc->ddr);

	if (rpc->bus_size == 2)
		memcpy_fromio_readw(buf, rpc->dirmap + from, len);
	else
		memcpy_fromio(buf, rpc->dirmap + from, len);

	return len;
}

static size_t xspi_dirmap_read_impl(struct rpcif_priv *xspi, u64 offs,
				    size_t len, void *buf)
{
	loff_t from = offs & (xspi->size - 1);
	size_t size = xspi->size - from;
	u8 addsize = xspi->addr_nbytes - 1;

	if (len > size)
		len = size;

	regmap_update_bits(xspi->regmap, XSPI_CMCFG0CS0,
			   XSPI_CMCFG0_FFMT(0x3) | XSPI_CMCFG0_ADDSIZE(0x3),
			   XSPI_CMCFG0_FFMT(0) | XSPI_CMCFG0_ADDSIZE(addsize));

	regmap_update_bits(xspi->regmap, XSPI_CMCFG1CS0,
			   XSPI_CMCFG1_RDCMD(0xffff) | XSPI_CMCFG1_RDLATE(0x1f),
			   XSPI_CMCFG1_RDCMD_UPPER_BYTE(xspi->command) |
			   XSPI_CMCFG1_RDLATE(xspi->dummy));

	regmap_update_bits(xspi->regmap, XSPI_BMCTL0, XSPI_BMCTL0_CS0ACC(0xff),
			   XSPI_BMCTL0_CS0ACC(0x01));

	regmap_update_bits(xspi->regmap, XSPI_BMCFG,
			   XSPI_BMCFG_WRMD | XSPI_BMCFG_MWRCOMB |
			   XSPI_BMCFG_MWRSIZE(0xff) | XSPI_BMCFG_PREEN,
			   0 | XSPI_BMCFG_MWRCOMB | XSPI_BMCFG_MWRSIZE(0x0f) |
			   XSPI_BMCFG_PREEN);

	regmap_update_bits(xspi->regmap, XSPI_LIOCFGCS0, XSPI_LIOCFG_PRTMD(0x3ff),
			   XSPI_LIOCFG_PRTMD(xspi->proto));

	memcpy_fromio(buf, xspi->dirmap + from, len);

	return len;
}

ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf)
{
	struct rpcif_priv *rpc = dev_get_drvdata(dev);
	size_t read;
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	read = rpc->info->impl->dirmap_read(rpc, offs, len, buf);

	pm_runtime_put(dev);

	return read;
}
EXPORT_SYMBOL(rpcif_dirmap_read);

/**
 * xspi_dirmap_write - Write data to xspi memory.
 * @dev: xspi device
 * @offs: offset
 * @len: Number of bytes to be written.
 * @buf: Buffer holding write data.
 *
 * This function writes data into xspi memory.
 *
 * Returns number of bytes written on success, else negative errno.
 */
ssize_t xspi_dirmap_write(struct device *dev, u64 offs, size_t len, const void *buf)
{
	struct rpcif_priv *xspi = dev_get_drvdata(dev);
	loff_t from = offs & (xspi->size - 1);
	u8 addsize = xspi->addr_nbytes - 1;
	size_t size = xspi->size - from;
	ssize_t writebytes;
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	if (len > size)
		len = size;

	if (len > MWRSIZE_MAX)
		writebytes = MWRSIZE_MAX;
	else
		writebytes = len;

	regmap_update_bits(xspi->regmap, XSPI_CMCFG0CS0,
			   XSPI_CMCFG0_FFMT(0x3) | XSPI_CMCFG0_ADDSIZE(0x3),
			   XSPI_CMCFG0_FFMT(0) | XSPI_CMCFG0_ADDSIZE(addsize));

	regmap_update_bits(xspi->regmap, XSPI_CMCFG2CS0,
			   XSPI_CMCFG2_WRCMD_UPPER(0xff) | XSPI_CMCFG2_WRLATE(0x1f),
			   XSPI_CMCFG2_WRCMD_UPPER(xspi->command) |
			   XSPI_CMCFG2_WRLATE(xspi->dummy));

	regmap_update_bits(xspi->regmap, XSPI_BMCTL0,
			   XSPI_BMCTL0_CS0ACC(0xff), XSPI_BMCTL0_CS0ACC(0x03));

	regmap_update_bits(xspi->regmap, XSPI_BMCFG,
			   XSPI_BMCFG_WRMD | XSPI_BMCFG_MWRCOMB |
			   XSPI_BMCFG_MWRSIZE(0xff) | XSPI_BMCFG_PREEN,
			   0 | XSPI_BMCFG_MWRCOMB | XSPI_BMCFG_MWRSIZE(0x0f) |
			   XSPI_BMCFG_PREEN);

	regmap_update_bits(xspi->regmap, XSPI_LIOCFGCS0, XSPI_LIOCFG_PRTMD(0x3ff),
			   XSPI_LIOCFG_PRTMD(xspi->proto));

	memcpy_toio(xspi->dirmap + from, buf, writebytes);

	/* Request to push the pending data */
	if (writebytes < MWRSIZE_MAX)
		regmap_update_bits(xspi->regmap, XSPI_BMCTL1,
				   XSPI_BMCTL1_MWRPUSH, XSPI_BMCTL1_MWRPUSH);

	pm_runtime_put(dev);

	return writebytes;
}
EXPORT_SYMBOL_GPL(xspi_dirmap_write);

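/*
 * The core driver only maps the register and dirmap resources and then
 * registers a child platform device ("rpc-if-spi" or
 * "rpc-if-hyperflash", depending on the flash node), which binds the
 * actual SPI controller or HyperFlash driver on top of the exported
 * rpcif_* API.
 */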
static int rpcif_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct platform_device *vdev;
	struct device_node *flash;
	struct rpcif_priv *rpc;
	struct resource *res;
	const char *name;
	int ret;

	flash = of_get_next_child(dev->of_node, NULL);
	if (!flash) {
		dev_warn(dev, "no flash node found\n");
		return -ENODEV;
	}

	if (of_device_is_compatible(flash, "jedec,spi-nor")) {
		name = "rpc-if-spi";
	} else if (of_device_is_compatible(flash, "cfi-flash")) {
		name = "rpc-if-hyperflash";
	} else {
		of_node_put(flash);
		dev_warn(dev, "unknown flash type\n");
		return -ENODEV;
	}
	of_node_put(flash);

	rpc = devm_kzalloc(dev, sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return -ENOMEM;

	rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs");
	if (IS_ERR(rpc->base))
		return PTR_ERR(rpc->base);
	rpc->info = of_device_get_match_data(dev);
	rpc->regmap = devm_regmap_init(dev, NULL, rpc, rpc->info->regmap_config);
	if (IS_ERR(rpc->regmap)) {
		dev_err(dev, "failed to init regmap for rpcif, error %ld\n",
			PTR_ERR(rpc->regmap));
		return PTR_ERR(rpc->regmap);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
	rpc->dirmap = devm_ioremap_resource(dev, res);
	if (IS_ERR(rpc->dirmap))
		return PTR_ERR(rpc->dirmap);

	rpc->size = resource_size(res);
	rpc->rstc = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(rpc->rstc))
		return PTR_ERR(rpc->rstc);

	/*
	 * Enabling/disabling the spi/spix2 clocks at runtime leads to
	 * flash write failures. So enable these clocks during probe()
	 * and disable them in remove().
	 */
	rpc->spix2_clk = devm_clk_get_optional_enabled(dev, "spix2");
	if (IS_ERR(rpc->spix2_clk))
		return dev_err_probe(dev, PTR_ERR(rpc->spix2_clk),
				     "cannot get enabled spix2 clk\n");

	rpc->spi_clk = devm_clk_get_optional_enabled(dev, "spi");
	if (IS_ERR(rpc->spi_clk))
		return dev_err_probe(dev, PTR_ERR(rpc->spi_clk),
				     "cannot get enabled spi clk\n");

	vdev = platform_device_alloc(name, pdev->id);
	if (!vdev)
		return -ENOMEM;
	vdev->dev.parent = dev;

	rpc->dev = dev;
	rpc->vdev = vdev;
	platform_set_drvdata(pdev, rpc);

	ret = platform_device_add(vdev);
	if (ret) {
		platform_device_put(vdev);
		return ret;
	}

	return 0;
}

static void rpcif_remove(struct platform_device *pdev)
{
	struct rpcif_priv *rpc = platform_get_drvdata(pdev);

	platform_device_unregister(rpc->vdev);
}

static int rpcif_suspend(struct device *dev)
{
	struct rpcif_priv *rpc = dev_get_drvdata(dev);

	clk_disable_unprepare(rpc->spi_clk);
	clk_disable_unprepare(rpc->spix2_clk);

	return 0;
}

static int rpcif_resume(struct device *dev)
{
	struct rpcif_priv *rpc = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(rpc->spix2_clk);
	if (ret) {
		dev_err(dev, "failed to enable spix2 clock: %pe\n", ERR_PTR(ret));
		return ret;
	}

	ret = clk_prepare_enable(rpc->spi_clk);
	if (ret) {
		clk_disable_unprepare(rpc->spix2_clk);
		dev_err(dev, "failed to enable spi clock: %pe\n", ERR_PTR(ret));
		return ret;
	}

	return 0;
}

static const struct rpcif_impl rpcif_impl = {
	.hw_init = rpcif_hw_init_impl,
	.prepare = rpcif_prepare_impl,
	.manual_xfer = rpcif_manual_xfer_impl,
	.dirmap_read = rpcif_dirmap_read_impl,
	.status_reg = RPCIF_CMNSR,
	.status_mask = RPCIF_CMNSR_TEND,
};

static const struct rpcif_impl xspi_impl = {
	.hw_init = xspi_hw_init_impl,
	.prepare = xspi_prepare_impl,
	.manual_xfer = xspi_manual_xfer_impl,
	.dirmap_read = xspi_dirmap_read_impl,
	.status_reg = XSPI_INTS,
	.status_mask = XSPI_INTS_CMDCMP,
};

static const struct rpcif_info rpcif_info_r8a7796 = {
	.regmap_config = &rpcif_regmap_config,
	.impl = &rpcif_impl,
	.type = RPCIF_RCAR_GEN3,
	.strtim = 6,
};

static const struct rpcif_info rpcif_info_gen3 = {
	.regmap_config = &rpcif_regmap_config,
	.impl = &rpcif_impl,
	.type = RPCIF_RCAR_GEN3,
	.strtim = 7,
};

static const struct rpcif_info rpcif_info_rz_g2l = {
	.regmap_config = &rpcif_regmap_config,
	.impl = &rpcif_impl,
	.type = RPCIF_RZ_G2L,
	.strtim = 7,
};

static const struct rpcif_info rpcif_info_gen4 = {
	.regmap_config = &rpcif_regmap_config,
	.impl = &rpcif_impl,
	.type = RPCIF_RCAR_GEN4,
	.strtim = 15,
};

static const struct rpcif_info xspi_info_r9a09g047 = {
	.regmap_config = &xspi_regmap_config,
	.impl = &xspi_impl,
	.type = XSPI_RZ_G3E,
};

static const struct of_device_id rpcif_of_match[] = {
	{ .compatible = "renesas,r8a7796-rpc-if", .data = &rpcif_info_r8a7796 },
	{ .compatible = "renesas,r9a09g047-xspi", .data = &xspi_info_r9a09g047 },
	{ .compatible = "renesas,rcar-gen3-rpc-if", .data = &rpcif_info_gen3 },
	{ .compatible = "renesas,rcar-gen4-rpc-if", .data = &rpcif_info_gen4 },
	{ .compatible = "renesas,rzg2l-rpc-if", .data = &rpcif_info_rz_g2l },
	{},
};
MODULE_DEVICE_TABLE(of, rpcif_of_match);

static DEFINE_SIMPLE_DEV_PM_OPS(rpcif_pm_ops, rpcif_suspend, rpcif_resume);

static struct platform_driver rpcif_driver = {
	.probe = rpcif_probe,
	.remove = rpcif_remove,
	.driver = {
		.name = "rpc-if",
		.of_match_table = rpcif_of_match,
		.pm = pm_sleep_ptr(&rpcif_pm_ops),
	},
};
module_platform_driver(rpcif_driver);

MODULE_DESCRIPTION("Renesas RPC-IF core driver");
MODULE_LICENSE("GPL v2");