// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
/*
 * Copyright (C) 2025 Amlogic, Inc. All rights reserved
 *
 * Driver for the SPI Mode of Amlogic Flash Controller
 * Authors:
 *	Liang Yang <liang.yang@amlogic.com>
 *	Feng Chen <feng.chen@amlogic.com>
 *	Xianwei Zhao <xianwei.zhao@amlogic.com>
 */

#include <linux/platform_device.h>
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/regmap.h>
#include <linux/mtd/spinand.h>
#include <linux/spi/spi-mem.h>

#define SFC_CMD			0x00
#define SFC_CFG			0x04
#define SFC_DADR		0x08
#define SFC_IADR		0x0c
#define SFC_BUF			0x10
#define SFC_INFO		0x14
#define SFC_DC			0x18
#define SFC_ADR			0x1c
#define SFC_DL			0x20
#define SFC_DH			0x24
#define SFC_CADR		0x28
#define SFC_SADR		0x2c
#define SFC_RX_IDX		0x34
#define SFC_RX_DAT		0x38
#define SFC_SPI_CFG		0x40

/* Settings in SFC_CMD */

/*
 * The 4-bit field supports up to four chip selects (a low bit selects the
 * chip, a high bit deselects it), but the SPI mode only supports two.
 */
#define CHIP_SELECT_MASK	GENMASK(13, 10)
#define CS_NONE			0xf
#define CS_0			0xe
#define CS_1			0xd

#define CLE			(0x5 << 14)
#define ALE			(0x6 << 14)
#define DWR			(0x4 << 14)
#define DRD			(0x8 << 14)
#define DUMMY			(0xb << 14)
#define IDLE			(0xc << 14)
#define IDLE_CYCLE_MASK		GENMASK(9, 0)
#define EXT_CYCLE_MASK		GENMASK(9, 0)

#define OP_M2N			((0 << 17) | (2 << 20))
#define OP_N2M			((1 << 17) | (2 << 20))
#define OP_STS			((3 << 17) | (2 << 20))
#define OP_ADL			((0 << 16) | (3 << 20))
#define OP_ADH			((1 << 16) | (3 << 20))
#define OP_AIL			((2 << 16) | (3 << 20))
#define OP_AIH			((3 << 16) | (3 << 20))
#define OP_ASL			((4 << 16) | (3 << 20))
#define OP_ASH			((5 << 16) | (3 << 20))
#define OP_SEED			((8 << 16) | (3 << 20))
#define SEED_MASK		GENMASK(14, 0)
#define ENABLE_RANDOM		BIT(19)

#define CMD_COMMAND(cs_sel, cmd)	(CLE | ((cs_sel) << 10) | (cmd))
#define CMD_ADDR(cs_sel, addr)		(ALE | ((cs_sel) << 10) | (addr))
#define CMD_DUMMY(cs_sel, cyc)		(DUMMY | ((cs_sel) << 10) | ((cyc) & EXT_CYCLE_MASK))
#define CMD_IDLE(cs_sel, cyc)		(IDLE | ((cs_sel) << 10) | ((cyc) & IDLE_CYCLE_MASK))
#define CMD_MEM2NAND(bch, pages)	(OP_M2N | ((bch) << 14) | (pages))
#define CMD_NAND2MEM(bch, pages)	(OP_N2M | ((bch) << 14) | (pages))
#define CMD_DATA_ADDRL(addr)		(OP_ADL | ((addr) & 0xffff))
#define CMD_DATA_ADDRH(addr)		(OP_ADH | (((addr) >> 16) & 0xffff))
#define CMD_INFO_ADDRL(addr)		(OP_AIL | ((addr) & 0xffff))
#define CMD_INFO_ADDRH(addr)		(OP_AIH | (((addr) >> 16) & 0xffff))
#define CMD_SEED(seed)			(OP_SEED | ((seed) & SEED_MASK))

#define GET_CMD_SIZE(x)		(((x) >> 22) & GENMASK(4, 0))

#define DEFAULT_PULLUP_CYCLE	2
#define CS_SETUP_CYCLE		1
#define CS_HOLD_CYCLE		2
#define DEFAULT_BUS_CYCLE	4

#define RAW_SIZE		GENMASK(13, 0)
#define RAW_SIZE_BW		14

#define DMA_ADDR_ALIGN		8

/* Bit fields in SFC_SPI_CFG */
#define SPI_MODE_EN		BIT(31)
#define RAW_EXT_SIZE		GENMASK(29, 18)
#define ADDR_LANE		GENMASK(17, 16)
#define CPOL			BIT(15)
#define CPHA			BIT(14)
#define EN_HOLD			BIT(13)
#define EN_WP			BIT(12)
#define TXADJ			GENMASK(11, 8)
#define RXADJ			GENMASK(7, 4)
#define CMD_LANE		GENMASK(3, 2)
#define DATA_LANE		GENMASK(1, 0)
#define LANE_MAX		0x3

/* Raw R/W size: raw ext size[25:14] + raw size[13:0] */
#define RAW_MAX_RW_SIZE_MASK	GENMASK(25, 0)

/* ECC fields */
#define ECC_COMPLETE		BIT(31)
#define ECC_UNCORRECTABLE	0x3f
#define ECC_ERR_CNT(x)		(((x) >> 24) & 0x3f)
#define ECC_ZERO_CNT(x)		(((x) >> 16) & 0x3f)

#define ECC_BCH8_512		1
#define ECC_BCH8_1K		2
#define ECC_BCH8_PARITY_BYTES	14
#define ECC_BCH8_USER_BYTES	2
#define ECC_BCH8_INFO_BYTES	(ECC_BCH8_USER_BYTES + ECC_BCH8_PARITY_BYTES)
#define ECC_BCH8_STRENGTH	8
#define ECC_BCH8_DEFAULT_STEP	512
#define ECC_DEFAULT_BCH_MODE	ECC_BCH8_512
#define ECC_PER_INFO_BYTE	8
#define ECC_PATTERN		0x5a
#define ECC_BCH_MAX_SECT_SIZE	63
/* Soft flags for the SFC */
#define SFC_HWECC		BIT(0)
#define SFC_DATA_RANDOM		BIT(1)
#define SFC_DATA_ONLY		BIT(2)
#define SFC_OOB_ONLY		BIT(3)
#define SFC_DATA_OOB		BIT(4)
#define SFC_AUTO_OOB		BIT(5)
#define SFC_RAW_RW		BIT(6)
#define SFC_XFER_MODE_MASK	GENMASK(6, 2)

#define SFC_DATABUF_SIZE	8192
#define SFC_INFOBUF_SIZE	256
#define SFC_BUF_SIZE		(SFC_DATABUF_SIZE + SFC_INFOBUF_SIZE)

/* Limits imposed by the PCB and the SPI-NAND chip */
#define SFC_MAX_FREQUENCY	(250 * 1000 * 1000)
#define SFC_MIN_FREQUENCY	(4 * 1000 * 1000)
#define SFC_BUS_DEFAULT_CLK	40000000
#define SFC_MAX_CS_NUM		2

/* SPI-flash R/W operation commands */
#define SPIFLASH_RD_OCTALIO	0xcb
#define SPIFLASH_RD_OCTAL	0x8b
#define SPIFLASH_RD_QUADIO	0xeb
#define SPIFLASH_RD_QUAD	0x6b
#define SPIFLASH_RD_DUALIO	0xbb
#define SPIFLASH_RD_DUAL	0x3b
#define SPIFLASH_RD_FAST	0x0b
#define SPIFLASH_RD		0x03
#define SPIFLASH_WR_OCTALIO	0xc2
#define SPIFLASH_WR_OCTAL	0x82
#define SPIFLASH_WR_QUAD	0x32
#define SPIFLASH_WR		0x02
#define SPIFLASH_UP_QUAD	0x34
#define SPIFLASH_UP		0x84
165
166 struct aml_sfc_ecc_cfg {
167 u32 stepsize;
168 u32 nsteps;
169 u32 strength;
170 u32 oobsize;
171 u32 bch;
172 };
173
174 struct aml_ecc_stats {
175 u32 corrected;
176 u32 bitflips;
177 u32 failed;
178 };
179
180 struct aml_sfc_caps {
181 struct aml_sfc_ecc_cfg *ecc_caps;
182 u32 num_ecc_caps;
183 };
184
185 struct aml_sfc {
186 struct device *dev;
187 struct clk *gate_clk;
188 struct clk *core_clk;
189 struct spi_controller *ctrl;
190 struct regmap *regmap_base;
191 const struct aml_sfc_caps *caps;
192 struct nand_ecc_engine ecc_eng;
193 struct aml_ecc_stats ecc_stats;
194 dma_addr_t daddr;
195 dma_addr_t iaddr;
196 u32 info_bytes;
197 u32 bus_rate;
198 u32 flags;
199 u32 rx_adj;
200 u32 cs_sel;
201 u8 *data_buf;
202 __le64 *info_buf;
203 u8 *priv;
204 };
205
206 #define AML_ECC_DATA(sz, s, b) { .stepsize = (sz), .strength = (s), .bch = (b) }
207
208 static struct aml_sfc_ecc_cfg aml_a113l2_ecc_caps[] = {
209 AML_ECC_DATA(512, 8, ECC_BCH8_512),
210 AML_ECC_DATA(1024, 8, ECC_BCH8_1K),
211 };
212
213 static const struct aml_sfc_caps aml_a113l2_sfc_caps = {
214 .ecc_caps = aml_a113l2_ecc_caps,
215 .num_ecc_caps = ARRAY_SIZE(aml_a113l2_ecc_caps)
216 };
217
static struct aml_sfc *nand_to_aml_sfc(struct nand_device *nand)
{
	struct nand_ecc_engine *eng = nand->ecc.engine;

	return container_of(eng, struct aml_sfc, ecc_eng);
}

static inline void *aml_sfc_to_ecc_ctx(struct aml_sfc *sfc)
{
	return sfc->priv;
}

static int aml_sfc_wait_cmd_finish(struct aml_sfc *sfc, u64 timeout_ms)
{
	u32 cmd_size = 0;
	int ret;

	/*
	 * The SPI NAND flash controller employs a two-stage pipeline:
	 * 1) command prefetch; 2) command execution.
	 *
	 * All commands are stored in the FIFO, with one prefetched for execution.
	 *
	 * There are cases where the FIFO is detected as empty, yet a command may
	 * still be in execution and a prefetched command pending execution.
	 *
	 * So, send two idle commands to ensure all previous commands have
	 * been executed.
	 */
	regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, 0));
	regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, 0));

	/* Wait for the FIFO to empty. */
	ret = regmap_read_poll_timeout(sfc->regmap_base, SFC_CMD, cmd_size,
				       !GET_CMD_SIZE(cmd_size),
				       10, timeout_ms * 1000);
	if (ret)
		dev_err(sfc->dev, "timed out waiting for the CMD FIFO to empty\n");

	return ret;
}

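/*
 * Bracket the start of a transfer: release every chip select for a pull-up
 * period, then assert the target chip select and keep it idle for the CS
 * setup cycles before the first clock edge.
 */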
static int aml_sfc_pre_transfer(struct aml_sfc *sfc, u32 idle_cycle, u32 cs2clk_cycle)
{
	int ret;

	ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(CS_NONE, idle_cycle));
	if (ret)
		return ret;

	return regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, cs2clk_cycle));
}

static int aml_sfc_end_transfer(struct aml_sfc *sfc, u32 clk2cs_cycle)
{
	int ret;

	ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, clk2cs_cycle));
	if (ret)
		return ret;

	return aml_sfc_wait_cmd_finish(sfc, 0);
}

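/*
 * Encode a bus width of 1, 2, 4 or 8 wires as its power-of-two index in the
 * given 2-bit lane field; unsupported widths leave the field unchanged.
 */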
static int aml_sfc_set_bus_width(struct aml_sfc *sfc, u8 buswidth, u32 mask)
{
	int i;
	u32 conf = 0;

	for (i = 0; i <= LANE_MAX; i++) {
		if (buswidth == 1 << i) {
			conf = i << __ffs(mask);
			return regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG,
						  mask, conf);
		}
	}

	return 0;
}

static int aml_sfc_send_cmd(struct aml_sfc *sfc, const struct spi_mem_op *op)
{
	int i, ret;
	u8 val;

	ret = aml_sfc_set_bus_width(sfc, op->cmd.buswidth, CMD_LANE);
	if (ret)
		return ret;

	for (i = 0; i < op->cmd.nbytes; i++) {
		val = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff;
		ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_COMMAND(sfc->cs_sel, val));
		if (ret)
			return ret;
	}

	return 0;
}

static int aml_sfc_send_addr(struct aml_sfc *sfc, const struct spi_mem_op *op)
{
	int i, ret;
	u8 val;

	ret = aml_sfc_set_bus_width(sfc, op->addr.buswidth, ADDR_LANE);
	if (ret)
		return ret;

	for (i = 0; i < op->addr.nbytes; i++) {
		val = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff;

		ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_ADDR(sfc->cs_sel, val));
		if (ret)
			return ret;
	}

	return 0;
}

static bool aml_sfc_is_xio_op(const struct spi_mem_op *op)
{
	switch (op->cmd.opcode) {
	case SPIFLASH_RD_OCTALIO:
	case SPIFLASH_RD_QUADIO:
	case SPIFLASH_RD_DUALIO:
		return true;
	default:
		break;
	}

	return false;
}

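/*
 * Emit the command, address and dummy phases of an operation. For the
 * dual/quad/octal I/O read commands the dummy bytes are clocked on the data
 * lanes, so the cycle count scales down with the data bus width; all other
 * commands clock dummy bytes one bit per cycle.
 */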
static int aml_sfc_send_cmd_addr_dummy(struct aml_sfc *sfc, const struct spi_mem_op *op)
{
	u32 dummy_cycle, cmd;
	int ret;

	ret = aml_sfc_send_cmd(sfc, op);
	if (ret)
		return ret;

	ret = aml_sfc_send_addr(sfc, op);
	if (ret)
		return ret;

	if (op->dummy.nbytes) {
		/* Dummy buswidth configuration is not supported. */
		if (aml_sfc_is_xio_op(op))
			dummy_cycle = op->dummy.nbytes * 8 / op->data.buswidth;
		else
			dummy_cycle = op->dummy.nbytes * 8;
		cmd = CMD_DUMMY(sfc->cs_sel, dummy_cycle - 1);
		return regmap_write(sfc->regmap_base, SFC_CMD, cmd);
	}

	return 0;
}

static bool aml_sfc_is_snand_hwecc_page_op(struct aml_sfc *sfc, const struct spi_mem_op *op)
{
	switch (op->cmd.opcode) {
	/* SPI NAND read-from-cache commands */
	case SPIFLASH_RD_QUADIO:
	case SPIFLASH_RD_QUAD:
	case SPIFLASH_RD_DUALIO:
	case SPIFLASH_RD_DUAL:
	case SPIFLASH_RD_FAST:
	case SPIFLASH_RD:
	/* SPI NAND write-to-cache commands */
	case SPIFLASH_WR_QUAD:
	case SPIFLASH_WR:
	case SPIFLASH_UP_QUAD:
	case SPIFLASH_UP:
		return sfc->flags & SFC_HWECC;
	default:
		break;
	}

	return false;
}

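/*
 * Map the data buffer (and the optional info/OOB buffer) for DMA and
 * program their 32-bit bus addresses into the controller through the
 * ADL/ADH and AIL/AIH descriptor commands.
 */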
static int aml_sfc_dma_buffer_setup(struct aml_sfc *sfc, void *databuf,
				    int datalen, void *infobuf, int infolen,
				    enum dma_data_direction dir)
{
	u32 cmd = 0;
	int ret;

	sfc->daddr = dma_map_single(sfc->dev, databuf, datalen, dir);
	ret = dma_mapping_error(sfc->dev, sfc->daddr);
	if (ret) {
		dev_err(sfc->dev, "DMA mapping error\n");
		return ret;
	}

	cmd = CMD_DATA_ADDRL(sfc->daddr);
	ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
	if (ret)
		goto out_map_data;

	cmd = CMD_DATA_ADDRH(sfc->daddr);
	ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
	if (ret)
		goto out_map_data;

	if (infobuf) {
		sfc->iaddr = dma_map_single(sfc->dev, infobuf, infolen, dir);
		ret = dma_mapping_error(sfc->dev, sfc->iaddr);
		if (ret) {
			dev_err(sfc->dev, "DMA mapping error\n");
			goto out_map_data;
		}

		sfc->info_bytes = infolen;
		cmd = CMD_INFO_ADDRL(sfc->iaddr);
		ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
		if (ret)
			goto out_map_info;

		cmd = CMD_INFO_ADDRH(sfc->iaddr);
		ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
		if (ret)
			goto out_map_info;
	}

	return 0;

out_map_info:
	dma_unmap_single(sfc->dev, sfc->iaddr, infolen, dir);
out_map_data:
	dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir);

	return ret;
}

static void aml_sfc_dma_buffer_release(struct aml_sfc *sfc,
				       int datalen, int infolen,
				       enum dma_data_direction dir)
{
	dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir);
	if (infolen) {
		dma_unmap_single(sfc->dev, sfc->iaddr, infolen, dir);
		sfc->info_bytes = 0;
	}
}

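/*
 * A buffer may be handed to the DMA engine directly only when it is
 * suitably aligned and backed by a linear-map address (virt_addr_valid(),
 * i.e. not vmalloc or stack memory); anything else is bounced through a
 * temporary kmalloc'ed buffer by the helpers below.
 */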
static bool aml_sfc_dma_buffer_is_safe(const void *buffer)
{
	if ((uintptr_t)buffer % DMA_ADDR_ALIGN)
		return false;

	if (virt_addr_valid(buffer))
		return true;

	return false;
}

static void *aml_get_dma_safe_input_buf(const struct spi_mem_op *op)
{
	if (aml_sfc_dma_buffer_is_safe(op->data.buf.in))
		return op->data.buf.in;

	return kzalloc(op->data.nbytes, GFP_KERNEL);
}

static void aml_sfc_put_dma_safe_input_buf(const struct spi_mem_op *op, void *buf)
{
	if (WARN_ON(op->data.dir != SPI_MEM_DATA_IN) || WARN_ON(!buf))
		return;

	if (buf == op->data.buf.in)
		return;

	memcpy(op->data.buf.in, buf, op->data.nbytes);
	kfree(buf);
}

static void *aml_sfc_get_dma_safe_output_buf(const struct spi_mem_op *op)
{
	if (aml_sfc_dma_buffer_is_safe(op->data.buf.out))
		return (void *)op->data.buf.out;

	return kmemdup(op->data.buf.out, op->data.nbytes, GFP_KERNEL);
}

static void aml_sfc_put_dma_safe_output_buf(const struct spi_mem_op *op, const void *buf)
{
	if (WARN_ON(op->data.dir != SPI_MEM_DATA_OUT) || WARN_ON(!buf))
		return;

	if (buf != op->data.buf.out)
		kfree(buf);
}

static u64 aml_sfc_cal_timeout_cycle(struct aml_sfc *sfc, const struct spi_mem_op *op)
{
	u64 ms;

	/* For each byte we wait for (8 cycles / buswidth) of the SPI clock. */
	ms = 8 * MSEC_PER_SEC * op->data.nbytes / op->data.buswidth;
	do_div(ms, sfc->bus_rate / DEFAULT_BUS_CYCLE);

	/*
	 * Double the value and add a 200 ms tolerance to compensate for
	 * the impact of specific CS hold time, CS setup time sequences,
	 * controller burst gaps, and other related timing variations.
	 */
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	return ms;
}

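/*
 * The controller's DMA engine sets ECC_COMPLETE in the info word of the
 * last ECC step (or in the single info word of a raw transfer) once the
 * data has landed in memory; poll for it before touching the buffers.
 */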
static void aml_sfc_check_ecc_pages_valid(struct aml_sfc *sfc, bool raw)
{
	struct aml_sfc_ecc_cfg *ecc_cfg;
	__le64 *info;
	int ret;

	info = sfc->info_buf;
	ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
	info += raw ? 0 : ecc_cfg->nsteps - 1;

	do {
		usleep_range(10, 15);
		/* The info word is updated by the NFC DMA engine. */
		smp_rmb();
		dma_sync_single_for_cpu(sfc->dev, sfc->iaddr, sfc->info_bytes,
					DMA_FROM_DEVICE);
		ret = le64_to_cpu(*info) & ECC_COMPLETE;
	} while (!ret);
}

static int aml_sfc_raw_io_op(struct aml_sfc *sfc, const struct spi_mem_op *op)
{
	void *buf = NULL;
	int ret;
	bool is_datain = false;
	u32 cmd = 0, conf;
	u64 timeout_ms;

	if (!op->data.nbytes)
		goto end_xfer;

	conf = (op->data.nbytes >> RAW_SIZE_BW) << __ffs(RAW_EXT_SIZE);
	ret = regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG, RAW_EXT_SIZE, conf);
	if (ret)
		goto err_out;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		is_datain = true;

		buf = aml_get_dma_safe_input_buf(op);
		if (!buf) {
			ret = -ENOMEM;
			goto err_out;
		}

		cmd |= CMD_NAND2MEM(0, (op->data.nbytes & RAW_SIZE));
	} else if (op->data.dir == SPI_MEM_DATA_OUT) {
		is_datain = false;

		buf = aml_sfc_get_dma_safe_output_buf(op);
		if (!buf) {
			ret = -ENOMEM;
			goto err_out;
		}

		cmd |= CMD_MEM2NAND(0, (op->data.nbytes & RAW_SIZE));
	} else {
		goto end_xfer;
	}

	ret = aml_sfc_dma_buffer_setup(sfc, buf, op->data.nbytes,
				       is_datain ? sfc->info_buf : NULL,
				       is_datain ? ECC_PER_INFO_BYTE : 0,
				       is_datain ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	if (ret)
		goto err_out;

	ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
	if (ret)
		goto err_out;

	timeout_ms = aml_sfc_cal_timeout_cycle(sfc, op);
	ret = aml_sfc_wait_cmd_finish(sfc, timeout_ms);
	if (ret)
		goto err_out;

	if (is_datain)
		aml_sfc_check_ecc_pages_valid(sfc, true);

	if (op->data.dir == SPI_MEM_DATA_IN)
		aml_sfc_put_dma_safe_input_buf(op, buf);
	else if (op->data.dir == SPI_MEM_DATA_OUT)
		aml_sfc_put_dma_safe_output_buf(op, buf);

	aml_sfc_dma_buffer_release(sfc, op->data.nbytes,
				   is_datain ? ECC_PER_INFO_BYTE : 0,
				   is_datain ? DMA_FROM_DEVICE : DMA_TO_DEVICE);

end_xfer:
	return aml_sfc_end_transfer(sfc, CS_HOLD_CYCLE);

err_out:
	return ret;
}

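/*
 * Each ECC step owns one 8-byte info word whose low 16 bits carry the two
 * user (free OOB) bytes of that step. These helpers pack user bytes into,
 * and extract them from, the info buffer.
 */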
static void aml_sfc_set_user_byte(struct aml_sfc *sfc, __le64 *info_buf,
				  u8 *oob_buf, bool auto_oob)
{
	struct aml_sfc_ecc_cfg *ecc_cfg;
	__le64 *info;
	int i, count, step_size;

	ecc_cfg = aml_sfc_to_ecc_ctx(sfc);

	step_size = auto_oob ? ECC_BCH8_INFO_BYTES : ECC_BCH8_USER_BYTES;

	for (i = 0, count = 0; i < ecc_cfg->nsteps; i++, count += step_size) {
		info = &info_buf[i];
		*info &= cpu_to_le64(~0xffff);
		*info |= cpu_to_le64((oob_buf[count + 1] << 8) + oob_buf[count]);
	}
}

static void aml_sfc_get_user_byte(struct aml_sfc *sfc, __le64 *info_buf, u8 *oob_buf)
{
	struct aml_sfc_ecc_cfg *ecc_cfg;
	__le64 *info;
	int i, count;

	ecc_cfg = aml_sfc_to_ecc_ctx(sfc);

	for (i = 0, count = 0; i < ecc_cfg->nsteps; i++, count += ECC_BCH8_INFO_BYTES) {
		info = &info_buf[i];
		oob_buf[count] = le64_to_cpu(*info);
		oob_buf[count + 1] = le64_to_cpu(*info) >> 8;
	}
}

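/*
 * Walk the per-step info words and return the maximum number of corrected
 * bitflips across the page, or -EBADMSG if any step reports the
 * uncorrectable marker.
 */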
static int aml_sfc_check_hwecc_status(struct aml_sfc *sfc, __le64 *info_buf)
{
	struct aml_sfc_ecc_cfg *ecc_cfg;
	__le64 *info;
	u32 i, max_bitflips = 0, per_sector_bitflips = 0;

	ecc_cfg = aml_sfc_to_ecc_ctx(sfc);

	sfc->ecc_stats.failed = 0;
	sfc->ecc_stats.bitflips = 0;
	sfc->ecc_stats.corrected = 0;

	for (i = 0, info = info_buf; i < ecc_cfg->nsteps; i++, info++) {
		if (ECC_ERR_CNT(le64_to_cpu(*info)) != ECC_UNCORRECTABLE) {
			per_sector_bitflips = ECC_ERR_CNT(le64_to_cpu(*info));
			max_bitflips = max_t(u32, max_bitflips, per_sector_bitflips);
			sfc->ecc_stats.corrected += per_sector_bitflips;
			continue;
		}

		return -EBADMSG;
	}

	return max_bitflips;
}

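/*
 * Read a full page through the hardware ECC pipeline: DMA the data and
 * info buffers in, wait for completion, record the ECC status, then copy
 * out data and/or user OOB bytes according to the current transfer mode.
 */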
static int aml_sfc_read_page_hwecc(struct aml_sfc *sfc, const struct spi_mem_op *op)
{
	struct aml_sfc_ecc_cfg *ecc_cfg;
	int ret, data_len, info_len;
	u32 page_size, cmd = 0;
	u64 timeout_ms;

	ecc_cfg = aml_sfc_to_ecc_ctx(sfc);

	page_size = ecc_cfg->stepsize * ecc_cfg->nsteps;
	data_len = page_size + ecc_cfg->oobsize;
	info_len = ecc_cfg->nsteps * ECC_PER_INFO_BYTE;

	ret = aml_sfc_dma_buffer_setup(sfc, sfc->data_buf, data_len,
				       sfc->info_buf, info_len, DMA_FROM_DEVICE);
	if (ret)
		goto err_out;

	cmd |= CMD_NAND2MEM(ecc_cfg->bch, ecc_cfg->nsteps);
	ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
	if (ret)
		goto err_out;

	timeout_ms = aml_sfc_cal_timeout_cycle(sfc, op);
	ret = aml_sfc_wait_cmd_finish(sfc, timeout_ms);
	if (ret)
		goto err_out;

	aml_sfc_check_ecc_pages_valid(sfc, false);
	aml_sfc_dma_buffer_release(sfc, data_len, info_len, DMA_FROM_DEVICE);

	/* Check the ECC status. */
	ret = aml_sfc_check_hwecc_status(sfc, sfc->info_buf);
	if (ret < 0)
		sfc->ecc_stats.failed++;
	else
		sfc->ecc_stats.bitflips = ret;

	if (sfc->flags & SFC_DATA_ONLY) {
		memcpy(op->data.buf.in, sfc->data_buf, page_size);
	} else if (sfc->flags & SFC_OOB_ONLY) {
		aml_sfc_get_user_byte(sfc, sfc->info_buf, op->data.buf.in);
	} else if (sfc->flags & SFC_DATA_OOB) {
		memcpy(op->data.buf.in, sfc->data_buf, page_size);
		aml_sfc_get_user_byte(sfc, sfc->info_buf, op->data.buf.in + page_size);
	}

	return aml_sfc_end_transfer(sfc, CS_HOLD_CYCLE);

err_out:
	return ret;
}

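/*
 * Write a full page through the hardware ECC pipeline: seed the info
 * buffer with the ECC pattern, pack the user OOB bytes unless this is a
 * data-only write, then DMA the buffers out and wait for completion.
 */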
static int aml_sfc_write_page_hwecc(struct aml_sfc *sfc, const struct spi_mem_op *op)
{
	struct aml_sfc_ecc_cfg *ecc_cfg;
	int ret, data_len, info_len;
	u32 page_size, cmd = 0;
	u64 timeout_ms;

	ecc_cfg = aml_sfc_to_ecc_ctx(sfc);

	page_size = ecc_cfg->stepsize * ecc_cfg->nsteps;
	data_len = page_size + ecc_cfg->oobsize;
	info_len = ecc_cfg->nsteps * ECC_PER_INFO_BYTE;

	memset(sfc->info_buf, ECC_PATTERN, ecc_cfg->oobsize);
	memcpy(sfc->data_buf, op->data.buf.out, page_size);

	if (!(sfc->flags & SFC_DATA_ONLY)) {
		if (sfc->flags & SFC_AUTO_OOB)
			aml_sfc_set_user_byte(sfc, sfc->info_buf,
					      (u8 *)op->data.buf.out + page_size, true);
		else
			aml_sfc_set_user_byte(sfc, sfc->info_buf,
					      (u8 *)op->data.buf.out + page_size, false);
	}

	ret = aml_sfc_dma_buffer_setup(sfc, sfc->data_buf, data_len,
				       sfc->info_buf, info_len, DMA_TO_DEVICE);
	if (ret)
		goto err_out;

	cmd |= CMD_MEM2NAND(ecc_cfg->bch, ecc_cfg->nsteps);
	ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
	if (ret)
		goto err_out;

	timeout_ms = aml_sfc_cal_timeout_cycle(sfc, op);

	ret = aml_sfc_wait_cmd_finish(sfc, timeout_ms);
	if (ret)
		goto err_out;

	aml_sfc_dma_buffer_release(sfc, data_len, info_len, DMA_TO_DEVICE);

	return aml_sfc_end_transfer(sfc, CS_HOLD_CYCLE);

err_out:
	return ret;
}

static int aml_sfc_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct aml_sfc *sfc;
	struct spi_device *spi;
	struct aml_sfc_ecc_cfg *ecc_cfg;
	int ret;

	sfc = spi_controller_get_devdata(mem->spi->controller);
	ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
	spi = mem->spi;
	sfc->cs_sel = spi->chip_select[0] ? CS_1 : CS_0;

	dev_dbg(sfc->dev, "cmd:0x%02x - addr:%08llX@%d:%u - dummy:%d:%u - data:%d:%u\n",
		op->cmd.opcode, op->addr.val, op->addr.buswidth, op->addr.nbytes,
		op->dummy.buswidth, op->dummy.nbytes, op->data.buswidth, op->data.nbytes);

	ret = aml_sfc_pre_transfer(sfc, DEFAULT_PULLUP_CYCLE, CS_SETUP_CYCLE);
	if (ret)
		return ret;

	ret = aml_sfc_send_cmd_addr_dummy(sfc, op);
	if (ret)
		return ret;

	ret = aml_sfc_set_bus_width(sfc, op->data.buswidth, DATA_LANE);
	if (ret)
		return ret;

	if (aml_sfc_is_snand_hwecc_page_op(sfc, op) &&
	    ecc_cfg && !(sfc->flags & SFC_RAW_RW)) {
		if (op->data.dir == SPI_MEM_DATA_IN)
			return aml_sfc_read_page_hwecc(sfc, op);
		else
			return aml_sfc_write_page_hwecc(sfc, op);
	}

	return aml_sfc_raw_io_op(sfc, op);
}

static int aml_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct aml_sfc *sfc;
	struct aml_sfc_ecc_cfg *ecc_cfg;

	sfc = spi_controller_get_devdata(mem->spi->controller);
	ecc_cfg = aml_sfc_to_ecc_ctx(sfc);

	if (aml_sfc_is_snand_hwecc_page_op(sfc, op) && ecc_cfg) {
		if (op->data.nbytes > ecc_cfg->stepsize * ECC_BCH_MAX_SECT_SIZE)
			return -EOPNOTSUPP;
	} else if (op->data.nbytes & ~RAW_MAX_RW_SIZE_MASK) {
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct spi_controller_mem_ops aml_sfc_mem_ops = {
	.adjust_op_size = aml_sfc_adjust_op_size,
	.exec_op = aml_sfc_exec_op,
};

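/*
 * OOB layout: each ECC step contributes ECC_BCH8_INFO_BYTES to the OOB
 * area, laid out as two free user bytes followed by the fourteen BCH8
 * parity bytes.
 */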
static int aml_sfc_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);

	if (section >= nand->ecc.ctx.nsteps)
		return -ERANGE;

	oobregion->offset = ECC_BCH8_USER_BYTES + (section * ECC_BCH8_INFO_BYTES);
	oobregion->length = ECC_BCH8_PARITY_BYTES;

	return 0;
}

static int aml_sfc_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);

	if (section >= nand->ecc.ctx.nsteps)
		return -ERANGE;

	oobregion->offset = section * ECC_BCH8_INFO_BYTES;
	oobregion->length = ECC_BCH8_USER_BYTES;

	return 0;
}

static const struct mtd_ooblayout_ops aml_sfc_ooblayout_ops = {
	.ecc = aml_sfc_ooblayout_ecc,
	.free = aml_sfc_ooblayout_free,
};

static int aml_spi_settings(struct aml_sfc *sfc, struct spi_device *spi)
{
	u32 conf = 0;

	if (spi->mode & SPI_CPHA)
		conf |= CPHA;

	if (spi->mode & SPI_CPOL)
		conf |= CPOL;

	conf |= FIELD_PREP(RXADJ, sfc->rx_adj);
	conf |= EN_HOLD | EN_WP;

	return regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG,
				  CPHA | CPOL | RXADJ |
				  EN_HOLD | EN_WP, conf);
}

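/*
 * The SPI clock is the core clock divided by DEFAULT_BUS_CYCLE, so clamp
 * the requested rate to the controller limits (falling back to the default
 * bus clock when the device does not specify one) and program the core
 * clock at DEFAULT_BUS_CYCLE times that rate.
 */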
static int aml_set_spi_clk(struct aml_sfc *sfc, struct spi_device *spi)
{
	u32 speed_hz;
	int ret;

	if (spi->max_speed_hz > SFC_MAX_FREQUENCY)
		speed_hz = SFC_MAX_FREQUENCY;
	else if (!spi->max_speed_hz)
		speed_hz = SFC_BUS_DEFAULT_CLK;
	else if (spi->max_speed_hz < SFC_MIN_FREQUENCY)
		speed_hz = SFC_MIN_FREQUENCY;
	else
		speed_hz = spi->max_speed_hz;

	/* The SPI clock is generated by dividing the bus clock by four by default. */
	ret = regmap_write(sfc->regmap_base, SFC_CFG, (DEFAULT_BUS_CYCLE - 1));
	if (ret) {
		dev_err(sfc->dev, "failed to set bus cycle\n");
		return ret;
	}

	return clk_set_rate(sfc->core_clk, speed_hz * DEFAULT_BUS_CYCLE);
}

static int aml_sfc_setup(struct spi_device *spi)
{
	struct aml_sfc *sfc;
	int ret;

	sfc = spi_controller_get_devdata(spi->controller);

	ret = aml_spi_settings(sfc, spi);
	if (ret)
		return ret;

	ret = aml_set_spi_clk(sfc, spi);
	if (ret)
		return ret;

	sfc->bus_rate = clk_get_rate(sfc->core_clk);

	return 0;
}

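/*
 * Match the user-requested ECC step size and strength against the
 * controller capabilities, falling back to the BCH8/512 defaults, verify
 * the page geometry, and allocate the ECC context used by the page
 * read/write paths.
 */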
static int aml_sfc_ecc_init_ctx(struct nand_device *nand)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct aml_sfc *sfc = nand_to_aml_sfc(nand);
	struct aml_sfc_ecc_cfg *ecc_cfg;
	const struct aml_sfc_caps *caps = sfc->caps;
	struct aml_sfc_ecc_cfg *ecc_caps = caps->ecc_caps;
	int i, ecc_strength, ecc_step_size;

	ecc_step_size = nand->ecc.user_conf.step_size;
	ecc_strength = nand->ecc.user_conf.strength;

	for (i = 0; i < caps->num_ecc_caps; i++) {
		if (ecc_caps[i].stepsize == ecc_step_size) {
			nand->ecc.ctx.conf.step_size = ecc_step_size;
			nand->ecc.ctx.conf.flags |= BIT(ecc_caps[i].bch);
		}

		if (ecc_caps[i].strength == ecc_strength)
			nand->ecc.ctx.conf.strength = ecc_strength;
	}

	if (!nand->ecc.ctx.conf.step_size) {
		nand->ecc.ctx.conf.step_size = ECC_BCH8_DEFAULT_STEP;
		nand->ecc.ctx.conf.flags |= BIT(ECC_DEFAULT_BCH_MODE);
	}

	if (!nand->ecc.ctx.conf.strength)
		nand->ecc.ctx.conf.strength = ECC_BCH8_STRENGTH;

	nand->ecc.ctx.nsteps = nand->memorg.pagesize / nand->ecc.ctx.conf.step_size;
	nand->ecc.ctx.total = nand->ecc.ctx.nsteps * ECC_BCH8_PARITY_BYTES;

	/* Verify the page size and OOB size against the SFC requirements. */
	if ((nand->memorg.pagesize % nand->ecc.ctx.conf.step_size) ||
	    (nand->memorg.oobsize < (nand->ecc.ctx.total +
				     nand->ecc.ctx.nsteps * ECC_BCH8_USER_BYTES)))
		return -EOPNOTSUPP;

	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
	if (!ecc_cfg)
		return -ENOMEM;

	ecc_cfg->stepsize = nand->ecc.ctx.conf.step_size;
	ecc_cfg->nsteps = nand->ecc.ctx.nsteps;
	ecc_cfg->strength = nand->ecc.ctx.conf.strength;
	ecc_cfg->oobsize = nand->memorg.oobsize;
	ecc_cfg->bch = nand->ecc.ctx.conf.flags & BIT(ECC_BCH8_512) ?
		       ECC_BCH8_512 : ECC_BCH8_1K;

	nand->ecc.ctx.priv = ecc_cfg;
	sfc->priv = ecc_cfg;
	mtd_set_ooblayout(mtd, &aml_sfc_ooblayout_ops);

	sfc->flags |= SFC_HWECC;

	return 0;
}

static void aml_sfc_ecc_cleanup_ctx(struct nand_device *nand)
{
	struct aml_sfc *sfc = nand_to_aml_sfc(nand);

	sfc->flags &= ~SFC_HWECC;
	kfree(nand->ecc.ctx.priv);
	sfc->priv = NULL;
}

static int aml_sfc_ecc_prepare_io_req(struct nand_device *nand,
				      struct nand_page_io_req *req)
{
	struct aml_sfc *sfc = nand_to_aml_sfc(nand);
	struct spinand_device *spinand = nand_to_spinand(nand);

	sfc->flags &= ~SFC_XFER_MODE_MASK;

	if (req->datalen && !req->ooblen)
		sfc->flags |= SFC_DATA_ONLY;
	else if (!req->datalen && req->ooblen)
		sfc->flags |= SFC_OOB_ONLY;
	else if (req->datalen && req->ooblen)
		sfc->flags |= SFC_DATA_OOB;

	if (req->mode == MTD_OPS_RAW)
		sfc->flags |= SFC_RAW_RW;
	else if (req->mode == MTD_OPS_AUTO_OOB)
		sfc->flags |= SFC_AUTO_OOB;

	memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));

	return 0;
}

static int aml_sfc_ecc_finish_io_req(struct nand_device *nand,
				     struct nand_page_io_req *req)
{
	struct aml_sfc *sfc = nand_to_aml_sfc(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);

	if (req->mode == MTD_OPS_RAW || req->type == NAND_PAGE_WRITE)
		return 0;

	if (sfc->ecc_stats.failed)
		mtd->ecc_stats.failed++;

	mtd->ecc_stats.corrected += sfc->ecc_stats.corrected;

	return sfc->ecc_stats.failed ? -EBADMSG : sfc->ecc_stats.bitflips;
}

static const struct spi_controller_mem_caps aml_sfc_mem_caps = {
	.ecc = true,
};

static const struct nand_ecc_engine_ops aml_sfc_ecc_engine_ops = {
	.init_ctx = aml_sfc_ecc_init_ctx,
	.cleanup_ctx = aml_sfc_ecc_cleanup_ctx,
	.prepare_io_req = aml_sfc_ecc_prepare_io_req,
	.finish_io_req = aml_sfc_ecc_finish_io_req,
};

static int aml_sfc_clk_init(struct aml_sfc *sfc)
{
	sfc->gate_clk = devm_clk_get_enabled(sfc->dev, "gate");
	if (IS_ERR(sfc->gate_clk)) {
		dev_err(sfc->dev, "unable to enable gate clk\n");
		return PTR_ERR(sfc->gate_clk);
	}

	sfc->core_clk = devm_clk_get_enabled(sfc->dev, "core");
	if (IS_ERR(sfc->core_clk)) {
		dev_err(sfc->dev, "unable to enable core clk\n");
		return PTR_ERR(sfc->core_clk);
	}

	return clk_set_rate(sfc->core_clk, SFC_BUS_DEFAULT_CLK);
}

static int aml_sfc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct spi_controller *ctrl;
	struct aml_sfc *sfc;
	void __iomem *reg_base;
	int ret;
	u32 val = 0;

	const struct regmap_config core_config = {
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		.max_register = SFC_SPI_CFG,
	};

	ctrl = devm_spi_alloc_host(dev, sizeof(*sfc));
	if (!ctrl)
		return -ENOMEM;
	platform_set_drvdata(pdev, ctrl);

	sfc = spi_controller_get_devdata(ctrl);
	sfc->dev = dev;
	sfc->ctrl = ctrl;

	sfc->caps = of_device_get_match_data(dev);
	if (!sfc->caps)
		return dev_err_probe(dev, -ENODEV, "failed to get device data\n");

	reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(reg_base))
		return PTR_ERR(reg_base);

	sfc->regmap_base = devm_regmap_init_mmio(dev, reg_base, &core_config);
	if (IS_ERR(sfc->regmap_base))
		return dev_err_probe(dev, PTR_ERR(sfc->regmap_base),
				     "failed to init sfc base regmap\n");

	sfc->data_buf = devm_kzalloc(dev, SFC_BUF_SIZE, GFP_KERNEL);
	if (!sfc->data_buf)
		return -ENOMEM;
	sfc->info_buf = (__le64 *)(sfc->data_buf + SFC_DATABUF_SIZE);

	ret = aml_sfc_clk_init(sfc);
	if (ret)
		return dev_err_probe(dev, ret, "failed to initialize SFC clock\n");

	/* Enable the SPI mode of the Amlogic flash controller. */
	ret = regmap_write(sfc->regmap_base, SFC_SPI_CFG, SPI_MODE_EN);
	if (ret)
		return dev_err_probe(dev, ret, "failed to enable SPI mode\n");

	ret = dma_set_mask(sfc->dev, DMA_BIT_MASK(32));
	if (ret)
		return dev_err_probe(sfc->dev, ret, "failed to set dma mask\n");

	sfc->ecc_eng.dev = &pdev->dev;
	sfc->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
	sfc->ecc_eng.ops = &aml_sfc_ecc_engine_ops;
	sfc->ecc_eng.priv = sfc;

	ret = nand_ecc_register_on_host_hw_engine(&sfc->ecc_eng);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "failed to register Amlogic host ECC engine\n");

	ret = of_property_read_u32(np, "amlogic,rx-adj", &val);
	if (!ret)
		sfc->rx_adj = val;

	ctrl->dev.of_node = np;
	ctrl->mem_ops = &aml_sfc_mem_ops;
	ctrl->mem_caps = &aml_sfc_mem_caps;
	ctrl->setup = aml_sfc_setup;
	ctrl->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD |
			  SPI_RX_DUAL | SPI_TX_OCTAL | SPI_RX_OCTAL;
	ctrl->max_speed_hz = SFC_MAX_FREQUENCY;
	ctrl->min_speed_hz = SFC_MIN_FREQUENCY;
	ctrl->num_chipselect = SFC_MAX_CS_NUM;

	return devm_spi_register_controller(dev, ctrl);
}

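/*
 * Example devicetree node (a sketch: the unit address and register size
 * below are illustrative; the compatible string, the "gate"/"core" clock
 * names and the optional "amlogic,rx-adj" property are the ones this
 * driver actually consumes):
 *
 *	spifc: spi@fe000000 {
 *		compatible = "amlogic,a4-spifc";
 *		reg = <0x0 0xfe000000 0x0 0x100>;
 *		clocks = <&clkc_gate>, <&clkc_core>;
 *		clock-names = "gate", "core";
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *		amlogic,rx-adj = <1>;
 *	};
 */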
static const struct of_device_id aml_sfc_of_match[] = {
	{
		.compatible = "amlogic,a4-spifc",
		.data = &aml_a113l2_sfc_caps
	},
	{},
};
MODULE_DEVICE_TABLE(of, aml_sfc_of_match);

static struct platform_driver aml_sfc_driver = {
	.driver = {
		.name = "aml_sfc",
		.of_match_table = aml_sfc_of_match,
	},
	.probe = aml_sfc_probe,
};
module_platform_driver(aml_sfc_driver);

MODULE_DESCRIPTION("Amlogic SPI Flash Controller driver");
MODULE_AUTHOR("Feng Chen <feng.chen@amlogic.com>");
MODULE_LICENSE("Dual MIT/GPL");