1 // SPDX-License-Identifier: (GPL-2.0-only OR MIT)
2 /*
3 * Copyright (C) 2025 Amlogic, Inc. All rights reserved
4 *
5 * Driver for the SPI Mode of Amlogic Flash Controller
6 * Authors:
7 * Liang Yang <liang.yang@amlogic.com>
8 * Feng Chen <feng.chen@amlogic.com>
9 * Xianwei Zhao <xianwei.zhao@amlogic.com>
10 */
11
12 #include <linux/platform_device.h>
13 #include <linux/clk-provider.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/bitfield.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/of.h>
19 #include <linux/clk.h>
20 #include <linux/delay.h>
21 #include <linux/bitops.h>
22 #include <linux/regmap.h>
23 #include <linux/mtd/spinand.h>
24 #include <linux/spi/spi-mem.h>
25
26 #define SFC_CMD 0x00
27 #define SFC_CFG 0x04
28 #define SFC_DADR 0x08
29 #define SFC_IADR 0x0c
30 #define SFC_BUF 0x10
31 #define SFC_INFO 0x14
32 #define SFC_DC 0x18
33 #define SFC_ADR 0x1c
34 #define SFC_DL 0x20
35 #define SFC_DH 0x24
36 #define SFC_CADR 0x28
37 #define SFC_SADR 0x2c
38 #define SFC_RX_IDX 0x34
39 #define SFC_RX_DAT 0x38
40 #define SFC_SPI_CFG 0x40
41
42 /* settings in SFC_CMD */
43
44 /* 4 bits select one of 4 chip selects (bit high = deselected, low = selected), but SPI mode supports only 2 */
45 #define CHIP_SELECT_MASK GENMASK(13, 10)
46 #define CS_NONE 0xf
47 #define CS_0 0xe
48 #define CS_1 0xd
49
50 #define CLE (0x5 << 14)
51 #define ALE (0x6 << 14)
52 #define DWR (0x4 << 14)
53 #define DRD (0x8 << 14)
54 #define DUMMY (0xb << 14)
55 #define IDLE (0xc << 14)
56 #define IDLE_CYCLE_MASK GENMASK(9, 0)
57 #define EXT_CYCLE_MASK GENMASK(9, 0)
58
59 #define OP_M2N ((0 << 17) | (2 << 20))
60 #define OP_N2M ((1 << 17) | (2 << 20))
61 #define OP_STS ((3 << 17) | (2 << 20))
62 #define OP_ADL ((0 << 16) | (3 << 20))
63 #define OP_ADH ((1 << 16) | (3 << 20))
64 #define OP_AIL ((2 << 16) | (3 << 20))
65 #define OP_AIH ((3 << 16) | (3 << 20))
66 #define OP_ASL ((4 << 16) | (3 << 20))
67 #define OP_ASH ((5 << 16) | (3 << 20))
68 #define OP_SEED ((8 << 16) | (3 << 20))
69 #define SEED_MASK GENMASK(14, 0)
70 #define ENABLE_RANDOM BIT(19)
71
72 #define CMD_COMMAND(cs_sel, cmd) (CLE | ((cs_sel) << 10) | (cmd))
73 #define CMD_ADDR(cs_sel, addr) (ALE | ((cs_sel) << 10) | (addr))
74 #define CMD_DUMMY(cs_sel, cyc) (DUMMY | ((cs_sel) << 10) | ((cyc) & EXT_CYCLE_MASK))
75 #define CMD_IDLE(cs_sel, cyc) (IDLE | ((cs_sel) << 10) | ((cyc) & IDLE_CYCLE_MASK))
76 #define CMD_MEM2NAND(bch, pages) (OP_M2N | ((bch) << 14) | (pages))
77 #define CMD_NAND2MEM(bch, pages) (OP_N2M | ((bch) << 14) | (pages))
78 #define CMD_DATA_ADDRL(addr) (OP_ADL | ((addr) & 0xffff))
79 #define CMD_DATA_ADDRH(addr) (OP_ADH | (((addr) >> 16) & 0xffff))
80 #define CMD_INFO_ADDRL(addr) (OP_AIL | ((addr) & 0xffff))
81 #define CMD_INFO_ADDRH(addr) (OP_AIH | (((addr) >> 16) & 0xffff))
82 #define CMD_SEED(seed) (OP_SEED | ((seed) & SEED_MASK))
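/*
 * Each write to SFC_CMD queues one command word. For example, issuing a
 * quad-read opcode on chip select 0 queues
 * CMD_COMMAND(CS_0, 0x6b) = CLE | (0xe << 10) | 0x6b = 0x1786b.
 */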
83
84 #define GET_CMD_SIZE(x) (((x) >> 22) & GENMASK(4, 0))
85
86 #define DEFAULT_PULLUP_CYCLE 2
87 #define CS_SETUP_CYCLE 1
88 #define CS_HOLD_CYCLE 2
89 #define DEFAULT_BUS_CYCLE 4
90
91 #define RAW_SIZE GENMASK(13, 0)
92 #define RAW_SIZE_BW 14
93
94 #define DMA_ADDR_ALIGN 8
95
96 /* Bit fields in SFC_SPI_CFG */
97 #define SPI_MODE_EN BIT(31)
98 #define RAW_EXT_SIZE GENMASK(29, 18)
99 #define ADDR_LANE GENMASK(17, 16)
100 #define CPOL BIT(15)
101 #define CPHA BIT(14)
102 #define EN_HOLD BIT(13)
103 #define EN_WP BIT(12)
104 #define TXADJ GENMASK(11, 8)
105 #define RXADJ GENMASK(7, 4)
106 #define CMD_LANE GENMASK(3, 2)
107 #define DATA_LANE GENMASK(1, 0)
108 #define LANE_MAX 0x3
109
110 /* raw ext size[25:14] + raw size[13:0] */
111 #define RAW_MAX_RW_SIZE_MASK GENMASK(25, 0)
112
113 /* Ecc fields */
114 #define ECC_COMPLETE BIT(31)
115 #define ECC_UNCORRECTABLE 0x3f
116 #define ECC_ERR_CNT(x) (((x) >> 24) & 0x3f)
117 #define ECC_ZERO_CNT(x) (((x) >> 16) & 0x3f)
118
119 #define ECC_BCH8_512 1
120 #define ECC_BCH8_1K 2
121 #define ECC_BCH8_PARITY_BYTES 14
122 #define ECC_BCH8_USER_BYTES 2
123 #define ECC_BCH8_INFO_BYTES (ECC_BCH8_USER_BYTES + ECC_BCH8_PARITY_BYTES)
124 #define ECC_BCH8_STRENGTH 8
125 #define ECC_BCH8_DEFAULT_STEP 512
126 #define ECC_DEFAULT_BCH_MODE ECC_BCH8_512
127 #define ECC_PER_INFO_BYTE 8
128 #define ECC_PATTERN 0x5a
129 #define ECC_BCH_MAX_SECT_SIZE 63
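/*
 * Each ECC step stores ECC_BCH8_USER_BYTES free bytes plus
 * ECC_BCH8_PARITY_BYTES of BCH parity (16 OOB bytes per step), and the
 * controller reports one ECC_PER_INFO_BYTE-wide info word per step.
 * For example, a 2048-byte page with 512-byte steps has 4 steps and
 * therefore needs at least 64 bytes of OOB.
 */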
130 /* soft flags for sfc */
131 #define SFC_HWECC BIT(0)
132 #define SFC_DATA_RANDOM BIT(1)
133 #define SFC_DATA_ONLY BIT(2)
134 #define SFC_OOB_ONLY BIT(3)
135 #define SFC_DATA_OOB BIT(4)
136 #define SFC_AUTO_OOB BIT(5)
137 #define SFC_RAW_RW BIT(6)
138 #define SFC_XFER_MODE_MASK GENMASK(6, 2)
139
140 #define SFC_DATABUF_SIZE 8192
141 #define SFC_INFOBUF_SIZE 256
142 #define SFC_BUF_SIZE (SFC_DATABUF_SIZE + SFC_INFOBUF_SIZE)
143
144 /* Frequency limits imposed by PCB routing and the SPI-NAND chip */
145 #define SFC_MAX_FREQUENCY (250 * 1000 * 1000)
146 #define SFC_MIN_FREQUENCY (4 * 1000 * 1000)
147 #define SFC_BUS_DEFAULT_CLK 40000000
148 #define SFC_MAX_CS_NUM 2
149
150 /* SPI-FLASH R/W operation cmd */
151 #define SPIFLASH_RD_OCTALIO 0xcb
152 #define SPIFLASH_RD_OCTAL 0x8b
153 #define SPIFLASH_RD_QUADIO 0xeb
154 #define SPIFLASH_RD_QUAD 0x6b
155 #define SPIFLASH_RD_DUALIO 0xbb
156 #define SPIFLASH_RD_DUAL 0x3b
157 #define SPIFLASH_RD_FAST 0x0b
158 #define SPIFLASH_RD 0x03
159 #define SPIFLASH_WR_OCTALIO 0xC2
160 #define SPIFLASH_WR_OCTAL 0x82
161 #define SPIFLASH_WR_QUAD 0x32
162 #define SPIFLASH_WR 0x02
163 #define SPIFLASH_UP_QUAD 0x34
164 #define SPIFLASH_UP 0x84
165
166 struct aml_sfc_ecc_cfg {
167 u32 stepsize;
168 u32 nsteps;
169 u32 strength;
170 u32 oobsize;
171 u32 bch;
172 };
173
174 struct aml_ecc_stats {
175 u32 corrected;
176 u32 bitflips;
177 u32 failed;
178 };
179
180 struct aml_sfc_caps {
181 struct aml_sfc_ecc_cfg *ecc_caps;
182 u32 num_ecc_caps;
183 };
184
185 struct aml_sfc {
186 struct device *dev;
187 struct clk *gate_clk;
188 struct clk *core_clk;
189 struct spi_controller *ctrl;
190 struct regmap *regmap_base;
191 const struct aml_sfc_caps *caps;
192 struct nand_ecc_engine ecc_eng;
193 struct aml_ecc_stats ecc_stats;
194 dma_addr_t daddr;
195 dma_addr_t iaddr;
196 u32 info_bytes;
197 u32 bus_rate;
198 u32 flags;
199 u32 rx_adj;
200 u32 cs_sel;
201 u8 *data_buf;
202 __le64 *info_buf;
203 u8 *priv;
204 };
205
206 #define AML_ECC_DATA(sz, s, b) { .stepsize = (sz), .strength = (s), .bch = (b) }
207
208 static struct aml_sfc_ecc_cfg aml_a113l2_ecc_caps[] = {
209 AML_ECC_DATA(512, 8, ECC_BCH8_512),
210 AML_ECC_DATA(1024, 8, ECC_BCH8_1K),
211 };
212
213 static const struct aml_sfc_caps aml_a113l2_sfc_caps = {
214 .ecc_caps = aml_a113l2_ecc_caps,
215 .num_ecc_caps = ARRAY_SIZE(aml_a113l2_ecc_caps)
216 };
217
218 static struct aml_sfc *nand_to_aml_sfc(struct nand_device *nand)
219 {
220 struct nand_ecc_engine *eng = nand->ecc.engine;
221
222 return container_of(eng, struct aml_sfc, ecc_eng);
223 }
224
225 static inline void *aml_sfc_to_ecc_ctx(struct aml_sfc *sfc)
226 {
227 return sfc->priv;
228 }
229
230 static int aml_sfc_wait_cmd_finish(struct aml_sfc *sfc, u64 timeout_ms)
231 {
232 u32 cmd_size = 0;
233 int ret;
234
235 /*
236 * The SPINAND flash controller employs a two-stage pipeline:
237 * 1) command prefetch; 2) command execution.
238 *
239 * All commands are stored in the FIFO, with one prefetched for execution.
240 *
241 * There are cases where the FIFO is detected as empty, yet a command may
242 * still be in execution and a prefetched command pending execution.
243 *
244 * So, send two idle commands to ensure all previous commands have
245 * been executed.
246 */
247 regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, 0));
248 regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, 0));
249
250 /* Wait for the FIFO to empty. */
251 ret = regmap_read_poll_timeout(sfc->regmap_base, SFC_CMD, cmd_size,
252 !GET_CMD_SIZE(cmd_size),
253 10, timeout_ms * 1000);
254 if (ret)
255 dev_err(sfc->dev, "wait for empty CMD FIFO time out\n");
256
257 return ret;
258 }
259
260 static int aml_sfc_pre_transfer(struct aml_sfc *sfc, u32 idle_cycle, u32 cs2clk_cycle)
261 {
262 int ret;
263
264 ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(CS_NONE, idle_cycle));
265 if (ret)
266 return ret;
267
268 return regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, cs2clk_cycle));
269 }
270
271 static int aml_sfc_end_transfer(struct aml_sfc *sfc, u32 clk2cs_cycle)
272 {
273 int ret;
274
275 ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, clk2cs_cycle));
276 if (ret)
277 return ret;
278
279 return aml_sfc_wait_cmd_finish(sfc, 0);
280 }
281
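/*
 * Map a bus width of 1, 2, 4 or 8 lanes to the 2-bit lane field selected by
 * @mask in SFC_SPI_CFG; widths the controller cannot express leave the
 * configuration untouched.
 */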
282 static int aml_sfc_set_bus_width(struct aml_sfc *sfc, u8 buswidth, u32 mask)
283 {
284 int i;
285 u32 conf = 0;
286
287 for (i = 0; i <= LANE_MAX; i++) {
288 if (buswidth == 1 << i) {
289 conf = i << __ffs(mask);
290 return regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG,
291 mask, conf);
292 }
293 }
294
295 return 0;
296 }
297
298 static int aml_sfc_send_cmd(struct aml_sfc *sfc, const struct spi_mem_op *op)
299 {
300 int i, ret;
301 u8 val;
302
303 ret = aml_sfc_set_bus_width(sfc, op->cmd.buswidth, CMD_LANE);
304 if (ret)
305 return ret;
306
307 for (i = 0; i < op->cmd.nbytes; i++) {
308 val = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff;
309 ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_COMMAND(sfc->cs_sel, val));
310 if (ret)
311 return ret;
312 }
313
314 return 0;
315 }
316
317 static int aml_sfc_send_addr(struct aml_sfc *sfc, const struct spi_mem_op *op)
318 {
319 int i, ret;
320 u8 val;
321
322 ret = aml_sfc_set_bus_width(sfc, op->addr.buswidth, ADDR_LANE);
323 if (ret)
324 return ret;
325
326 for (i = 0; i < op->addr.nbytes; i++) {
327 val = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff;
328
329 ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_ADDR(sfc->cs_sel, val));
330 if (ret)
331 return ret;
332 }
333
334 return 0;
335 }
336
337 static bool aml_sfc_is_xio_op(const struct spi_mem_op *op)
338 {
339 switch (op->cmd.opcode) {
340 case SPIFLASH_RD_OCTALIO:
341 case SPIFLASH_RD_QUADIO:
342 case SPIFLASH_RD_DUALIO:
343 return true;
344 default:
345 break;
346 }
347
348 return false;
349 }
350
351 static int aml_sfc_send_cmd_addr_dummy(struct aml_sfc *sfc, const struct spi_mem_op *op)
352 {
353 u32 dummy_cycle, cmd;
354 int ret;
355
356 ret = aml_sfc_send_cmd(sfc, op);
357 if (ret)
358 return ret;
359
360 ret = aml_sfc_send_addr(sfc, op);
361 if (ret)
362 return ret;
363
364 if (op->dummy.nbytes) {
365 /* The controller has no dummy-lane setting; for I/O reads the dummy cycles follow the data bus width. */
366 if (aml_sfc_is_xio_op(op))
367 dummy_cycle = op->dummy.nbytes * 8 / op->data.buswidth;
368 else
369 dummy_cycle = op->dummy.nbytes * 8;
370 cmd = CMD_DUMMY(sfc->cs_sel, dummy_cycle - 1);
371 return regmap_write(sfc->regmap_base, SFC_CMD, cmd);
372 }
373
374 return 0;
375 }
376
377 static bool aml_sfc_is_snand_hwecc_page_op(struct aml_sfc *sfc, const struct spi_mem_op *op)
378 {
379 switch (op->cmd.opcode) {
380 /* SPINAND read from cache cmd */
381 case SPIFLASH_RD_QUADIO:
382 case SPIFLASH_RD_QUAD:
383 case SPIFLASH_RD_DUALIO:
384 case SPIFLASH_RD_DUAL:
385 case SPIFLASH_RD_FAST:
386 case SPIFLASH_RD:
387 /* SPINAND write to cache cmd */
388 case SPIFLASH_WR_QUAD:
389 case SPIFLASH_WR:
390 case SPIFLASH_UP_QUAD:
391 case SPIFLASH_UP:
392 if (sfc->flags & SFC_HWECC)
393 return true;
394 else
395 return false;
396 default:
397 break;
398 }
399
400 return false;
401 }
402
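/*
 * Map the data (and optional info) buffer for DMA and queue the low and
 * high halves of each bus address as ADDRL/ADDRH command words so the
 * engine knows where to move the page data and the per-step info words.
 */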
403 static int aml_sfc_dma_buffer_setup(struct aml_sfc *sfc, void *databuf,
404 int datalen, void *infobuf, int infolen,
405 enum dma_data_direction dir)
406 {
407 u32 cmd = 0;
408 int ret;
409
410 sfc->daddr = dma_map_single(sfc->dev, databuf, datalen, dir);
411 ret = dma_mapping_error(sfc->dev, sfc->daddr);
412 if (ret) {
413 dev_err(sfc->dev, "DMA mapping error\n");
414 return ret;
415 }
416
417 cmd = CMD_DATA_ADDRL(sfc->daddr);
418 ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
419 if (ret)
420 goto out_map_data;
421
422 cmd = CMD_DATA_ADDRH(sfc->daddr);
423 ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
424 if (ret)
425 goto out_map_data;
426
427 if (infobuf) {
428 sfc->iaddr = dma_map_single(sfc->dev, infobuf, infolen, dir);
429 ret = dma_mapping_error(sfc->dev, sfc->iaddr);
430 if (ret) {
431 dev_err(sfc->dev, "DMA mapping error\n");
432 dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir);
433 return ret;
434 }
435
436 sfc->info_bytes = infolen;
437 cmd = CMD_INFO_ADDRL(sfc->iaddr);
438 ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
439 if (ret)
440 goto out_map_info;
441
442 cmd = CMD_INFO_ADDRH(sfc->iaddr);
443 ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
444 if (ret)
445 goto out_map_info;
446 }
447
448 return 0;
449
450 out_map_info:
451 dma_unmap_single(sfc->dev, sfc->iaddr, infolen, dir);
452 out_map_data:
453 dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir);
454
455 return ret;
456 }
457
458 static void aml_sfc_dma_buffer_release(struct aml_sfc *sfc,
459 int datalen, int infolen,
460 enum dma_data_direction dir)
461 {
462 dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir);
463 if (infolen) {
464 dma_unmap_single(sfc->dev, sfc->iaddr, infolen, dir);
465 sfc->info_bytes = 0;
466 }
467 }
468
469 static bool aml_sfc_dma_buffer_is_safe(const void *buffer)
470 {
471 if ((uintptr_t)buffer % DMA_ADDR_ALIGN)
472 return false;
473
474 if (virt_addr_valid(buffer))
475 return true;
476
477 return false;
478 }
479
480 static void *aml_get_dma_safe_input_buf(const struct spi_mem_op *op)
481 {
482 if (aml_sfc_dma_buffer_is_safe(op->data.buf.in))
483 return op->data.buf.in;
484
485 return kzalloc(op->data.nbytes, GFP_KERNEL);
486 }
487
488 static void aml_sfc_put_dma_safe_input_buf(const struct spi_mem_op *op, void *buf)
489 {
490 if (WARN_ON(op->data.dir != SPI_MEM_DATA_IN) || WARN_ON(!buf))
491 return;
492
493 if (buf == op->data.buf.in)
494 return;
495
496 memcpy(op->data.buf.in, buf, op->data.nbytes);
497 kfree(buf);
498 }
499
500 static void *aml_sfc_get_dma_safe_output_buf(const struct spi_mem_op *op)
501 {
502 if (aml_sfc_dma_buffer_is_safe(op->data.buf.out))
503 return (void *)op->data.buf.out;
504
505 return kmemdup(op->data.buf.out, op->data.nbytes, GFP_KERNEL);
506 }
507
508 static void aml_sfc_put_dma_safe_output_buf(const struct spi_mem_op *op, const void *buf)
509 {
510 if (WARN_ON(op->data.dir != SPI_MEM_DATA_OUT) || WARN_ON(!buf))
511 return;
512
513 if (buf != op->data.buf.out)
514 kfree(buf);
515 }
516
517 static u64 aml_sfc_cal_timeout_cycle(struct aml_sfc *sfc, const struct spi_mem_op *op)
518 {
519 u64 ms;
520
521 /* For each byte we wait for (8 cycles / buswidth) of the SPI clock. */
522 ms = 8 * MSEC_PER_SEC * op->data.nbytes / op->data.buswidth;
523 do_div(ms, sfc->bus_rate / DEFAULT_BUS_CYCLE);
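/*
 * For example, a 2112-byte quad-lane transfer at a 40 MHz SPI clock
 * computes to well under 1 ms here, so the tolerance added below
 * normally dominates the timeout.
 */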
524
525 /*
526 * Double the value and add a 200 ms tolerance to compensate for
527 * the impact of specific CS hold time, CS setup time sequences,
528 * controller burst gaps, and other related timing variations.
529 */
530 ms += ms + 200;
531
532 if (ms > UINT_MAX)
533 ms = UINT_MAX;
534
535 return ms;
536 }
537
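/*
 * Poll the info word of the last ECC step (or the single info word of a
 * raw transfer) until the DMA engine sets ECC_COMPLETE, which marks the
 * end of the page transfer.
 */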
538 static void aml_sfc_check_ecc_pages_valid(struct aml_sfc *sfc, bool raw)
539 {
540 struct aml_sfc_ecc_cfg *ecc_cfg;
541 __le64 *info;
542 int ret;
543
544 info = sfc->info_buf;
545 ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
546 info += raw ? 0 : ecc_cfg->nsteps - 1;
547
548 do {
549 usleep_range(10, 15);
550 /* The info word is updated by the controller's DMA engine. */
551 smp_rmb();
552 dma_sync_single_for_cpu(sfc->dev, sfc->iaddr, sfc->info_bytes,
553 DMA_FROM_DEVICE);
554 ret = le64_to_cpu(*info) & ECC_COMPLETE;
555 } while (!ret);
556 }
557
558 static int aml_sfc_raw_io_op(struct aml_sfc *sfc, const struct spi_mem_op *op)
559 {
560 void *buf = NULL;
561 int ret;
562 bool is_datain = false;
563 u32 cmd = 0, conf;
564 u64 timeout_ms;
565
566 if (!op->data.nbytes)
567 goto end_xfer;
568
569 conf = (op->data.nbytes >> RAW_SIZE_BW) << __ffs(RAW_EXT_SIZE);
570 ret = regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG, RAW_EXT_SIZE, conf);
571 if (ret)
572 goto err_out;
573
574 if (op->data.dir == SPI_MEM_DATA_IN) {
575 is_datain = true;
576
577 buf = aml_get_dma_safe_input_buf(op);
578 if (!buf) {
579 ret = -ENOMEM;
580 goto err_out;
581 }
582
583 cmd |= CMD_NAND2MEM(0, (op->data.nbytes & RAW_SIZE));
584 } else if (op->data.dir == SPI_MEM_DATA_OUT) {
585 is_datain = false;
586
587 buf = aml_sfc_get_dma_safe_output_buf(op);
588 if (!buf) {
589 ret = -ENOMEM;
590 goto err_out;
591 }
592
593 cmd |= CMD_MEM2NAND(0, (op->data.nbytes & RAW_SIZE));
594 } else {
595 goto end_xfer;
596 }
597
598 ret = aml_sfc_dma_buffer_setup(sfc, buf, op->data.nbytes,
599 is_datain ? sfc->info_buf : NULL,
600 is_datain ? ECC_PER_INFO_BYTE : 0,
601 is_datain ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
602 if (ret)
603 goto err_out;
604
605 ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
606 if (ret)
607 goto err_out;
608
609 timeout_ms = aml_sfc_cal_timeout_cycle(sfc, op);
610 ret = aml_sfc_wait_cmd_finish(sfc, timeout_ms);
611 if (ret)
612 goto err_out;
613
614 if (is_datain)
615 aml_sfc_check_ecc_pages_valid(sfc, 1);
616
617 if (op->data.dir == SPI_MEM_DATA_IN)
618 aml_sfc_put_dma_safe_input_buf(op, buf);
619 else if (op->data.dir == SPI_MEM_DATA_OUT)
620 aml_sfc_put_dma_safe_output_buf(op, buf);
621
622 aml_sfc_dma_buffer_release(sfc, op->data.nbytes,
623 is_datain ? ECC_PER_INFO_BYTE : 0,
624 is_datain ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
625
626 end_xfer:
627 return aml_sfc_end_transfer(sfc, CS_HOLD_CYCLE);
628
629 err_out:
630 return ret;
631 }
632
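/*
 * Pack the two user (free OOB) bytes of each ECC step into the low 16 bits
 * of that step's info word, striding 16 bytes per step through @oob_buf
 * when @auto_oob is set and 2 bytes per step otherwise.
 */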
633 static void aml_sfc_set_user_byte(struct aml_sfc *sfc, __le64 *info_buf, u8 *oob_buf, bool auto_oob)
634 {
635 struct aml_sfc_ecc_cfg *ecc_cfg;
636 __le64 *info;
637 int i, count, step_size;
638
639 ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
640
641 step_size = auto_oob ? ECC_BCH8_INFO_BYTES : ECC_BCH8_USER_BYTES;
642
643 for (i = 0, count = 0; i < ecc_cfg->nsteps; i++, count += step_size) {
644 info = &info_buf[i];
645 *info &= cpu_to_le64(~0xffff);
646 *info |= cpu_to_le64((oob_buf[count + 1] << 8) + oob_buf[count]);
647 }
648 }
649
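/* Extract the two user bytes of each ECC step back into a 16-byte-per-step OOB layout. */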
650 static void aml_sfc_get_user_byte(struct aml_sfc *sfc, __le64 *info_buf, u8 *oob_buf)
651 {
652 struct aml_sfc_ecc_cfg *ecc_cfg;
653 __le64 *info;
654 int i, count;
655
656 ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
657
658 for (i = 0, count = 0; i < ecc_cfg->nsteps; i++, count += ECC_BCH8_INFO_BYTES) {
659 info = &info_buf[i];
660 oob_buf[count] = le64_to_cpu(*info);
661 oob_buf[count + 1] = le64_to_cpu(*info) >> 8;
662 }
663 }
664
665 static int aml_sfc_check_hwecc_status(struct aml_sfc *sfc, __le64 *info_buf)
666 {
667 struct aml_sfc_ecc_cfg *ecc_cfg;
668 __le64 *info;
669 u32 i, max_bitflips = 0, per_sector_bitflips = 0;
670
671 ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
672
673 sfc->ecc_stats.failed = 0;
674 sfc->ecc_stats.bitflips = 0;
675 sfc->ecc_stats.corrected = 0;
676
677 for (i = 0, info = info_buf; i < ecc_cfg->nsteps; i++, info++) {
678 if (ECC_ERR_CNT(le64_to_cpu(*info)) != ECC_UNCORRECTABLE) {
679 per_sector_bitflips = ECC_ERR_CNT(le64_to_cpu(*info));
680 max_bitflips = max_t(u32, max_bitflips, per_sector_bitflips);
681 sfc->ecc_stats.corrected += per_sector_bitflips;
682 continue;
683 }
684
685 return -EBADMSG;
686 }
687
688 return max_bitflips;
689 }
690
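/*
 * HW ECC read: DMA the whole page plus OOB into data_buf/info_buf, let the
 * BCH engine correct the data in place, then copy out the data and/or the
 * unpacked user bytes according to the current transfer-mode flags.
 */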
691 static int aml_sfc_read_page_hwecc(struct aml_sfc *sfc, const struct spi_mem_op *op)
692 {
693 struct aml_sfc_ecc_cfg *ecc_cfg;
694 int ret, data_len, info_len;
695 u32 page_size, cmd = 0;
696 u64 timeout_ms;
697
698 ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
699
700 page_size = ecc_cfg->stepsize * ecc_cfg->nsteps;
701 data_len = page_size + ecc_cfg->oobsize;
702 info_len = ecc_cfg->nsteps * ECC_PER_INFO_BYTE;
703
704 ret = aml_sfc_dma_buffer_setup(sfc, sfc->data_buf, data_len,
705 sfc->info_buf, info_len, DMA_FROM_DEVICE);
706 if (ret)
707 goto err_out;
708
709 cmd |= CMD_NAND2MEM(ecc_cfg->bch, ecc_cfg->nsteps);
710 ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
711 if (ret)
712 goto err_out;
713
714 timeout_ms = aml_sfc_cal_timeout_cycle(sfc, op);
715 ret = aml_sfc_wait_cmd_finish(sfc, timeout_ms);
716 if (ret)
717 goto err_out;
718
719 aml_sfc_check_ecc_pages_valid(sfc, 0);
720 aml_sfc_dma_buffer_release(sfc, data_len, info_len, DMA_FROM_DEVICE);
721
722 /* check ecc status here */
723 ret = aml_sfc_check_hwecc_status(sfc, sfc->info_buf);
724 if (ret < 0)
725 sfc->ecc_stats.failed++;
726 else
727 sfc->ecc_stats.bitflips = ret;
728
729 if (sfc->flags & SFC_DATA_ONLY) {
730 memcpy(op->data.buf.in, sfc->data_buf, page_size);
731 } else if (sfc->flags & SFC_OOB_ONLY) {
732 aml_sfc_get_user_byte(sfc, sfc->info_buf, op->data.buf.in);
733 } else if (sfc->flags & SFC_DATA_OOB) {
734 memcpy(op->data.buf.in, sfc->data_buf, page_size);
735 aml_sfc_get_user_byte(sfc, sfc->info_buf, op->data.buf.in + page_size);
736 }
737
738 return aml_sfc_end_transfer(sfc, CS_HOLD_CYCLE);
739
740 err_out:
741 return ret;
742 }
743
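/*
 * HW ECC write: stage the page in data_buf, seed the info words, pack the
 * user OOB bytes unless this is a data-only write, then let the MEM2NAND
 * command generate the BCH parity on the fly.
 */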
744 static int aml_sfc_write_page_hwecc(struct aml_sfc *sfc, const struct spi_mem_op *op)
745 {
746 struct aml_sfc_ecc_cfg *ecc_cfg;
747 int ret, data_len, info_len;
748 u32 page_size, cmd = 0;
749 u64 timeout_ms;
750
751 ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
752
753 page_size = ecc_cfg->stepsize * ecc_cfg->nsteps;
754 data_len = page_size + ecc_cfg->oobsize;
755 info_len = ecc_cfg->nsteps * ECC_PER_INFO_BYTE;
756
757 memset(sfc->info_buf, ECC_PATTERN, ecc_cfg->oobsize);
758 memcpy(sfc->data_buf, op->data.buf.out, page_size);
759
760 if (!(sfc->flags & SFC_DATA_ONLY)) {
761 if (sfc->flags & SFC_AUTO_OOB)
762 aml_sfc_set_user_byte(sfc, sfc->info_buf,
763 (u8 *)op->data.buf.out + page_size, 1);
764 else
765 aml_sfc_set_user_byte(sfc, sfc->info_buf,
766 (u8 *)op->data.buf.out + page_size, 0);
767 }
768
769 ret = aml_sfc_dma_buffer_setup(sfc, sfc->data_buf, data_len,
770 sfc->info_buf, info_len, DMA_TO_DEVICE);
771 if (ret)
772 goto err_out;
773
774 cmd |= CMD_MEM2NAND(ecc_cfg->bch, ecc_cfg->nsteps);
775 ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
776 if (ret)
777 goto err_out;
778
779 timeout_ms = aml_sfc_cal_timeout_cycle(sfc, op);
780
781 ret = aml_sfc_wait_cmd_finish(sfc, timeout_ms);
782 if (ret)
783 goto err_out;
784
785 aml_sfc_dma_buffer_release(sfc, data_len, info_len, DMA_TO_DEVICE);
786
787 return aml_sfc_end_transfer(sfc, CS_HOLD_CYCLE);
788
789 err_out:
790 return ret;
791 }
792
793 static int aml_sfc_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
794 {
795 struct aml_sfc *sfc;
796 struct spi_device *spi;
797 struct aml_sfc_ecc_cfg *ecc_cfg;
798 int ret;
799
800 sfc = spi_controller_get_devdata(mem->spi->controller);
801 ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
802 spi = mem->spi;
803 sfc->cs_sel = spi->chip_select[0] ? CS_1 : CS_0;
804
805 dev_dbg(sfc->dev, "cmd:0x%02x - addr:%08llX@%d:%u - dummy:%d:%u - data:%d:%u",
806 op->cmd.opcode, op->addr.val, op->addr.buswidth, op->addr.nbytes,
807 op->dummy.buswidth, op->dummy.nbytes, op->data.buswidth, op->data.nbytes);
808
809 ret = aml_sfc_pre_transfer(sfc, DEFAULT_PULLUP_CYCLE, CS_SETUP_CYCLE);
810 if (ret)
811 return ret;
812
813 ret = aml_sfc_send_cmd_addr_dummy(sfc, op);
814 if (ret)
815 return ret;
816
817 ret = aml_sfc_set_bus_width(sfc, op->data.buswidth, DATA_LANE);
818 if (ret)
819 return ret;
820
821 if (aml_sfc_is_snand_hwecc_page_op(sfc, op) &&
822 ecc_cfg && !(sfc->flags & SFC_RAW_RW)) {
823 if (op->data.dir == SPI_MEM_DATA_IN)
824 return aml_sfc_read_page_hwecc(sfc, op);
825 else
826 return aml_sfc_write_page_hwecc(sfc, op);
827 }
828
829 return aml_sfc_raw_io_op(sfc, op);
830 }
831
832 static int aml_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
833 {
834 struct aml_sfc *sfc;
835 struct aml_sfc_ecc_cfg *ecc_cfg;
836
837 sfc = spi_controller_get_devdata(mem->spi->controller);
838 ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
839
840 if (aml_sfc_is_snand_hwecc_page_op(sfc, op) && ecc_cfg) {
841 if (op->data.nbytes > ecc_cfg->stepsize * ECC_BCH_MAX_SECT_SIZE)
842 return -EOPNOTSUPP;
843 } else if (op->data.nbytes & ~RAW_MAX_RW_SIZE_MASK) {
844 return -EOPNOTSUPP;
845 }
846
847 return 0;
848 }
849
850 static const struct spi_controller_mem_ops aml_sfc_mem_ops = {
851 .adjust_op_size = aml_sfc_adjust_op_size,
852 .exec_op = aml_sfc_exec_op,
853 };
854
855 static int aml_sfc_layout_ecc(struct mtd_info *mtd, int section,
856 struct mtd_oob_region *oobregion)
857 {
858 struct nand_device *nand = mtd_to_nanddev(mtd);
859
860 if (section >= nand->ecc.ctx.nsteps)
861 return -ERANGE;
862
863 oobregion->offset = ECC_BCH8_USER_BYTES + (section * ECC_BCH8_INFO_BYTES);
864 oobregion->length = ECC_BCH8_PARITY_BYTES;
865
866 return 0;
867 }
868
869 static int aml_sfc_ooblayout_free(struct mtd_info *mtd, int section,
870 struct mtd_oob_region *oobregion)
871 {
872 struct nand_device *nand = mtd_to_nanddev(mtd);
873
874 if (section >= nand->ecc.ctx.nsteps)
875 return -ERANGE;
876
877 oobregion->offset = section * ECC_BCH8_INFO_BYTES;
878 oobregion->length = ECC_BCH8_USER_BYTES;
879
880 return 0;
881 }
882
883 static const struct mtd_ooblayout_ops aml_sfc_ooblayout_ops = {
884 .ecc = aml_sfc_layout_ecc,
885 .free = aml_sfc_ooblayout_free,
886 };
887
888 static int aml_spi_settings(struct aml_sfc *sfc, struct spi_device *spi)
889 {
890 u32 conf = 0;
891
892 if (spi->mode & SPI_CPHA)
893 conf |= CPHA;
894
895 if (spi->mode & SPI_CPOL)
896 conf |= CPOL;
897
898 conf |= FIELD_PREP(RXADJ, sfc->rx_adj);
899 conf |= EN_HOLD | EN_WP;
900 return regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG,
901 CPHA | CPOL | RXADJ |
902 EN_HOLD | EN_WP, conf);
903 }
904
905 static int aml_set_spi_clk(struct aml_sfc *sfc, struct spi_device *spi)
906 {
907 u32 speed_hz;
908 int ret;
909
910 if (spi->max_speed_hz > SFC_MAX_FREQUENCY)
911 speed_hz = SFC_MAX_FREQUENCY;
912 else if (!spi->max_speed_hz)
913 speed_hz = SFC_BUS_DEFAULT_CLK;
914 else if (spi->max_speed_hz < SFC_MIN_FREQUENCY)
915 speed_hz = SFC_MIN_FREQUENCY;
916 else
917 speed_hz = spi->max_speed_hz;
918
919 /* By default the SPI clock is the core (bus) clock divided by DEFAULT_BUS_CYCLE (4). */
920 ret = regmap_write(sfc->regmap_base, SFC_CFG, (DEFAULT_BUS_CYCLE - 1));
921 if (ret) {
922 dev_err(sfc->dev, "failed to set bus cycle\n");
923 return ret;
924 }
925
926 return clk_set_rate(sfc->core_clk, speed_hz * DEFAULT_BUS_CYCLE);
927 }
928
929 static int aml_sfc_setup(struct spi_device *spi)
930 {
931 struct aml_sfc *sfc;
932 int ret;
933
934 sfc = spi_controller_get_devdata(spi->controller);
935 ret = aml_spi_settings(sfc, spi);
936 if (ret)
937 return ret;
938
939 ret = aml_set_spi_clk(sfc, spi);
940 if (ret)
941 return ret;
942
943 sfc->bus_rate = clk_get_rate(sfc->core_clk);
944
945 return 0;
946 }
947
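/*
 * The BCH8 engine requires the page size to be a multiple of the ECC step
 * size and at least 16 bytes of OOB per step (2 user + 14 parity bytes).
 */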
948 static int aml_sfc_ecc_init_ctx(struct nand_device *nand)
949 {
950 struct mtd_info *mtd = nanddev_to_mtd(nand);
951 struct aml_sfc *sfc = nand_to_aml_sfc(nand);
952 struct aml_sfc_ecc_cfg *ecc_cfg;
953 const struct aml_sfc_caps *caps = sfc->caps;
954 struct aml_sfc_ecc_cfg *ecc_caps = caps->ecc_caps;
955 int i, ecc_strength, ecc_step_size;
956
957 ecc_step_size = nand->ecc.user_conf.step_size;
958 ecc_strength = nand->ecc.user_conf.strength;
959
960 for (i = 0; i < caps->num_ecc_caps; i++) {
961 if (ecc_caps[i].stepsize == ecc_step_size) {
962 nand->ecc.ctx.conf.step_size = ecc_step_size;
963 nand->ecc.ctx.conf.flags |= BIT(ecc_caps[i].bch);
964 }
965
966 if (ecc_caps[i].strength == ecc_strength)
967 nand->ecc.ctx.conf.strength = ecc_strength;
968 }
969
970 if (!nand->ecc.ctx.conf.step_size) {
971 nand->ecc.ctx.conf.step_size = ECC_BCH8_DEFAULT_STEP;
972 nand->ecc.ctx.conf.flags |= BIT(ECC_DEFAULT_BCH_MODE);
973 }
974
975 if (!nand->ecc.ctx.conf.strength)
976 nand->ecc.ctx.conf.strength = ECC_BCH8_STRENGTH;
977
978 nand->ecc.ctx.nsteps = nand->memorg.pagesize / nand->ecc.ctx.conf.step_size;
979 nand->ecc.ctx.total = nand->ecc.ctx.nsteps * ECC_BCH8_PARITY_BYTES;
980
981 /* Verify the page size and OOB size against the SFC requirements. */
982 if ((nand->memorg.pagesize % nand->ecc.ctx.conf.step_size) ||
983 (nand->memorg.oobsize < (nand->ecc.ctx.total +
984 nand->ecc.ctx.nsteps * ECC_BCH8_USER_BYTES)))
985 return -EOPNOTSUPP;
986
987 nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
988
989 ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
990 if (!ecc_cfg)
991 return -ENOMEM;
992
993 ecc_cfg->stepsize = nand->ecc.ctx.conf.step_size;
994 ecc_cfg->nsteps = nand->ecc.ctx.nsteps;
995 ecc_cfg->strength = nand->ecc.ctx.conf.strength;
996 ecc_cfg->oobsize = nand->memorg.oobsize;
997 ecc_cfg->bch = nand->ecc.ctx.conf.flags & BIT(ECC_DEFAULT_BCH_MODE) ? 1 : 2;
998
999 nand->ecc.ctx.priv = ecc_cfg;
1000 sfc->priv = (void *)ecc_cfg;
1001 mtd_set_ooblayout(mtd, &aml_sfc_ooblayout_ops);
1002
1003 sfc->flags |= SFC_HWECC;
1004
1005 return 0;
1006 }
1007
1008 static void aml_sfc_ecc_cleanup_ctx(struct nand_device *nand)
1009 {
1010 struct aml_sfc *sfc = nand_to_aml_sfc(nand);
1011
1012 sfc->flags &= ~(SFC_HWECC);
1013 kfree(nand->ecc.ctx.priv);
1014 sfc->priv = NULL;
1015 }
1016
1017 static int aml_sfc_ecc_prepare_io_req(struct nand_device *nand,
1018 struct nand_page_io_req *req)
1019 {
1020 struct aml_sfc *sfc = nand_to_aml_sfc(nand);
1021 struct spinand_device *spinand = nand_to_spinand(nand);
1022
1023 sfc->flags &= ~SFC_XFER_MODE_MASK;
1024
1025 if (req->datalen && !req->ooblen)
1026 sfc->flags |= SFC_DATA_ONLY;
1027 else if (!req->datalen && req->ooblen)
1028 sfc->flags |= SFC_OOB_ONLY;
1029 else if (req->datalen && req->ooblen)
1030 sfc->flags |= SFC_DATA_OOB;
1031
1032 if (req->mode == MTD_OPS_RAW)
1033 sfc->flags |= SFC_RAW_RW;
1034 else if (req->mode == MTD_OPS_AUTO_OOB)
1035 sfc->flags |= SFC_AUTO_OOB;
1036
1037 memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
1038
1039 return 0;
1040 }
1041
1042 static int aml_sfc_ecc_finish_io_req(struct nand_device *nand,
1043 struct nand_page_io_req *req)
1044 {
1045 struct aml_sfc *sfc = nand_to_aml_sfc(nand);
1046 struct mtd_info *mtd = nanddev_to_mtd(nand);
1047
1048 if (req->mode == MTD_OPS_RAW || req->type == NAND_PAGE_WRITE)
1049 return 0;
1050
1051 if (sfc->ecc_stats.failed)
1052 mtd->ecc_stats.failed++;
1053
1054 mtd->ecc_stats.corrected += sfc->ecc_stats.corrected;
1055
1056 return sfc->ecc_stats.failed ? -EBADMSG : sfc->ecc_stats.bitflips;
1057 }
1058
1059 static const struct spi_controller_mem_caps aml_sfc_mem_caps = {
1060 .ecc = true,
1061 };
1062
1063 static const struct nand_ecc_engine_ops aml_sfc_ecc_engine_ops = {
1064 .init_ctx = aml_sfc_ecc_init_ctx,
1065 .cleanup_ctx = aml_sfc_ecc_cleanup_ctx,
1066 .prepare_io_req = aml_sfc_ecc_prepare_io_req,
1067 .finish_io_req = aml_sfc_ecc_finish_io_req,
1068 };
1069
1070 static int aml_sfc_clk_init(struct aml_sfc *sfc)
1071 {
1072 sfc->gate_clk = devm_clk_get_enabled(sfc->dev, "gate");
1073 if (IS_ERR(sfc->gate_clk)) {
1074 dev_err(sfc->dev, "unable to enable gate clk\n");
1075 return PTR_ERR(sfc->gate_clk);
1076 }
1077
1078 sfc->core_clk = devm_clk_get_enabled(sfc->dev, "core");
1079 if (IS_ERR(sfc->core_clk)) {
1080 dev_err(sfc->dev, "unable to enable core clk\n");
1081 return PTR_ERR(sfc->core_clk);
1082 }
1083
1084 return clk_set_rate(sfc->core_clk, SFC_BUS_DEFAULT_CLK);
1085 }
1086
1087 static int aml_sfc_disable_clk(struct aml_sfc *sfc)
1088 {
1089 clk_disable_unprepare(sfc->core_clk);
1090 clk_disable_unprepare(sfc->gate_clk);
1091
1092 return 0;
1093 }
1094
1095 static int aml_sfc_probe(struct platform_device *pdev)
1096 {
1097 struct device_node *np = pdev->dev.of_node;
1098 struct device *dev = &pdev->dev;
1099 struct spi_controller *ctrl;
1100 struct aml_sfc *sfc;
1101 void __iomem *reg_base;
1102 int ret;
1103 u32 val = 0;
1104
1105 const struct regmap_config core_config = {
1106 .reg_bits = 32,
1107 .val_bits = 32,
1108 .reg_stride = 4,
1109 .max_register = SFC_SPI_CFG,
1110 };
1111
1112 ctrl = devm_spi_alloc_host(dev, sizeof(*sfc));
1113 if (!ctrl)
1114 return -ENOMEM;
1115 platform_set_drvdata(pdev, ctrl);
1116
1117 sfc = spi_controller_get_devdata(ctrl);
1118 sfc->dev = dev;
1119 sfc->ctrl = ctrl;
1120
1121 sfc->caps = of_device_get_match_data(dev);
1122 if (!sfc->caps)
1123 return dev_err_probe(dev, -ENODEV, "failed to get device data\n");
1124
1125 reg_base = devm_platform_ioremap_resource(pdev, 0);
1126 if (IS_ERR(reg_base))
1127 return PTR_ERR(reg_base);
1128
1129 sfc->regmap_base = devm_regmap_init_mmio(dev, reg_base, &core_config);
1130 if (IS_ERR(sfc->regmap_base))
1131 return dev_err_probe(dev, PTR_ERR(sfc->regmap_base),
1132 "failed to init sfc base regmap\n");
1133
1134 sfc->data_buf = devm_kzalloc(dev, SFC_BUF_SIZE, GFP_KERNEL);
1135 if (!sfc->data_buf)
1136 return -ENOMEM;
1137 sfc->info_buf = (__le64 *)(sfc->data_buf + SFC_DATABUF_SIZE);
1138
1139 ret = aml_sfc_clk_init(sfc);
1140 if (ret)
1141 return dev_err_probe(dev, ret, "failed to initialize SFC clock\n");
1142
1143 /* Enable Amlogic flash controller spi mode */
1144 ret = regmap_write(sfc->regmap_base, SFC_SPI_CFG, SPI_MODE_EN);
1145 if (ret) {
1146 dev_err(dev, "failed to enable SPI mode\n");
1147 goto err_out;
1148 }
1149
1150 ret = dma_set_mask(sfc->dev, DMA_BIT_MASK(32));
1151 if (ret) {
1152 dev_err(sfc->dev, "failed to set dma mask\n");
1153 goto err_out;
1154 }
1155
1156 sfc->ecc_eng.dev = &pdev->dev;
1157 sfc->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
1158 sfc->ecc_eng.ops = &aml_sfc_ecc_engine_ops;
1159 sfc->ecc_eng.priv = sfc;
1160
1161 ret = nand_ecc_register_on_host_hw_engine(&sfc->ecc_eng);
1162 if (ret) {
1163 dev_err(&pdev->dev, "failed to register Aml host ecc engine.\n");
1164 goto err_out;
1165 }
1166
1167 ret = of_property_read_u32(np, "amlogic,rx-adj", &val);
1168 if (!ret)
1169 sfc->rx_adj = val;
1170
1171 ctrl->dev.of_node = np;
1172 ctrl->mem_ops = &aml_sfc_mem_ops;
1173 ctrl->mem_caps = &aml_sfc_mem_caps;
1174 ctrl->setup = aml_sfc_setup;
1175 ctrl->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD |
1176 SPI_RX_DUAL | SPI_TX_OCTAL | SPI_RX_OCTAL;
1177 ctrl->max_speed_hz = SFC_MAX_FREQUENCY;
1178 ctrl->min_speed_hz = SFC_MIN_FREQUENCY;
1179 ctrl->num_chipselect = SFC_MAX_CS_NUM;
1180
1181 ret = devm_spi_register_controller(dev, ctrl);
1182 if (ret)
1183 goto err_out;
1184
1185 return 0;
1186
1187 err_out:
1188 aml_sfc_disable_clk(sfc);
1189
1190 return ret;
1191 }
1192
1193 static void aml_sfc_remove(struct platform_device *pdev)
1194 {
1195 struct spi_controller *ctlr = platform_get_drvdata(pdev);
1196 struct aml_sfc *sfc = spi_controller_get_devdata(ctlr);
1197
1198 aml_sfc_disable_clk(sfc);
1199 }
1200
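/*
 * Illustrative devicetree node (register address, size and clock phandles
 * are placeholders, not taken from a real SoC dtsi):
 *
 *	spifc: spi@fd000800 {
 *		compatible = "amlogic,a4-spifc";
 *		reg = <0x0 0xfd000800 0x0 0x200>;
 *		clocks = <&clkc_gate>, <&clkc_core>;
 *		clock-names = "gate", "core";
 *		amlogic,rx-adj = <1>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */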
1201 static const struct of_device_id aml_sfc_of_match[] = {
1202 {
1203 .compatible = "amlogic,a4-spifc",
1204 .data = &aml_a113l2_sfc_caps
1205 },
1206 {},
1207 };
1208 MODULE_DEVICE_TABLE(of, aml_sfc_of_match);
1209
1210 static struct platform_driver aml_sfc_driver = {
1211 .driver = {
1212 .name = "aml_sfc",
1213 .of_match_table = aml_sfc_of_match,
1214 },
1215 .probe = aml_sfc_probe,
1216 .remove = aml_sfc_remove,
1217 };
1218 module_platform_driver(aml_sfc_driver);
1219
1220 MODULE_DESCRIPTION("Amlogic SPI Flash Controller driver");
1221 MODULE_AUTHOR("Feng Chen <feng.chen@amlogic.com>");
1222 MODULE_LICENSE("Dual MIT/GPL");
1223