1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright 2017 ATMEL
4 * Copyright 2017 Free Electrons
5 *
6 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
7 *
8 * Derived from the atmel_nand.c driver which contained the following
9 * copyrights:
10 *
11 * Copyright 2003 Rick Bronson
12 *
13 * Derived from drivers/mtd/nand/autcpu12.c (removed in v3.8)
14 * Copyright 2001 Thomas Gleixner (gleixner@autronix.de)
15 *
16 * Derived from drivers/mtd/spia.c (removed in v3.8)
17 * Copyright 2000 Steven J. Hill (sjhill@cotw.com)
18 *
19 *
20 * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
21 * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007
22 *
23 * Derived from Das U-Boot source code
24 * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
25 * Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
26 *
27 * Add Programmable Multibit ECC support for various AT91 SoC
28 * Copyright 2012 ATMEL, Hong Xu
29 *
30 * Add Nand Flash Controller support for SAMA5 SoC
31 * Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
32 *
33 * A few words about the naming convention in this file. This convention
34 * applies to structure and function names.
35 *
36 * Prefixes:
37 *
38 * - atmel_nand_: all generic structures/functions
39 * - atmel_smc_nand_: all structures/functions specific to the SMC interface
40 * (at91sam9 and avr32 SoCs)
41 * - atmel_hsmc_nand_: all structures/functions specific to the HSMC interface
42 * (sama5 SoCs and later)
43 * - atmel_nfc_: all structures/functions used to manipulate the NFC sub-block
44 * that is available in the HSMC block
45 * - <soc>_nand_: all SoC specific structures/functions
46 */
47
48 #include <linux/clk.h>
49 #include <linux/dma-mapping.h>
50 #include <linux/dmaengine.h>
51 #include <linux/genalloc.h>
52 #include <linux/gpio/consumer.h>
53 #include <linux/interrupt.h>
54 #include <linux/mfd/syscon.h>
55 #include <linux/mfd/syscon/atmel-matrix.h>
56 #include <linux/mfd/syscon/atmel-smc.h>
57 #include <linux/module.h>
58 #include <linux/mtd/rawnand.h>
59 #include <linux/of_address.h>
60 #include <linux/of_irq.h>
61 #include <linux/of_platform.h>
62 #include <linux/iopoll.h>
63 #include <linux/platform_device.h>
64 #include <linux/regmap.h>
65 #include <soc/at91/atmel-sfr.h>
66
67 #include "pmecc.h"
68
/* NFC Configuration Register and its fields. */
#define ATMEL_HSMC_NFC_CFG			0x0
/* Spare (OOB) size is programmed in units of 4 bytes. */
#define ATMEL_HSMC_NFC_CFG_SPARESIZE(x)		(((x) / 4) << 24)
#define ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK	GENMASK(30, 24)
/* Data timeout: cycle count + multiplier. */
#define ATMEL_HSMC_NFC_CFG_DTO(cyc, mul)	(((cyc) << 16) | ((mul) << 20))
#define ATMEL_HSMC_NFC_CFG_DTO_MAX		GENMASK(22, 16)
#define ATMEL_HSMC_NFC_CFG_RBEDGE		BIT(13)
#define ATMEL_HSMC_NFC_CFG_FALLING_EDGE		BIT(12)
/* Automatically read/write the spare area along with the page data. */
#define ATMEL_HSMC_NFC_CFG_RSPARE		BIT(9)
#define ATMEL_HSMC_NFC_CFG_WSPARE		BIT(8)
#define ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK	GENMASK(2, 0)
/* Page size encoded as log2(writesize / 512). */
#define ATMEL_HSMC_NFC_CFG_PAGESIZE(x)		(fls((x) / 512) - 1)

/* NFC Control Register: enable/disable the NFC block. */
#define ATMEL_HSMC_NFC_CTRL			0x4
#define ATMEL_HSMC_NFC_CTRL_EN			BIT(0)
#define ATMEL_HSMC_NFC_CTRL_DIS			BIT(1)

/* NFC Status Register and the interrupt enable/disable/mask registers. */
#define ATMEL_HSMC_NFC_SR			0x8
#define ATMEL_HSMC_NFC_IER			0xc
#define ATMEL_HSMC_NFC_IDR			0x10
#define ATMEL_HSMC_NFC_IMR			0x14
#define ATMEL_HSMC_NFC_SR_ENABLED		BIT(1)
#define ATMEL_HSMC_NFC_SR_RB_RISE		BIT(4)
#define ATMEL_HSMC_NFC_SR_RB_FALL		BIT(5)
#define ATMEL_HSMC_NFC_SR_BUSY			BIT(8)
#define ATMEL_HSMC_NFC_SR_WR			BIT(11)
#define ATMEL_HSMC_NFC_SR_CSID			GENMASK(14, 12)
#define ATMEL_HSMC_NFC_SR_XFRDONE		BIT(16)
#define ATMEL_HSMC_NFC_SR_CMDDONE		BIT(17)
#define ATMEL_HSMC_NFC_SR_DTOE			BIT(20)
#define ATMEL_HSMC_NFC_SR_UNDEF			BIT(21)
#define ATMEL_HSMC_NFC_SR_AWB			BIT(22)
#define ATMEL_HSMC_NFC_SR_NFCASE		BIT(23)
/* All error conditions an NFC operation can report. */
#define ATMEL_HSMC_NFC_SR_ERRORS		(ATMEL_HSMC_NFC_SR_DTOE | \
						 ATMEL_HSMC_NFC_SR_UNDEF | \
						 ATMEL_HSMC_NFC_SR_AWB | \
						 ATMEL_HSMC_NFC_SR_NFCASE)
/* Per-R/B-line edge-detected status bit. */
#define ATMEL_HSMC_NFC_SR_RBEDGE(x)		BIT((x) + 24)

#define ATMEL_HSMC_NFC_ADDR			0x18
#define ATMEL_HSMC_NFC_BANK			0x1c

#define ATMEL_NFC_MAX_RB_ID			7

#define ATMEL_NFC_SRAM_SIZE			0x2400

/* Fields of the command word written into the NFC command address space. */
#define ATMEL_NFC_CMD(pos, cmd)			((cmd) << (((pos) * 8) + 2))
#define ATMEL_NFC_VCMD2				BIT(18)
#define ATMEL_NFC_ACYCLE(naddrs)		((naddrs) << 19)
#define ATMEL_NFC_CSID(cs)			((cs) << 22)
#define ATMEL_NFC_DATAEN			BIT(25)
#define ATMEL_NFC_NFCWR				BIT(26)

#define ATMEL_NFC_MAX_ADDR_CYCLES		5

/* Address offsets toggling the ALE/CLE lines on the raw SMC interface. */
#define ATMEL_NAND_ALE_OFFSET			BIT(21)
#define ATMEL_NAND_CLE_OFFSET			BIT(22)

#define DEFAULT_TIMEOUT_MS			1000
/* Below this transfer length, DMA setup cost outweighs the benefit. */
#define MIN_DMA_LEN				128

/* Module parameter forcing PIO transfers even when a DMA channel exists. */
static bool atmel_nand_avoid_dma __read_mostly;

MODULE_PARM_DESC(avoiddma, "Avoid using DMA");
module_param_named(avoiddma, atmel_nand_avoid_dma, bool, 0400);
133
/* How the NAND R/B (ready/busy) signal is wired, if at all. */
enum atmel_nand_rb_type {
	ATMEL_NAND_NO_RB,
	ATMEL_NAND_NATIVE_RB,
	ATMEL_NAND_GPIO_RB,
};

/* R/B line description: either a GPIO or a native NFC R/B line id. */
struct atmel_nand_rb {
	enum atmel_nand_rb_type type;
	union {
		struct gpio_desc *gpio;	/* valid for ATMEL_NAND_GPIO_RB */
		int id;			/* valid for ATMEL_NAND_NATIVE_RB */
	};
};
147
/* Resources attached to one chip select of a NAND device. */
struct atmel_nand_cs {
	int id;				/* CS id passed to the NFC (CSID field) */
	struct atmel_nand_rb rb;	/* ready/busy wiring for this CS */
	struct gpio_desc *csgpio;	/* optional GPIO driving the CS line */
	struct {
		void __iomem *virt;	/* CPU mapping of the data I/O window */
		dma_addr_t dma;		/* bus address of the same window */
	} io;

	struct atmel_smc_cs_conf smcconf;	/* cached SMC timing setup */
};

/* One NAND device, possibly spanning several chip selects. */
struct atmel_nand {
	struct list_head node;		/* entry in the controller chip list */
	struct device *dev;
	struct nand_chip base;
	struct atmel_nand_cs *activecs;	/* CS selected by select_target() */
	struct atmel_pmecc_user *pmecc;	/* hardware ECC (PMECC) context */
	struct gpio_desc *cdgpio;	/* optional card-detect GPIO */
	int numcs;
	struct atmel_nand_cs cs[] __counted_by(numcs);
};
170
to_atmel_nand(struct nand_chip * chip)171 static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip)
172 {
173 return container_of(chip, struct atmel_nand, base);
174 }
175
/* Direction of the data phase of an NFC operation, if any. */
enum atmel_nfc_data_xfer {
	ATMEL_NFC_NO_DATA,
	ATMEL_NFC_READ_DATA,
	ATMEL_NFC_WRITE_DATA,
};

/* One NFC operation being assembled/executed (see atmel_nfc_exec_op()). */
struct atmel_nfc_op {
	u8 cs;				/* target chip select */
	u8 ncmds;			/* number of valid entries in cmds[] */
	u8 cmds[2];			/* NAND command opcodes */
	u8 naddrs;			/* number of valid entries in addrs[] */
	u8 addrs[5];			/* address cycles */
	enum atmel_nfc_data_xfer data;	/* data phase direction */
	u32 wait;			/* SR bits to wait for */
	u32 errors;			/* SR error bits accumulated so far */
};

struct atmel_nand_controller;
struct atmel_nand_controller_caps;

/* Hooks differentiating the SMC and HSMC controller flavours. */
struct atmel_nand_controller_ops {
	int (*probe)(struct platform_device *pdev,
		     const struct atmel_nand_controller_caps *caps);
	int (*remove)(struct atmel_nand_controller *nc);
	void (*nand_init)(struct atmel_nand_controller *nc,
			  struct atmel_nand *nand);
	int (*ecc_init)(struct nand_chip *chip);
	int (*setup_interface)(struct atmel_nand *nand, int csline,
			       const struct nand_interface_config *conf);
	int (*exec_op)(struct atmel_nand *nand,
		       const struct nand_operation *op, bool check_only);
};

/* Static, per-SoC capabilities of a controller instance. */
struct atmel_nand_controller_caps {
	bool has_dma;
	bool legacy_of_bindings;
	u32 ale_offs;			/* address offset toggling ALE */
	u32 cle_offs;			/* address offset toggling CLE */
	const char *ebi_csa_regmap_name;
	const struct atmel_nand_controller_ops *ops;
};

/* State common to both the SMC and HSMC controller flavours. */
struct atmel_nand_controller {
	struct nand_controller base;
	const struct atmel_nand_controller_caps *caps;
	struct device *dev;
	struct regmap *smc;
	struct dma_chan *dmac;		/* NULL when DMA is not used */
	struct atmel_pmecc *pmecc;
	struct list_head chips;		/* atmel_nand instances on this bus */
	struct clk *mck;
};
228
229 static inline struct atmel_nand_controller *
to_nand_controller(struct nand_controller * ctl)230 to_nand_controller(struct nand_controller *ctl)
231 {
232 return container_of(ctl, struct atmel_nand_controller, base);
233 }
234
/*
 * EBI chip-select assignment register description: register offset plus the
 * bit routing NFD0 (presumably NAND data line 0 on D16 -- see the per-SoC
 * tables elsewhere in this driver to confirm).
 */
struct atmel_smc_nand_ebi_csa_cfg {
	u32 offs;
	u32 nfd0_on_d16;
};

/* SMC (at91sam9/avr32) flavour of the controller. */
struct atmel_smc_nand_controller {
	struct atmel_nand_controller base;
	struct regmap *ebi_csa_regmap;
	struct atmel_smc_nand_ebi_csa_cfg *ebi_csa;
};
245
246 static inline struct atmel_smc_nand_controller *
to_smc_nand_controller(struct nand_controller * ctl)247 to_smc_nand_controller(struct nand_controller *ctl)
248 {
249 return container_of(to_nand_controller(ctl),
250 struct atmel_smc_nand_controller, base);
251 }
252
/* HSMC (sama5) flavour: adds the NFC sub-block, its SRAM and its IRQ. */
struct atmel_hsmc_nand_controller {
	struct atmel_nand_controller base;
	struct {
		struct gen_pool *pool;
		void __iomem *virt;
		dma_addr_t dma;
	} sram;				/* NFC SRAM buffering page data */
	const struct atmel_hsmc_reg_layout *hsmc_layout;
	struct regmap *io;		/* NFC command address space */
	struct atmel_nfc_op op;		/* operation currently in flight */
	struct completion complete;	/* signalled by atmel_nfc_interrupt() */
	u32 cfg;			/* last value programmed in NFC_CFG */
	int irq;

	/* Only used when instantiating from legacy DT bindings. */
	struct clk *clk;
};
270
271 static inline struct atmel_hsmc_nand_controller *
to_hsmc_nand_controller(struct nand_controller * ctl)272 to_hsmc_nand_controller(struct nand_controller *ctl)
273 {
274 return container_of(to_nand_controller(ctl),
275 struct atmel_hsmc_nand_controller, base);
276 }
277
atmel_nfc_op_done(struct atmel_nfc_op * op,u32 status)278 static bool atmel_nfc_op_done(struct atmel_nfc_op *op, u32 status)
279 {
280 op->errors |= status & ATMEL_HSMC_NFC_SR_ERRORS;
281 op->wait ^= status & op->wait;
282
283 return !op->wait || op->errors;
284 }
285
/*
 * NFC interrupt handler: fold the status register into the pending
 * operation, mask the interrupts that fired, and wake the waiter once the
 * operation completed or reported an error.
 */
static irqreturn_t atmel_nfc_interrupt(int irq, void *data)
{
	struct atmel_hsmc_nand_controller *nc = data;
	u32 sr, rcvd;
	bool done;

	regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &sr);

	/* Only the bits this operation waits for (plus errors) matter. */
	rcvd = sr & (nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
	done = atmel_nfc_op_done(&nc->op, sr);

	/* Disable the interrupt sources that have been received. */
	if (rcvd)
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, rcvd);

	if (done)
		complete(&nc->complete);

	/* Claim the IRQ only if at least one expected bit was set. */
	return rcvd ? IRQ_HANDLED : IRQ_NONE;
}
305
/*
 * Wait for the pending NFC operation (nc->op) to finish, either by polling
 * the status register or by sleeping on the IRQ-driven completion. Decodes
 * any error bits accumulated in nc->op.errors into an errno.
 */
static int atmel_nfc_wait(struct atmel_hsmc_nand_controller *nc, bool poll,
			  unsigned int timeout_ms)
{
	int ret;

	if (!timeout_ms)
		timeout_ms = DEFAULT_TIMEOUT_MS;

	if (poll) {
		u32 status;

		/* Busy-poll SR until atmel_nfc_op_done() says we're done. */
		ret = regmap_read_poll_timeout(nc->base.smc,
					       ATMEL_HSMC_NFC_SR, status,
					       atmel_nfc_op_done(&nc->op,
								 status),
					       0, timeout_ms * 1000);
	} else {
		/* IRQ mode: arm the completion, unmask, then sleep. */
		init_completion(&nc->complete);
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IER,
			     nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
		ret = wait_for_completion_timeout(&nc->complete,
						  msecs_to_jiffies(timeout_ms));
		if (!ret)
			ret = -ETIMEDOUT;
		else
			ret = 0;

		/* Mask everything again whether we timed out or not. */
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
	}

	/* Translate hardware error flags into errnos (last one wins). */
	if (nc->op.errors & ATMEL_HSMC_NFC_SR_DTOE) {
		dev_err(nc->base.dev, "Waiting NAND R/B Timeout\n");
		ret = -ETIMEDOUT;
	}

	if (nc->op.errors & ATMEL_HSMC_NFC_SR_UNDEF) {
		dev_err(nc->base.dev, "Access to an undefined area\n");
		ret = -EIO;
	}

	if (nc->op.errors & ATMEL_HSMC_NFC_SR_AWB) {
		dev_err(nc->base.dev, "Access while busy\n");
		ret = -EIO;
	}

	if (nc->op.errors & ATMEL_HSMC_NFC_SR_NFCASE) {
		dev_err(nc->base.dev, "Wrong access size\n");
		ret = -EIO;
	}

	return ret;
}
358
/* dmaengine callback: wake up the waiter of atmel_nand_dma_transfer(). */
static void atmel_nand_dma_transfer_finished(void *data)
{
	complete((struct completion *)data);
}
365
/*
 * Move @len bytes between @buf and the device window at @dev_dma with a
 * memcpy DMA descriptor, blocking until the transfer completes. Returns 0 on
 * success and -EIO on any setup failure so the caller can fall back to PIO.
 */
static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
				   void *buf, dma_addr_t dev_dma, size_t len,
				   enum dma_data_direction dir)
{
	DECLARE_COMPLETION_ONSTACK(finished);
	dma_addr_t src_dma, dst_dma, buf_dma;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	buf_dma = dma_map_single(nc->dev, buf, len, dir);
	if (dma_mapping_error(nc->dev, buf_dma)) {
		dev_err(nc->dev,
			"Failed to prepare a buffer for DMA access\n");
		goto err;
	}

	/* Pick source/destination depending on the transfer direction. */
	if (dir == DMA_FROM_DEVICE) {
		src_dma = dev_dma;
		dst_dma = buf_dma;
	} else {
		src_dma = buf_dma;
		dst_dma = dev_dma;
	}

	tx = dmaengine_prep_dma_memcpy(nc->dmac, dst_dma, src_dma, len,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(nc->dev, "Failed to prepare DMA memcpy\n");
		goto err_unmap;
	}

	/* Completion is signalled from the dmaengine callback. */
	tx->callback = atmel_nand_dma_transfer_finished;
	tx->callback_param = &finished;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(nc->dev, "Failed to do DMA tx_submit\n");
		goto err_unmap;
	}

	dma_async_issue_pending(nc->dmac);
	wait_for_completion(&finished);
	dma_unmap_single(nc->dev, buf_dma, len, dir);

	return 0;

err_unmap:
	dma_unmap_single(nc->dev, buf_dma, len, dir);

err:
	dev_dbg(nc->dev, "Fall back to CPU I/O\n");

	return -EIO;
}
420
/*
 * Fire the operation assembled in nc->op: encode commands/CS/address-cycle
 * count into the NFC command word and trigger it. The op state is cleared
 * before returning, whatever the outcome.
 */
static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
{
	u8 *addrs = nc->op.addrs;
	unsigned int op = 0;
	u32 addr, val;
	int i, ret;

	nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE;

	for (i = 0; i < nc->op.ncmds; i++)
		op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]);

	/*
	 * The command word only carries 4 address cycles: a 5th one must go
	 * through the NFC_ADDR register, and the remaining 4 follow below.
	 */
	if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++);

	op |= ATMEL_NFC_CSID(nc->op.cs) |
	      ATMEL_NFC_ACYCLE(nc->op.naddrs);

	if (nc->op.ncmds > 1)
		op |= ATMEL_NFC_VCMD2;

	addr = addrs[0] | (addrs[1] << 8) | (addrs[2] << 16) |
	       (addrs[3] << 24);

	if (nc->op.data != ATMEL_NFC_NO_DATA) {
		op |= ATMEL_NFC_DATAEN;
		nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE;

		if (nc->op.data == ATMEL_NFC_WRITE_DATA)
			op |= ATMEL_NFC_NFCWR;
	}

	/* Clear all flags. */
	regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val);

	/*
	 * Send the command: on this IP the command word is encoded in the
	 * *offset* within the NFC command address space (nc->io), and the
	 * packed address cycles are the written value.
	 */
	regmap_write(nc->io, op, addr);

	ret = atmel_nfc_wait(nc, poll, 0);
	if (ret)
		dev_err(nc->base.dev,
			"Failed to send NAND command (err = %d)!",
			ret);

	/* Reset the op state. */
	memset(&nc->op, 0, sizeof(nc->op));

	return ret;
}
470
/*
 * Read @len bytes from the active chip-select data window into @buf,
 * preferring DMA when it is available and worthwhile, with a PIO fallback.
 */
static void atmel_nand_data_in(struct atmel_nand *nand, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct atmel_nand_controller *nc =
		to_nand_controller(nand->base.controller);
	bool try_dma = nc->dmac && virt_addr_valid(buf) &&
		       len >= MIN_DMA_LEN && !force_8bit;

	/* DMA first when profitable; a failed attempt degrades to PIO. */
	if (try_dma &&
	    !atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
				     DMA_FROM_DEVICE))
		return;

	if (force_8bit || !(nand->base.options & NAND_BUSWIDTH_16))
		ioread8_rep(nand->activecs->io.virt, buf, len);
	else
		ioread16_rep(nand->activecs->io.virt, buf, len / 2);
}
494
/*
 * Write @len bytes from @buf to the active chip-select data window,
 * preferring DMA when it is available and worthwhile, with a PIO fallback.
 */
static void atmel_nand_data_out(struct atmel_nand *nand, const void *buf,
				unsigned int len, bool force_8bit)
{
	struct atmel_nand_controller *nc =
		to_nand_controller(nand->base.controller);
	bool try_dma = nc->dmac && virt_addr_valid(buf) &&
		       len >= MIN_DMA_LEN && !force_8bit;

	/* DMA first when profitable; a failed attempt degrades to PIO. */
	if (try_dma &&
	    !atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
				     len, DMA_TO_DEVICE))
		return;

	if (force_8bit || !(nand->base.options & NAND_BUSWIDTH_16))
		iowrite8_rep(nand->activecs->io.virt, buf, len);
	else
		iowrite16_rep(nand->activecs->io.virt, buf, len / 2);
}
518
atmel_nand_waitrdy(struct atmel_nand * nand,unsigned int timeout_ms)519 static int atmel_nand_waitrdy(struct atmel_nand *nand, unsigned int timeout_ms)
520 {
521 if (nand->activecs->rb.type == ATMEL_NAND_NO_RB)
522 return nand_soft_waitrdy(&nand->base, timeout_ms);
523
524 return nand_gpio_waitrdy(&nand->base, nand->activecs->rb.gpio,
525 timeout_ms);
526 }
527
/*
 * HSMC variant of waitrdy: when the chip uses a native R/B line, poll the
 * NFC status register for the corresponding edge-detected bit; otherwise
 * delegate to the generic GPIO/soft implementation.
 */
static int atmel_hsmc_nand_waitrdy(struct atmel_nand *nand,
				   unsigned int timeout_ms)
{
	struct atmel_hsmc_nand_controller *nc;
	u32 status, mask;

	if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
		return atmel_nand_waitrdy(nand, timeout_ms);

	nc = to_hsmc_nand_controller(nand->base.controller);
	mask = ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
	return regmap_read_poll_timeout_atomic(nc->base.smc, ATMEL_HSMC_NFC_SR,
					       status, status & mask,
					       10, timeout_ms * 1000);
}
543
/* Record which chip select subsequent instructions should target. */
static void atmel_nand_select_target(struct atmel_nand *nand,
				     unsigned int cs)
{
	nand->activecs = &nand->cs[cs];
}
549
/*
 * HSMC variant of select_target: besides recording the active CS, program
 * the NFC page/spare geometry for this chip. The last programmed value is
 * cached in nc->cfg to avoid redundant regmap writes.
 */
static void atmel_hsmc_nand_select_target(struct atmel_nand *nand,
					  unsigned int cs)
{
	struct mtd_info *mtd = nand_to_mtd(&nand->base);
	struct atmel_hsmc_nand_controller *nc;
	u32 cfg = ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
		  ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
		  ATMEL_HSMC_NFC_CFG_RSPARE;

	nand->activecs = &nand->cs[cs];
	nc = to_hsmc_nand_controller(nand->base.controller);
	/* Skip the register update if the geometry did not change. */
	if (nc->cfg == cfg)
		return;

	regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
			   ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
			   ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK |
			   ATMEL_HSMC_NFC_CFG_RSPARE |
			   ATMEL_HSMC_NFC_CFG_WSPARE,
			   cfg);
	nc->cfg = cfg;
}
572
/*
 * Execute a single exec_op instruction on the raw SMC interface: commands
 * and addresses are byte writes at the CLE/ALE address offsets, data moves
 * go through the PIO/DMA helpers. Returns 0 or a negative errno.
 */
static int atmel_smc_nand_exec_instr(struct atmel_nand *nand,
				     const struct nand_op_instr *instr)
{
	struct atmel_nand_controller *nc;
	unsigned int i;

	nc = to_nand_controller(nand->base.controller);
	switch (instr->type) {
	case NAND_OP_CMD_INSTR:
		/* Command latch: write the opcode at the CLE offset. */
		writeb(instr->ctx.cmd.opcode,
		       nand->activecs->io.virt + nc->caps->cle_offs);
		return 0;
	case NAND_OP_ADDR_INSTR:
		/* Address latch: one byte write per address cycle. */
		for (i = 0; i < instr->ctx.addr.naddrs; i++)
			writeb(instr->ctx.addr.addrs[i],
			       nand->activecs->io.virt + nc->caps->ale_offs);
		return 0;
	case NAND_OP_DATA_IN_INSTR:
		atmel_nand_data_in(nand, instr->ctx.data.buf.in,
				   instr->ctx.data.len,
				   instr->ctx.data.force_8bit);
		return 0;
	case NAND_OP_DATA_OUT_INSTR:
		atmel_nand_data_out(nand, instr->ctx.data.buf.out,
				    instr->ctx.data.len,
				    instr->ctx.data.force_8bit);
		return 0;
	case NAND_OP_WAITRDY_INSTR:
		return atmel_nand_waitrdy(nand,
					  instr->ctx.waitrdy.timeout_ms);
	default:
		break;
	}

	return -EINVAL;
}
609
/*
 * exec_op for the SMC interface: run all instructions in sequence, with the
 * CS GPIO driven low for the duration of the operation (NOTE(review):
 * polarity handling is up to gpiod flags -- confirm against the DT setup).
 * The SMC interface can execute any instruction sequence, so check_only
 * requests always succeed.
 */
static int atmel_smc_nand_exec_op(struct atmel_nand *nand,
				  const struct nand_operation *op,
				  bool check_only)
{
	unsigned int i;
	int ret = 0;

	if (check_only)
		return 0;

	atmel_nand_select_target(nand, op->cs);
	gpiod_set_value(nand->activecs->csgpio, 0);
	for (i = 0; i < op->ninstrs && !ret; i++)
		ret = atmel_smc_nand_exec_instr(nand, &op->instrs[i]);
	gpiod_set_value(nand->activecs->csgpio, 1);

	return ret;
}
631
/*
 * Pack the CMD/ADDR instructions of a parsed subop into nc->op and execute
 * it synchronously (polling mode) on the NFC.
 */
static int atmel_hsmc_exec_cmd_addr(struct nand_chip *chip,
				    const struct nand_subop *subop)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;
	unsigned int i, j;

	nc = to_hsmc_nand_controller(chip->controller);

	nc->op.cs = nand->activecs->id;
	for (i = 0; i < subop->ninstrs; i++) {
		const struct nand_op_instr *instr = &subop->instrs[i];

		if (instr->type == NAND_OP_CMD_INSTR) {
			nc->op.cmds[nc->op.ncmds++] = instr->ctx.cmd.opcode;
			continue;
		}

		/*
		 * NOTE(review): the upper bound is the cycle *count*, not
		 * start_off + count. This is only correct while the ADDR
		 * element never starts at a non-zero offset in the pattern
		 * below -- confirm against atmel_hsmc_op_parser.
		 */
		for (j = nand_subop_get_addr_start_off(subop, i);
		     j < nand_subop_get_num_addr_cyc(subop, i); j++) {
			nc->op.addrs[nc->op.naddrs] = instr->ctx.addr.addrs[j];
			nc->op.naddrs++;
		}
	}

	return atmel_nfc_exec_op(nc, true);
}
659
atmel_hsmc_exec_rw(struct nand_chip * chip,const struct nand_subop * subop)660 static int atmel_hsmc_exec_rw(struct nand_chip *chip,
661 const struct nand_subop *subop)
662 {
663 const struct nand_op_instr *instr = subop->instrs;
664 struct atmel_nand *nand = to_atmel_nand(chip);
665
666 if (instr->type == NAND_OP_DATA_IN_INSTR)
667 atmel_nand_data_in(nand, instr->ctx.data.buf.in,
668 instr->ctx.data.len,
669 instr->ctx.data.force_8bit);
670 else
671 atmel_nand_data_out(nand, instr->ctx.data.buf.out,
672 instr->ctx.data.len,
673 instr->ctx.data.force_8bit);
674
675 return 0;
676 }
677
atmel_hsmc_exec_waitrdy(struct nand_chip * chip,const struct nand_subop * subop)678 static int atmel_hsmc_exec_waitrdy(struct nand_chip *chip,
679 const struct nand_subop *subop)
680 {
681 const struct nand_op_instr *instr = subop->instrs;
682 struct atmel_nand *nand = to_atmel_nand(chip);
683
684 return atmel_hsmc_nand_waitrdy(nand, instr->ctx.waitrdy.timeout_ms);
685 }
686
/*
 * Subop patterns the NFC can execute natively: an optional CMD + up to 5
 * address cycles + optional second CMD, a single data-in or data-out phase,
 * or a wait-ready.
 */
static const struct nand_op_parser atmel_hsmc_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_cmd_addr,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true)),
	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
			       NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0)),
	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_waitrdy,
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
);
699
/*
 * exec_op for the HSMC interface: let the generic parser split the
 * operation into the subop patterns the NFC supports and run them.
 */
static int atmel_hsmc_nand_exec_op(struct atmel_nand *nand,
				   const struct nand_operation *op,
				   bool check_only)
{
	if (check_only)
		return nand_op_parser_exec_op(&nand->base,
					      &atmel_hsmc_op_parser, op, true);

	atmel_hsmc_nand_select_target(nand, op->cs);

	return nand_op_parser_exec_op(&nand->base, &atmel_hsmc_op_parser, op,
				      false);
}
716
/*
 * Stage a page (and optionally the OOB area) into the NFC SRAM before a
 * write, trying DMA first and falling back to a CPU copy on failure.
 */
static void atmel_nfc_copy_to_sram(struct nand_chip *chip, const u8 *buf,
				   bool oob_required)
{
	struct atmel_hsmc_nand_controller *nc =
		to_hsmc_nand_controller(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	bool copied = false;

	if (nc->base.dmac)
		copied = !atmel_nand_dma_transfer(&nc->base, (void *)buf,
						  nc->sram.dma,
						  mtd->writesize,
						  DMA_TO_DEVICE);

	/* Falling back to CPU copy. */
	if (!copied)
		memcpy_toio(nc->sram.virt, buf, mtd->writesize);

	if (oob_required)
		memcpy_toio(nc->sram.virt + mtd->writesize, chip->oob_poi,
			    mtd->oobsize);
}
739
/*
 * Retrieve a page (and optionally the OOB area) from the NFC SRAM after a
 * read, trying DMA first and falling back to a CPU copy on failure.
 */
static void atmel_nfc_copy_from_sram(struct nand_chip *chip, u8 *buf,
				     bool oob_required)
{
	struct atmel_hsmc_nand_controller *nc =
		to_hsmc_nand_controller(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	bool copied = false;

	if (nc->base.dmac)
		copied = !atmel_nand_dma_transfer(&nc->base, buf, nc->sram.dma,
						  mtd->writesize,
						  DMA_FROM_DEVICE);

	/* Falling back to CPU copy. */
	if (!copied)
		memcpy_fromio(buf, nc->sram.virt, mtd->writesize);

	if (oob_required)
		memcpy_fromio(chip->oob_poi, nc->sram.virt + mtd->writesize,
			      mtd->oobsize);
}
761
atmel_nfc_set_op_addr(struct nand_chip * chip,int page,int column)762 static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
763 {
764 struct mtd_info *mtd = nand_to_mtd(chip);
765 struct atmel_hsmc_nand_controller *nc;
766
767 nc = to_hsmc_nand_controller(chip->controller);
768
769 if (column >= 0) {
770 nc->op.addrs[nc->op.naddrs++] = column;
771
772 /*
773 * 2 address cycles for the column offset on large page NANDs.
774 */
775 if (mtd->writesize > 512)
776 nc->op.addrs[nc->op.naddrs++] = column >> 8;
777 }
778
779 if (page >= 0) {
780 nc->op.addrs[nc->op.naddrs++] = page;
781 nc->op.addrs[nc->op.naddrs++] = page >> 8;
782
783 if (chip->options & NAND_ROW_ADDR_3)
784 nc->op.addrs[nc->op.naddrs++] = page >> 16;
785 }
786 }
787
/*
 * Enable the PMECC engine for @op (NAND_ECC_READ/NAND_ECC_WRITE).
 * Raw accessors bypass the engine entirely, so this is a no-op then.
 */
static int atmel_nand_pmecc_enable(struct nand_chip *chip, int op, bool raw)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_nand_controller *nc =
		to_nand_controller(chip->controller);
	int ret;

	if (raw)
		return 0;

	ret = atmel_pmecc_enable(nand->pmecc, op);
	if (ret)
		dev_err(nc->dev,
			"Failed to enable ECC engine (err = %d)\n", ret);

	return ret;
}
806
/* Disable the PMECC engine, unless raw mode never enabled it. */
static void atmel_nand_pmecc_disable(struct nand_chip *chip, bool raw)
{
	struct atmel_nand *nand = to_atmel_nand(chip);

	if (raw)
		return;

	atmel_pmecc_disable(nand->pmecc);
}
814
/*
 * Wait for the PMECC engine and copy the ECC bytes it generated for each
 * ECC step into the ECC region of chip->oob_poi. No-op in raw mode.
 */
static int atmel_nand_pmecc_generate_eccbytes(struct nand_chip *chip, bool raw)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand_controller *nc;
	struct mtd_oob_region oobregion;
	void *eccbuf;
	int ret, i;

	nc = to_nand_controller(chip->controller);

	if (raw)
		return 0;

	/* The engine must be done digesting the page data first. */
	ret = atmel_pmecc_wait_rdy(nand->pmecc);
	if (ret) {
		dev_err(nc->dev,
			"Failed to transfer NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	/* Locate the ECC bytes inside the OOB layout. */
	mtd_ooblayout_ecc(mtd, 0, &oobregion);
	eccbuf = chip->oob_poi + oobregion.offset;

	for (i = 0; i < chip->ecc.steps; i++) {
		atmel_pmecc_get_generated_eccbytes(nand->pmecc, i,
						   eccbuf);
		eccbuf += chip->ecc.bytes;
	}

	return 0;
}
848
/*
 * Run PMECC correction on a freshly-read page: correct each ECC step in
 * place, treating uncorrectable sectors as possibly-erased chunks when the
 * engine cannot do that itself. Updates mtd->ecc_stats and returns the
 * maximum number of bitflips seen in a single step (or a negative errno).
 * No-op (returns 0) in raw mode.
 */
static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
					 bool raw)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand_controller *nc;
	struct mtd_oob_region oobregion;
	int ret, i, max_bitflips = 0;
	void *databuf, *eccbuf;

	nc = to_nand_controller(chip->controller);

	if (raw)
		return 0;

	ret = atmel_pmecc_wait_rdy(nand->pmecc);
	if (ret) {
		dev_err(nc->dev,
			"Failed to read NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	mtd_ooblayout_ecc(mtd, 0, &oobregion);
	eccbuf = chip->oob_poi + oobregion.offset;
	databuf = buf;

	for (i = 0; i < chip->ecc.steps; i++) {
		ret = atmel_pmecc_correct_sector(nand->pmecc, i, databuf,
						 eccbuf);
		/*
		 * If the engine can't recognize erased chunks itself, check
		 * whether this "uncorrectable" sector is simply erased.
		 */
		if (ret < 0 && !atmel_pmecc_correct_erased_chunks(nand->pmecc))
			ret = nand_check_erased_ecc_chunk(databuf,
							  chip->ecc.size,
							  eccbuf,
							  chip->ecc.bytes,
							  NULL, 0,
							  chip->ecc.strength);

		if (ret >= 0) {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max(ret, max_bitflips);
		} else {
			/* Genuinely uncorrectable sector. */
			mtd->ecc_stats.failed++;
		}

		databuf += chip->ecc.size;
		eccbuf += chip->ecc.bytes;
	}

	return max_bitflips;
}
900
/*
 * Program one page through the raw SMC path: stream the data while the
 * PMECC engine digests it (unless @raw), append the generated ECC bytes to
 * the OOB area, then write the OOB and confirm the program operation.
 *
 * Fixes over the previous version: the return values of
 * nand_write_data_op() are no longer ignored, and the error path uses the
 * raw-aware atmel_nand_pmecc_disable() helper like the rest of this file
 * instead of calling atmel_pmecc_disable() unconditionally.
 */
static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
				     bool oob_required, int page, bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
	if (ret)
		return ret;

	/* Page data: the PMECC engine snoops this transfer when enabled. */
	ret = nand_write_data_op(chip, buf, mtd->writesize, false);
	if (ret)
		goto out_disable;

	ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
	if (ret)
		goto out_disable;

	atmel_nand_pmecc_disable(chip, raw);

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);

out_disable:
	atmel_nand_pmecc_disable(chip, raw);
	return ret;
}
928
atmel_nand_pmecc_write_page(struct nand_chip * chip,const u8 * buf,int oob_required,int page)929 static int atmel_nand_pmecc_write_page(struct nand_chip *chip, const u8 *buf,
930 int oob_required, int page)
931 {
932 return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, false);
933 }
934
atmel_nand_pmecc_write_page_raw(struct nand_chip * chip,const u8 * buf,int oob_required,int page)935 static int atmel_nand_pmecc_write_page_raw(struct nand_chip *chip,
936 const u8 *buf, int oob_required,
937 int page)
938 {
939 return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, true);
940 }
941
/*
 * Read one page (data + OOB) through the raw SMC path, then run PMECC
 * correction unless @raw. Returns max bitflips or a negative errno.
 */
static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
				    bool oob_required, int page, bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	nand_read_page_op(chip, page, 0, NULL, 0);

	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
	if (ret)
		return ret;

	/* Page data: the PMECC engine snoops this transfer when enabled. */
	ret = nand_read_data_op(chip, buf, mtd->writesize, false, false);
	if (ret)
		goto out_disable;

	/* The OOB holds the stored ECC bytes needed for correction. */
	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, false);
	if (ret)
		goto out_disable;

	ret = atmel_nand_pmecc_correct_data(chip, buf, raw);

out_disable:
	atmel_nand_pmecc_disable(chip, raw);

	return ret;
}
969
/* ECC-protected page read (non-raw accessor). */
static int atmel_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
				      int oob_required, int page)
{
	return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, false);
}
975
/* Raw page read: same path with the ECC engine bypassed. */
static int atmel_nand_pmecc_read_page_raw(struct nand_chip *chip, u8 *buf,
					  int oob_required, int page)
{
	return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, true);
}
981
/*
 * Program one page through the NFC: stage the data in the NFC SRAM, let the
 * NFC push it to the chip (PMECC snooping the transfer unless @raw), then
 * append the generated ECC bytes with a regular OOB write and confirm.
 */
static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
					  const u8 *buf, bool oob_required,
					  int page, bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;
	int ret;

	atmel_hsmc_nand_select_target(nand, chip->cur_cs);
	nc = to_hsmc_nand_controller(chip->controller);

	/* OOB is written separately below, so don't stage it. */
	atmel_nfc_copy_to_sram(chip, buf, false);

	/* Build the SEQIN + address + data-out NFC operation. */
	nc->op.cmds[0] = NAND_CMD_SEQIN;
	nc->op.ncmds = 1;
	atmel_nfc_set_op_addr(chip, page, 0x0);
	nc->op.cs = nand->activecs->id;
	nc->op.data = ATMEL_NFC_WRITE_DATA;

	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
	if (ret)
		return ret;

	ret = atmel_nfc_exec_op(nc, false);
	if (ret) {
		atmel_nand_pmecc_disable(chip, raw);
		dev_err(nc->base.dev,
			"Failed to transfer NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);

	atmel_nand_pmecc_disable(chip, raw);

	if (ret)
		return ret;

	nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);

	return nand_prog_page_end_op(chip);
}
1026
atmel_hsmc_nand_pmecc_write_page(struct nand_chip * chip,const u8 * buf,int oob_required,int page)1027 static int atmel_hsmc_nand_pmecc_write_page(struct nand_chip *chip,
1028 const u8 *buf, int oob_required,
1029 int page)
1030 {
1031 return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
1032 false);
1033 }
1034
atmel_hsmc_nand_pmecc_write_page_raw(struct nand_chip * chip,const u8 * buf,int oob_required,int page)1035 static int atmel_hsmc_nand_pmecc_write_page_raw(struct nand_chip *chip,
1036 const u8 *buf,
1037 int oob_required, int page)
1038 {
1039 return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
1040 true);
1041 }
1042
/*
 * Read one page through the NFC (HSMC block) with the PMECC engine.
 *
 * The NFC issues the READ0 (plus READSTART on large-page devices)
 * command sequence and transfers the page into its SRAM while the
 * PMECC engine computes the syndromes on the fly. The data is then
 * copied out of the SRAM and corrected (correction is skipped when
 * @raw is true).
 *
 * Returns the value of atmel_nand_pmecc_correct_data() on success
 * (conventionally the number of corrected bitflips — confirm against
 * that helper), or a negative error code.
 */
static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
					 bool oob_required, int page,
					 bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;
	int ret;

	atmel_hsmc_nand_select_target(nand, chip->cur_cs);
	nc = to_hsmc_nand_controller(chip->controller);

	/*
	 * Optimized read page accessors only work when the NAND R/B pin is
	 * connected to a native SoC R/B pin. If that's not the case, fallback
	 * to the non-optimized one.
	 */
	if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
		return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
						raw);

	/*
	 * NOTE(review): nc->op is appended to (ncmds++) rather than
	 * re-initialized here — this assumes the op state is zeroed after
	 * each atmel_nfc_exec_op() call; confirm against that helper.
	 */
	nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0;

	/* Large-page devices need the second command cycle (READSTART). */
	if (mtd->writesize > 512)
		nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READSTART;

	atmel_nfc_set_op_addr(chip, page, 0x0);
	nc->op.cs = nand->activecs->id;
	nc->op.data = ATMEL_NFC_READ_DATA;

	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
	if (ret)
		return ret;

	ret = atmel_nfc_exec_op(nc, false);
	if (ret) {
		atmel_nand_pmecc_disable(chip, raw);
		dev_err(nc->base.dev,
			"Failed to load NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	/* Pull the data (and OOB) out of the NFC SRAM into the caller buffer. */
	atmel_nfc_copy_from_sram(chip, buf, true);

	ret = atmel_nand_pmecc_correct_data(chip, buf, raw);

	/* Always disable the PMECC engine, even when correction failed. */
	atmel_nand_pmecc_disable(chip, raw);

	return ret;
}
1094
/* ECC read_page() hook: PMECC-corrected page read through the NFC. */
static int atmel_hsmc_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
					   int oob_required, int page)
{
	bool raw = false;

	return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
					     raw);
}
1101
/* ECC read_page_raw() hook: NFC read path with ECC correction bypassed. */
static int atmel_hsmc_nand_pmecc_read_page_raw(struct nand_chip *chip,
					       u8 *buf, int oob_required,
					       int page)
{
	bool raw = true;

	return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
					     raw);
}
1109
/*
 * Configure the PMECC engine for this chip.
 *
 * ECC strength and sector size are chosen in decreasing order of
 * priority from: the NAND_ECC_MAXIMIZE_STRENGTH user flag, explicit
 * chip->ecc values (possibly filled from the legacy "atmel,pmecc-cap"
 * and "atmel,pmecc-sector-size" DT properties), the chip's advertised
 * ECC requirements, and finally an automatic/maximized setting.
 *
 * Returns 0 on success, -ENOTSUPP when the controller has no PMECC
 * block, or the error returned by atmel_pmecc_create_user().
 */
static int atmel_nand_pmecc_init(struct nand_chip *chip)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_nand_controller *nc;
	struct atmel_pmecc_user_req req;

	nc = to_nand_controller(chip->controller);

	if (!nc->pmecc) {
		dev_err(nc->dev, "HW ECC not supported\n");
		return -ENOTSUPP;
	}

	/* Legacy bindings carry the PMECC config on the controller node. */
	if (nc->caps->legacy_of_bindings) {
		u32 val;

		if (!of_property_read_u32(nc->dev->of_node, "atmel,pmecc-cap",
					  &val))
			chip->ecc.strength = val;

		if (!of_property_read_u32(nc->dev->of_node,
					  "atmel,pmecc-sector-size",
					  &val))
			chip->ecc.size = val;
	}

	/* Pick the ECC strength (see the priority order above). */
	if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
	else if (chip->ecc.strength)
		req.ecc.strength = chip->ecc.strength;
	else if (requirements->strength)
		req.ecc.strength = requirements->strength;
	else
		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;

	/* Pick the ECC sector size, or let the PMECC core decide. */
	if (chip->ecc.size)
		req.ecc.sectorsize = chip->ecc.size;
	else if (requirements->step_size)
		req.ecc.sectorsize = requirements->step_size;
	else
		req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;

	req.pagesize = mtd->writesize;
	req.oobsize = mtd->oobsize;

	if (mtd->writesize <= 512) {
		/* Small-page layout: 4 ECC bytes at the start of the OOB. */
		req.ecc.bytes = 4;
		req.ecc.ooboffset = 0;
	} else {
		/*
		 * Large pages: use all OOB bytes but 2 (presumably reserved
		 * for the bad-block marker — confirm against the OOB layout).
		 */
		req.ecc.bytes = mtd->oobsize - 2;
		req.ecc.ooboffset = ATMEL_PMECC_OOBOFFSET_AUTO;
	}

	nand->pmecc = atmel_pmecc_create_user(nc->pmecc, &req);
	if (IS_ERR(nand->pmecc))
		return PTR_ERR(nand->pmecc);

	/* Propagate the values actually chosen by the PMECC core. */
	chip->ecc.algo = NAND_ECC_ALGO_BCH;
	chip->ecc.size = req.ecc.sectorsize;
	chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors;
	chip->ecc.strength = req.ecc.strength;

	/* The PMECC engine protects whole sectors only. */
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());

	return 0;
}
1182
atmel_nand_ecc_init(struct nand_chip * chip)1183 static int atmel_nand_ecc_init(struct nand_chip *chip)
1184 {
1185 struct atmel_nand_controller *nc;
1186 int ret;
1187
1188 nc = to_nand_controller(chip->controller);
1189
1190 switch (chip->ecc.engine_type) {
1191 case NAND_ECC_ENGINE_TYPE_NONE:
1192 case NAND_ECC_ENGINE_TYPE_SOFT:
1193 /*
1194 * Nothing to do, the core will initialize everything for us.
1195 */
1196 break;
1197
1198 case NAND_ECC_ENGINE_TYPE_ON_HOST:
1199 ret = atmel_nand_pmecc_init(chip);
1200 if (ret)
1201 return ret;
1202
1203 chip->ecc.read_page = atmel_nand_pmecc_read_page;
1204 chip->ecc.write_page = atmel_nand_pmecc_write_page;
1205 chip->ecc.read_page_raw = atmel_nand_pmecc_read_page_raw;
1206 chip->ecc.write_page_raw = atmel_nand_pmecc_write_page_raw;
1207 break;
1208
1209 default:
1210 /* Other modes are not supported. */
1211 dev_err(nc->dev, "Unsupported ECC mode: %d\n",
1212 chip->ecc.engine_type);
1213 return -ENOTSUPP;
1214 }
1215
1216 return 0;
1217 }
1218
atmel_hsmc_nand_ecc_init(struct nand_chip * chip)1219 static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip)
1220 {
1221 int ret;
1222
1223 ret = atmel_nand_ecc_init(chip);
1224 if (ret)
1225 return ret;
1226
1227 if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
1228 return 0;
1229
1230 /* Adjust the ECC operations for the HSMC IP. */
1231 chip->ecc.read_page = atmel_hsmc_nand_pmecc_read_page;
1232 chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
1233 chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
1234 chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
1235
1236 return 0;
1237 }
1238
/*
 * Translate the generic SDR NAND timings in @conf into an SMC
 * chip-select configuration (@smcconf), expressed in master-clock (MCK)
 * cycles.
 *
 * All intermediate computations are done in picoseconds, and every
 * value is rounded up (DIV_ROUND_UP) so the programmed timings are
 * always at least as long as what the NAND requires. The running
 * totalcycles accumulator tracks setup+pulse+hold so the final CYCLE
 * values can be clamped against tWC/tRC.
 *
 * Returns 0 on success, -ENOTSUPP for interfaces this controller cannot
 * drive (DDR, or EDO-mode read cycles with tRC < 30ns), or the error
 * reported by the SMC conf helpers (except a tolerated -ERANGE on tADL,
 * see below).
 */
static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
					  const struct nand_interface_config *conf,
					  struct atmel_smc_cs_conf *smcconf)
{
	u32 ncycles, totalcycles, timeps, mckperiodps, pulse;
	struct atmel_nand_controller *nc;
	int ret;

	nc = to_nand_controller(nand->base.controller);

	/* DDR interface not supported. */
	if (!nand_interface_is_sdr(conf))
		return -ENOTSUPP;

	/*
	 * tRC < 30ns implies EDO mode. This controller does not support this
	 * mode.
	 */
	if (conf->timings.sdr.tRC_min < 30000)
		return -ENOTSUPP;

	atmel_smc_cs_conf_init(smcconf);

	/* MCK period in picoseconds (clk rate is in Hz, timings in ps). */
	mckperiodps = NSEC_PER_SEC / clk_get_rate(nc->mck);
	mckperiodps *= 1000;

	/*
	 * Set write pulse timing. This one is easy to extract:
	 *
	 * NWE_PULSE = tWP
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWP_min, mckperiodps);
	totalcycles = ncycles;
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NWE_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * The write setup timing depends on the operation done on the NAND.
	 * All operations goes through the same data bus, but the operation
	 * type depends on the address we are writing to (ALE/CLE address
	 * lines).
	 * Since we have no way to differentiate the different operations at
	 * the SMC level, we must consider the worst case (the biggest setup
	 * time among all operation types):
	 *
	 * NWE_SETUP = max(tCLS, tCS, tALS, tDS) - NWE_PULSE
	 */
	timeps = max3(conf->timings.sdr.tCLS_min, conf->timings.sdr.tCS_min,
		      conf->timings.sdr.tALS_min);
	timeps = max(timeps, conf->timings.sdr.tDS_min);
	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
	/* The pulse cycles already cover part of the setup requirement. */
	ncycles = ncycles > totalcycles ? ncycles - totalcycles : 0;
	totalcycles += ncycles;
	ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NWE_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * As for the write setup timing, the write hold timing depends on the
	 * operation done on the NAND:
	 *
	 * NWE_HOLD = max(tCLH, tCH, tALH, tDH, tWH)
	 */
	timeps = max3(conf->timings.sdr.tCLH_min, conf->timings.sdr.tCH_min,
		      conf->timings.sdr.tALH_min);
	timeps = max3(timeps, conf->timings.sdr.tDH_min,
		      conf->timings.sdr.tWH_min);
	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
	totalcycles += ncycles;

	/*
	 * The write cycle timing is directly matching tWC, but is also
	 * dependent on the other timings on the setup and hold timings we
	 * calculated earlier, which gives:
	 *
	 * NWE_CYCLE = max(tWC, NWE_SETUP + NWE_PULSE + NWE_HOLD)
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWC_min, mckperiodps);
	ncycles = max(totalcycles, ncycles);
	ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NWE_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * We don't want the CS line to be toggled between each byte/word
	 * transfer to the NAND. The only way to guarantee that is to have the
	 * NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
	 *
	 * NCS_WR_PULSE = NWE_CYCLE
	 */
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_WR_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * As for the write setup timing, the read hold timing depends on the
	 * operation done on the NAND:
	 *
	 * NRD_HOLD = max(tREH, tRHOH)
	 */
	timeps = max(conf->timings.sdr.tREH_min, conf->timings.sdr.tRHOH_min);
	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
	totalcycles = ncycles;

	/*
	 * TDF = tRHZ - NRD_HOLD
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRHZ_max, mckperiodps);
	ncycles -= totalcycles;

	/*
	 * In ONFI 4.0 specs, tRHZ has been increased to support EDO NANDs and
	 * we might end up with a config that does not fit in the TDF field.
	 * Just take the max value in this case and hope that the NAND is more
	 * tolerant than advertised.
	 */
	if (ncycles > ATMEL_SMC_MODE_TDF_MAX)
		ncycles = ATMEL_SMC_MODE_TDF_MAX;
	else if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
		ncycles = ATMEL_SMC_MODE_TDF_MIN;

	smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles) |
			 ATMEL_SMC_MODE_TDFMODE_OPTIMIZED;

	/*
	 * Read pulse timing would directly match tRP,
	 * but some NAND flash chips (S34ML01G2 and W29N02KVxxAF)
	 * do not work properly in timing mode 3.
	 * The workaround is to extend the SMC NRD pulse to meet tREA
	 * timing.
	 *
	 * NRD_PULSE = max(tRP, tREA)
	 */
	pulse = max(conf->timings.sdr.tRP_min, conf->timings.sdr.tREA_max);
	ncycles = DIV_ROUND_UP(pulse, mckperiodps);
	totalcycles += ncycles;
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * Read setup timing depends on the operation done on the NAND:
	 *
	 * NRD_SETUP = max(tAR, tCLR)
	 */
	timeps = max(conf->timings.sdr.tAR_min, conf->timings.sdr.tCLR_min);
	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
	totalcycles += ncycles;
	ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NRD_SHIFT, ncycles);
	if (ret)
		return ret;

	/*
	 * The read cycle timing is directly matching tRC, but is also
	 * dependent on the setup and hold timings we calculated earlier,
	 * which gives:
	 *
	 * NRD_CYCLE = max(tRC, NRD_SETUP + NRD_PULSE + NRD_HOLD)
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
	ncycles = max(totalcycles, ncycles);
	ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NRD_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * We don't want the CS line to be toggled between each byte/word
	 * transfer from the NAND. The only way to guarantee that is to have
	 * the NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
	 *
	 * NCS_RD_PULSE = NRD_CYCLE
	 */
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_RD_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/* Txxx timings are directly matching tXXX ones. */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tCLR_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TCLR_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tADL_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TADL_SHIFT,
					   ncycles);
	/*
	 * Version 4 of the ONFI spec mandates that tADL be at least 400
	 * nanoseconds, but, depending on the master clock rate, 400 ns may not
	 * fit in the tADL field of the SMC reg. We need to relax the check and
	 * accept the -ERANGE return code.
	 *
	 * Note that previous versions of the ONFI spec had a lower tADL_min
	 * (100 or 200 ns). It's not clear why this timing constraint got
	 * increased but it seems most NANDs are fine with values lower than
	 * 400ns, so we should be safe.
	 */
	if (ret && ret != -ERANGE)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TAR_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRR_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TRR_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWB_max, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TWB_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	/* Attach the CS line to the NFC logic. */
	smcconf->timings |= ATMEL_HSMC_TIMINGS_NFSEL;

	/* Set the appropriate data bus width. */
	if (nand->base.options & NAND_BUSWIDTH_16)
		smcconf->mode |= ATMEL_SMC_MODE_DBW_16;

	/* Operate in NRD/NWE READ/WRITEMODE. */
	smcconf->mode |= ATMEL_SMC_MODE_READMODE_NRD |
			 ATMEL_SMC_MODE_WRITEMODE_NWE;

	return 0;
}
1483
atmel_smc_nand_setup_interface(struct atmel_nand * nand,int csline,const struct nand_interface_config * conf)1484 static int atmel_smc_nand_setup_interface(struct atmel_nand *nand,
1485 int csline,
1486 const struct nand_interface_config *conf)
1487 {
1488 struct atmel_nand_controller *nc;
1489 struct atmel_smc_cs_conf smcconf;
1490 struct atmel_nand_cs *cs;
1491 int ret;
1492
1493 nc = to_nand_controller(nand->base.controller);
1494
1495 ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
1496 if (ret)
1497 return ret;
1498
1499 if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1500 return 0;
1501
1502 cs = &nand->cs[csline];
1503 cs->smcconf = smcconf;
1504 atmel_smc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
1505
1506 return 0;
1507 }
1508
atmel_hsmc_nand_setup_interface(struct atmel_nand * nand,int csline,const struct nand_interface_config * conf)1509 static int atmel_hsmc_nand_setup_interface(struct atmel_nand *nand,
1510 int csline,
1511 const struct nand_interface_config *conf)
1512 {
1513 struct atmel_hsmc_nand_controller *nc;
1514 struct atmel_smc_cs_conf smcconf;
1515 struct atmel_nand_cs *cs;
1516 int ret;
1517
1518 nc = to_hsmc_nand_controller(nand->base.controller);
1519
1520 ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
1521 if (ret)
1522 return ret;
1523
1524 if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1525 return 0;
1526
1527 cs = &nand->cs[csline];
1528 cs->smcconf = smcconf;
1529
1530 if (cs->rb.type == ATMEL_NAND_NATIVE_RB)
1531 cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id);
1532
1533 atmel_hsmc_cs_conf_apply(nc->base.smc, nc->hsmc_layout, cs->id,
1534 &cs->smcconf);
1535
1536 return 0;
1537 }
1538
atmel_nand_setup_interface(struct nand_chip * chip,int csline,const struct nand_interface_config * conf)1539 static int atmel_nand_setup_interface(struct nand_chip *chip, int csline,
1540 const struct nand_interface_config *conf)
1541 {
1542 struct atmel_nand *nand = to_atmel_nand(chip);
1543 const struct nand_sdr_timings *sdr;
1544 struct atmel_nand_controller *nc;
1545
1546 sdr = nand_get_sdr_timings(conf);
1547 if (IS_ERR(sdr))
1548 return PTR_ERR(sdr);
1549
1550 nc = to_nand_controller(nand->base.controller);
1551
1552 if (csline >= nand->numcs ||
1553 (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY))
1554 return -EINVAL;
1555
1556 return nc->caps->ops->setup_interface(nand, csline, conf);
1557 }
1558
/* exec_op() hook: delegate to the controller-specific implementation. */
static int atmel_nand_exec_op(struct nand_chip *chip,
			      const struct nand_operation *op,
			      bool check_only)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_nand_controller *nc =
		to_nand_controller(nand->base.controller);

	return nc->caps->ops->exec_op(nand, op, check_only);
}
1570
atmel_nand_init(struct atmel_nand_controller * nc,struct atmel_nand * nand)1571 static void atmel_nand_init(struct atmel_nand_controller *nc,
1572 struct atmel_nand *nand)
1573 {
1574 struct nand_chip *chip = &nand->base;
1575 struct mtd_info *mtd = nand_to_mtd(chip);
1576
1577 mtd->dev.parent = nc->dev;
1578 nand->base.controller = &nc->base;
1579
1580 if (!nc->mck || !nc->caps->ops->setup_interface)
1581 chip->options |= NAND_KEEP_TIMINGS;
1582
1583 /*
1584 * Use a bounce buffer when the buffer passed by the MTD user is not
1585 * suitable for DMA.
1586 */
1587 if (nc->dmac)
1588 chip->options |= NAND_USES_DMA;
1589
1590 /* Default to HW ECC if pmecc is available. */
1591 if (nc->pmecc)
1592 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
1593 }
1594
atmel_smc_nand_init(struct atmel_nand_controller * nc,struct atmel_nand * nand)1595 static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
1596 struct atmel_nand *nand)
1597 {
1598 struct nand_chip *chip = &nand->base;
1599 struct atmel_smc_nand_controller *smc_nc;
1600 int i;
1601
1602 atmel_nand_init(nc, nand);
1603
1604 smc_nc = to_smc_nand_controller(chip->controller);
1605 if (!smc_nc->ebi_csa_regmap)
1606 return;
1607
1608 /* Attach the CS to the NAND Flash logic. */
1609 for (i = 0; i < nand->numcs; i++)
1610 regmap_update_bits(smc_nc->ebi_csa_regmap,
1611 smc_nc->ebi_csa->offs,
1612 BIT(nand->cs[i].id), BIT(nand->cs[i].id));
1613
1614 if (smc_nc->ebi_csa->nfd0_on_d16)
1615 regmap_update_bits(smc_nc->ebi_csa_regmap,
1616 smc_nc->ebi_csa->offs,
1617 smc_nc->ebi_csa->nfd0_on_d16,
1618 smc_nc->ebi_csa->nfd0_on_d16);
1619 }
1620
atmel_nand_controller_remove_nand(struct atmel_nand * nand)1621 static int atmel_nand_controller_remove_nand(struct atmel_nand *nand)
1622 {
1623 struct nand_chip *chip = &nand->base;
1624 struct mtd_info *mtd = nand_to_mtd(chip);
1625 int ret;
1626
1627 ret = mtd_device_unregister(mtd);
1628 if (ret)
1629 return ret;
1630
1631 nand_cleanup(chip);
1632 list_del(&nand->node);
1633
1634 return 0;
1635 }
1636
/*
 * Allocate and populate an atmel_nand from a chip DT node (@np).
 *
 * The "reg" property carries one <address, size> tuple per chip select
 * (@reg_cells cells per tuple). For each CS the I/O window is ioremapped
 * and the R/B source is resolved: a native SoC R/B line when "atmel,rb"
 * is present, otherwise an optional "rb" GPIO. Optional "det" (card
 * detect) and "cs" GPIOs are also picked up.
 *
 * All resources are devm-managed. Returns the new atmel_nand or an
 * ERR_PTR.
 */
static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
					    struct device_node *np,
					    int reg_cells)
{
	struct atmel_nand *nand;
	struct gpio_desc *gpio;
	int numcs, ret, i;

	numcs = of_property_count_elems_of_size(np, "reg",
						reg_cells * sizeof(u32));
	if (numcs < 1) {
		dev_err(nc->dev, "Missing or invalid reg property\n");
		return ERR_PTR(-EINVAL);
	}

	nand = devm_kzalloc(nc->dev, struct_size(nand, cs, numcs), GFP_KERNEL);
	if (!nand)
		return ERR_PTR(-ENOMEM);

	nand->numcs = numcs;

	/* Card-detect GPIO is optional: -ENOENT simply means "not wired". */
	gpio = devm_fwnode_gpiod_get(nc->dev, of_fwnode_handle(np),
				     "det", GPIOD_IN, "nand-det");
	if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
		dev_err(nc->dev,
			"Failed to get detect gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return ERR_CAST(gpio);
	}

	if (!IS_ERR(gpio))
		nand->cdgpio = gpio;

	for (i = 0; i < numcs; i++) {
		struct resource res;
		u32 val;

		/*
		 * NOTE(review): the resource index is hardcoded to 0, so
		 * every CS maps the same I/O window even though "reg" holds
		 * one tuple per CS — verify whether index i was intended
		 * for multi-CS setups.
		 */
		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			dev_err(nc->dev, "Invalid reg property (err = %d)\n",
				ret);
			return ERR_PTR(ret);
		}

		/* First cell of tuple i is the CS id. */
		ret = of_property_read_u32_index(np, "reg", i * reg_cells,
						 &val);
		if (ret) {
			dev_err(nc->dev, "Invalid reg property (err = %d)\n",
				ret);
			return ERR_PTR(ret);
		}

		nand->cs[i].id = val;

		nand->cs[i].io.dma = res.start;
		nand->cs[i].io.virt = devm_ioremap_resource(nc->dev, &res);
		if (IS_ERR(nand->cs[i].io.virt))
			return ERR_CAST(nand->cs[i].io.virt);

		if (!of_property_read_u32(np, "atmel,rb", &val)) {
			/* Native SoC R/B line. */
			if (val > ATMEL_NFC_MAX_RB_ID)
				return ERR_PTR(-EINVAL);

			nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
			nand->cs[i].rb.id = val;
		} else {
			/* Fall back to an optional per-CS R/B GPIO. */
			gpio = devm_fwnode_gpiod_get_index(nc->dev,
							   of_fwnode_handle(np),
							   "rb", i, GPIOD_IN,
							   "nand-rb");
			if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
				dev_err(nc->dev,
					"Failed to get R/B gpio (err = %ld)\n",
					PTR_ERR(gpio));
				return ERR_CAST(gpio);
			}

			if (!IS_ERR(gpio)) {
				nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB;
				nand->cs[i].rb.gpio = gpio;
			}
		}

		/* Optional CS GPIO, driven high (deasserted) by default. */
		gpio = devm_fwnode_gpiod_get_index(nc->dev,
						   of_fwnode_handle(np),
						   "cs", i, GPIOD_OUT_HIGH,
						   "nand-cs");
		if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
			dev_err(nc->dev,
				"Failed to get CS gpio (err = %ld)\n",
				PTR_ERR(gpio));
			return ERR_CAST(gpio);
		}

		if (!IS_ERR(gpio))
			nand->cs[i].csgpio = gpio;
	}

	nand_set_flash_node(&nand->base, np);

	return nand;
}
1739
1740 static int
atmel_nand_controller_add_nand(struct atmel_nand_controller * nc,struct atmel_nand * nand)1741 atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
1742 struct atmel_nand *nand)
1743 {
1744 struct nand_chip *chip = &nand->base;
1745 struct mtd_info *mtd = nand_to_mtd(chip);
1746 int ret;
1747
1748 /* No card inserted, skip this NAND. */
1749 if (nand->cdgpio && gpiod_get_value(nand->cdgpio)) {
1750 dev_info(nc->dev, "No SmartMedia card inserted.\n");
1751 return 0;
1752 }
1753
1754 nc->caps->ops->nand_init(nc, nand);
1755
1756 ret = nand_scan(chip, nand->numcs);
1757 if (ret) {
1758 dev_err(nc->dev, "NAND scan failed: %d\n", ret);
1759 return ret;
1760 }
1761
1762 ret = mtd_device_register(mtd, NULL, 0);
1763 if (ret) {
1764 dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
1765 nand_cleanup(chip);
1766 return ret;
1767 }
1768
1769 list_add_tail(&nand->node, &nc->chips);
1770
1771 return 0;
1772 }
1773
1774 static int
atmel_nand_controller_remove_nands(struct atmel_nand_controller * nc)1775 atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc)
1776 {
1777 struct atmel_nand *nand, *tmp;
1778 int ret;
1779
1780 list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
1781 ret = atmel_nand_controller_remove_nand(nand);
1782 if (ret)
1783 return ret;
1784 }
1785
1786 return 0;
1787 }
1788
/*
 * Register the single NAND chip described by the legacy DT bindings:
 * the chip lives directly on the controller node, with the I/O window
 * as the first MMIO resource and up to three anonymous, optional GPIOs
 * (index 0: R/B, index 1: CS, index 2: card detect).
 */
static int
atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
{
	struct device *dev = nc->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct atmel_nand *nand;
	struct gpio_desc *gpio;
	struct resource *res;

	/*
	 * Legacy bindings only allow connecting a single NAND with a unique CS
	 * line to the controller.
	 */
	nand = devm_kzalloc(nc->dev, sizeof(*nand) + sizeof(*nand->cs),
			    GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	nand->numcs = 1;

	nand->cs[0].io.virt = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(nand->cs[0].io.virt))
		return PTR_ERR(nand->cs[0].io.virt);

	nand->cs[0].io.dma = res->start;

	/*
	 * The old driver was hardcoding the CS id to 3 for all sama5
	 * controllers. Since this id is only meaningful for the sama5
	 * controller we can safely assign this id to 3 no matter the
	 * controller.
	 * If one wants to connect a NAND to a different CS line, he will
	 * have to use the new bindings.
	 */
	nand->cs[0].id = 3;

	/* R/B GPIO (optional: NULL when absent). */
	gpio = devm_gpiod_get_index_optional(dev, NULL, 0, GPIOD_IN);
	if (IS_ERR(gpio)) {
		dev_err(dev, "Failed to get R/B gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	if (gpio) {
		nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB;
		nand->cs[0].rb.gpio = gpio;
	}

	/* CS GPIO (optional), driven high (deasserted) by default. */
	gpio = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_HIGH);
	if (IS_ERR(gpio)) {
		dev_err(dev, "Failed to get CS gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	nand->cs[0].csgpio = gpio;

	/* Card detect GPIO (optional). */
	gpio = devm_gpiod_get_index_optional(nc->dev, NULL, 2, GPIOD_IN);
	if (IS_ERR(gpio)) {
		dev_err(dev,
			"Failed to get detect gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	nand->cdgpio = gpio;

	nand_set_flash_node(&nand->base, nc->dev->of_node);

	return atmel_nand_controller_add_nand(nc, nand);
}
1863
/*
 * Parse the controller DT node and register every child NAND chip.
 *
 * With the new bindings each child node describes one chip and its
 * "reg" property holds one <address, size> tuple per CS, so the number
 * of cells per tuple is derived from #address-cells + #size-cells.
 * Legacy bindings are handed off to the dedicated helper. On failure,
 * every chip registered so far is torn down.
 */
static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
{
	struct device_node *np;
	struct device *dev = nc->dev;
	int ret, reg_cells;
	u32 val;

	/* We do not retrieve the SMC syscon when parsing old DTs. */
	if (nc->caps->legacy_of_bindings)
		return atmel_nand_controller_legacy_add_nands(nc);

	np = dev->of_node;

	ret = of_property_read_u32(np, "#address-cells", &val);
	if (ret) {
		dev_err(dev, "missing #address-cells property\n");
		return ret;
	}

	reg_cells = val;

	ret = of_property_read_u32(np, "#size-cells", &val);
	if (ret) {
		dev_err(dev, "missing #size-cells property\n");
		return ret;
	}

	reg_cells += val;

	for_each_child_of_node_scoped(np, nand_np) {
		struct atmel_nand *nand;

		nand = atmel_nand_create(nc, nand_np, reg_cells);
		if (IS_ERR(nand)) {
			ret = PTR_ERR(nand);
			goto err;
		}

		ret = atmel_nand_controller_add_nand(nc, nand);
		if (ret)
			goto err;
	}

	return 0;

err:
	/* Unwind the chips that were successfully registered so far. */
	atmel_nand_controller_remove_nands(nc);

	return ret;
}
1914
/*
 * Release the controller-level resources acquired in
 * atmel_nand_controller_init(): the optional DMA channel and the MCK
 * clock reference (clk_put() tolerates a NULL/unset clock on legacy
 * bindings, where MCK is never fetched).
 */
static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc)
{
	if (nc->dmac)
		dma_release_channel(nc->dmac);

	clk_put(nc->mck);
}
1922
/*
 * Per-SoC EBI CSA (Chip Select Assignment) register descriptions: the
 * register offset within the matrix/SFR syscon and, on SoCs that
 * support it (sam9x60), the bit muxing NFD0 on D16.
 */
static const struct atmel_smc_nand_ebi_csa_cfg at91sam9260_ebi_csa = {
	.offs = AT91SAM9260_MATRIX_EBICSA,
};

static const struct atmel_smc_nand_ebi_csa_cfg at91sam9261_ebi_csa = {
	.offs = AT91SAM9261_MATRIX_EBICSA,
};

static const struct atmel_smc_nand_ebi_csa_cfg at91sam9263_ebi_csa = {
	.offs = AT91SAM9263_MATRIX_EBI0CSA,
};

static const struct atmel_smc_nand_ebi_csa_cfg at91sam9rl_ebi_csa = {
	.offs = AT91SAM9RL_MATRIX_EBICSA,
};

static const struct atmel_smc_nand_ebi_csa_cfg at91sam9g45_ebi_csa = {
	.offs = AT91SAM9G45_MATRIX_EBICSA,
};

static const struct atmel_smc_nand_ebi_csa_cfg at91sam9n12_ebi_csa = {
	.offs = AT91SAM9N12_MATRIX_EBICSA,
};

static const struct atmel_smc_nand_ebi_csa_cfg at91sam9x5_ebi_csa = {
	.offs = AT91SAM9X5_MATRIX_EBICSA,
};

static const struct atmel_smc_nand_ebi_csa_cfg sam9x60_ebi_csa = {
	.offs = AT91_SFR_CCFG_EBICSA,
	.nfd0_on_d16 = AT91_SFR_CCFG_NFD0_ON_D16,
};
1955
/*
 * Syscon nodes the EBI CSA phandle may point to, mapping each
 * matrix/SFR compatible to its CSA register description above.
 */
static const struct of_device_id __maybe_unused atmel_ebi_csa_regmap_of_ids[] = {
	{
		.compatible = "atmel,at91sam9260-matrix",
		.data = &at91sam9260_ebi_csa,
	},
	{
		.compatible = "atmel,at91sam9261-matrix",
		.data = &at91sam9261_ebi_csa,
	},
	{
		.compatible = "atmel,at91sam9263-matrix",
		.data = &at91sam9263_ebi_csa,
	},
	{
		.compatible = "atmel,at91sam9rl-matrix",
		.data = &at91sam9rl_ebi_csa,
	},
	{
		.compatible = "atmel,at91sam9g45-matrix",
		.data = &at91sam9g45_ebi_csa,
	},
	{
		.compatible = "atmel,at91sam9n12-matrix",
		.data = &at91sam9n12_ebi_csa,
	},
	{
		.compatible = "atmel,at91sam9x5-matrix",
		.data = &at91sam9x5_ebi_csa,
	},
	{
		.compatible = "microchip,sam9x60-sfr",
		.data = &sam9x60_ebi_csa,
	},
	{ /* sentinel */ },
};
1991
atmel_nand_attach_chip(struct nand_chip * chip)1992 static int atmel_nand_attach_chip(struct nand_chip *chip)
1993 {
1994 struct atmel_nand_controller *nc = to_nand_controller(chip->controller);
1995 struct atmel_nand *nand = to_atmel_nand(chip);
1996 struct mtd_info *mtd = nand_to_mtd(chip);
1997 int ret;
1998
1999 ret = nc->caps->ops->ecc_init(chip);
2000 if (ret)
2001 return ret;
2002
2003 if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
2004 /*
2005 * We keep the MTD name unchanged to avoid breaking platforms
2006 * where the MTD cmdline parser is used and the bootloader
2007 * has not been updated to use the new naming scheme.
2008 */
2009 mtd->name = "atmel_nand";
2010 } else if (!mtd->name) {
2011 /*
2012 * If the new bindings are used and the bootloader has not been
2013 * updated to pass a new mtdparts parameter on the cmdline, you
2014 * should define the following property in your nand node:
2015 *
2016 * label = "atmel_nand";
2017 *
2018 * This way, mtd->name will be set by the core when
2019 * nand_set_flash_node() is called.
2020 */
2021 mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
2022 "%s:nand.%d", dev_name(nc->dev),
2023 nand->cs[0].id);
2024 if (!mtd->name) {
2025 dev_err(nc->dev, "Failed to allocate mtd->name\n");
2026 return -ENOMEM;
2027 }
2028 }
2029
2030 return 0;
2031 }
2032
/* nand_controller hooks shared by the SMC and HSMC controller flavors. */
static const struct nand_controller_ops atmel_nand_controller_ops = {
	.attach_chip = atmel_nand_attach_chip,
	.setup_interface = atmel_nand_setup_interface,
	.exec_op = atmel_nand_exec_op,
};
2038
atmel_nand_controller_init(struct atmel_nand_controller * nc,struct platform_device * pdev,const struct atmel_nand_controller_caps * caps)2039 static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
2040 struct platform_device *pdev,
2041 const struct atmel_nand_controller_caps *caps)
2042 {
2043 struct device *dev = &pdev->dev;
2044 struct device_node *np = dev->of_node;
2045 int ret;
2046
2047 nand_controller_init(&nc->base);
2048 nc->base.ops = &atmel_nand_controller_ops;
2049 INIT_LIST_HEAD(&nc->chips);
2050 nc->dev = dev;
2051 nc->caps = caps;
2052
2053 platform_set_drvdata(pdev, nc);
2054
2055 nc->pmecc = devm_atmel_pmecc_get(dev);
2056 if (IS_ERR(nc->pmecc))
2057 return dev_err_probe(dev, PTR_ERR(nc->pmecc),
2058 "Could not get PMECC object\n");
2059
2060 if (nc->caps->has_dma && !atmel_nand_avoid_dma) {
2061 dma_cap_mask_t mask;
2062
2063 dma_cap_zero(mask);
2064 dma_cap_set(DMA_MEMCPY, mask);
2065
2066 nc->dmac = dma_request_channel(mask, NULL, NULL);
2067 if (nc->dmac)
2068 dev_info(nc->dev, "using %s for DMA transfers\n",
2069 dma_chan_name(nc->dmac));
2070 else
2071 dev_err(nc->dev, "Failed to request DMA channel\n");
2072 }
2073
2074 /* We do not retrieve the SMC syscon when parsing old DTs. */
2075 if (nc->caps->legacy_of_bindings)
2076 return 0;
2077
2078 nc->mck = of_clk_get(dev->parent->of_node, 0);
2079 if (IS_ERR(nc->mck)) {
2080 dev_err(dev, "Failed to retrieve MCK clk\n");
2081 ret = PTR_ERR(nc->mck);
2082 goto out_release_dma;
2083 }
2084
2085 np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
2086 if (!np) {
2087 dev_err(dev, "Missing or invalid atmel,smc property\n");
2088 ret = -EINVAL;
2089 goto out_release_dma;
2090 }
2091
2092 nc->smc = syscon_node_to_regmap(np);
2093 of_node_put(np);
2094 if (IS_ERR(nc->smc)) {
2095 ret = PTR_ERR(nc->smc);
2096 dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret);
2097 goto out_release_dma;
2098 }
2099
2100 return 0;
2101
2102 out_release_dma:
2103 if (nc->dmac)
2104 dma_release_channel(nc->dmac);
2105
2106 return ret;
2107 }
2108
/*
 * SMC-specific controller init: retrieve the optional EBICSA regmap (used
 * to route the chip-select lines to the NAND logic) from the parent EBI
 * node. Returns 0 when the regmap is absent or unrecognized so probing can
 * continue without it.
 */
static int
atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
{
	struct device *dev = nc->base.dev;
	const struct of_device_id *match;
	struct device_node *np;
	int ret;

	/* We do not retrieve the EBICSA regmap when parsing old DTs. */
	if (nc->base.caps->legacy_of_bindings)
		return 0;

	np = of_parse_phandle(dev->parent->of_node,
			      nc->base.caps->ebi_csa_regmap_name, 0);
	if (!np)
		return 0;

	match = of_match_node(atmel_ebi_csa_regmap_of_ids, np);
	if (!match) {
		of_node_put(np);
		return 0;
	}

	nc->ebi_csa_regmap = syscon_node_to_regmap(np);
	of_node_put(np);
	if (IS_ERR(nc->ebi_csa_regmap)) {
		ret = PTR_ERR(nc->ebi_csa_regmap);
		dev_err(dev, "Could not get EBICSA regmap (err = %d)\n", ret);
		return ret;
	}

	/*
	 * NOTE(review): match->data points at file-scope cfg data shared by
	 * all instances; the const qualifier is cast away and ->offs is
	 * mutated below, so the adjustment persists across driver re-binds —
	 * confirm this is intentional.
	 */
	nc->ebi_csa = (struct atmel_smc_nand_ebi_csa_cfg *)match->data;

	/*
	 * The at91sam9263 has 2 EBIs, if the NAND controller is under EBI1
	 * add 4 to ->ebi_csa->offs.
	 */
	if (of_device_is_compatible(dev->parent->of_node,
				    "atmel,at91sam9263-ebi1"))
		nc->ebi_csa->offs += 4;

	return 0;
}
2152
2153 static int
atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller * nc)2154 atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
2155 {
2156 struct regmap_config regmap_conf = {
2157 .reg_bits = 32,
2158 .val_bits = 32,
2159 .reg_stride = 4,
2160 };
2161
2162 struct device *dev = nc->base.dev;
2163 struct device_node *nand_np, *nfc_np;
2164 void __iomem *iomem;
2165 struct resource res;
2166 int ret;
2167
2168 nand_np = dev->of_node;
2169 nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
2170 if (!nfc_np) {
2171 dev_err(dev, "Could not find device node for sama5d3-nfc\n");
2172 return -ENODEV;
2173 }
2174
2175 nc->clk = of_clk_get(nfc_np, 0);
2176 if (IS_ERR(nc->clk)) {
2177 ret = PTR_ERR(nc->clk);
2178 dev_err(dev, "Failed to retrieve HSMC clock (err = %d)\n",
2179 ret);
2180 goto out;
2181 }
2182
2183 ret = clk_prepare_enable(nc->clk);
2184 if (ret) {
2185 dev_err(dev, "Failed to enable the HSMC clock (err = %d)\n",
2186 ret);
2187 goto out;
2188 }
2189
2190 nc->irq = of_irq_get(nand_np, 0);
2191 if (nc->irq <= 0) {
2192 ret = nc->irq ?: -ENXIO;
2193 if (ret != -EPROBE_DEFER)
2194 dev_err(dev, "Failed to get IRQ number (err = %d)\n",
2195 ret);
2196 goto out;
2197 }
2198
2199 ret = of_address_to_resource(nfc_np, 0, &res);
2200 if (ret) {
2201 dev_err(dev, "Invalid or missing NFC IO resource (err = %d)\n",
2202 ret);
2203 goto out;
2204 }
2205
2206 iomem = devm_ioremap_resource(dev, &res);
2207 if (IS_ERR(iomem)) {
2208 ret = PTR_ERR(iomem);
2209 goto out;
2210 }
2211
2212 regmap_conf.name = "nfc-io";
2213 regmap_conf.max_register = resource_size(&res) - 4;
2214 nc->io = devm_regmap_init_mmio(dev, iomem, ®map_conf);
2215 if (IS_ERR(nc->io)) {
2216 ret = PTR_ERR(nc->io);
2217 dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
2218 ret);
2219 goto out;
2220 }
2221
2222 ret = of_address_to_resource(nfc_np, 1, &res);
2223 if (ret) {
2224 dev_err(dev, "Invalid or missing HSMC resource (err = %d)\n",
2225 ret);
2226 goto out;
2227 }
2228
2229 iomem = devm_ioremap_resource(dev, &res);
2230 if (IS_ERR(iomem)) {
2231 ret = PTR_ERR(iomem);
2232 goto out;
2233 }
2234
2235 regmap_conf.name = "smc";
2236 regmap_conf.max_register = resource_size(&res) - 4;
2237 nc->base.smc = devm_regmap_init_mmio(dev, iomem, ®map_conf);
2238 if (IS_ERR(nc->base.smc)) {
2239 ret = PTR_ERR(nc->base.smc);
2240 dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
2241 ret);
2242 goto out;
2243 }
2244
2245 ret = of_address_to_resource(nfc_np, 2, &res);
2246 if (ret) {
2247 dev_err(dev, "Invalid or missing SRAM resource (err = %d)\n",
2248 ret);
2249 goto out;
2250 }
2251
2252 nc->sram.virt = devm_ioremap_resource(dev, &res);
2253 if (IS_ERR(nc->sram.virt)) {
2254 ret = PTR_ERR(nc->sram.virt);
2255 goto out;
2256 }
2257
2258 nc->sram.dma = res.start;
2259
2260 out:
2261 of_node_put(nfc_np);
2262
2263 return ret;
2264 }
2265
2266 static int
atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller * nc)2267 atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc)
2268 {
2269 struct device *dev = nc->base.dev;
2270 struct device_node *np;
2271 int ret;
2272
2273 np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
2274 if (!np) {
2275 dev_err(dev, "Missing or invalid atmel,smc property\n");
2276 return -EINVAL;
2277 }
2278
2279 nc->hsmc_layout = atmel_hsmc_get_reg_layout(np);
2280
2281 nc->irq = of_irq_get(np, 0);
2282 of_node_put(np);
2283 if (nc->irq <= 0) {
2284 ret = nc->irq ?: -ENXIO;
2285 if (ret != -EPROBE_DEFER)
2286 dev_err(dev, "Failed to get IRQ number (err = %d)\n",
2287 ret);
2288 return ret;
2289 }
2290
2291 np = of_parse_phandle(dev->of_node, "atmel,nfc-io", 0);
2292 if (!np) {
2293 dev_err(dev, "Missing or invalid atmel,nfc-io property\n");
2294 return -EINVAL;
2295 }
2296
2297 nc->io = syscon_node_to_regmap(np);
2298 of_node_put(np);
2299 if (IS_ERR(nc->io)) {
2300 ret = PTR_ERR(nc->io);
2301 dev_err(dev, "Could not get NFC IO regmap (err = %d)\n", ret);
2302 return ret;
2303 }
2304
2305 nc->sram.pool = of_gen_pool_get(nc->base.dev->of_node,
2306 "atmel,nfc-sram", 0);
2307 if (!nc->sram.pool) {
2308 dev_err(nc->base.dev, "Missing SRAM\n");
2309 return -ENOMEM;
2310 }
2311
2312 nc->sram.virt = (void __iomem *)gen_pool_dma_alloc(nc->sram.pool,
2313 ATMEL_NFC_SRAM_SIZE,
2314 &nc->sram.dma);
2315 if (!nc->sram.virt) {
2316 dev_err(nc->base.dev,
2317 "Could not allocate memory from the NFC SRAM pool\n");
2318 return -ENOMEM;
2319 }
2320
2321 return 0;
2322 }
2323
2324 static int
atmel_hsmc_nand_controller_remove(struct atmel_nand_controller * nc)2325 atmel_hsmc_nand_controller_remove(struct atmel_nand_controller *nc)
2326 {
2327 struct atmel_hsmc_nand_controller *hsmc_nc;
2328 int ret;
2329
2330 ret = atmel_nand_controller_remove_nands(nc);
2331 if (ret)
2332 return ret;
2333
2334 hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base);
2335 regmap_write(hsmc_nc->base.smc, ATMEL_HSMC_NFC_CTRL,
2336 ATMEL_HSMC_NFC_CTRL_DIS);
2337
2338 if (hsmc_nc->sram.pool)
2339 gen_pool_free(hsmc_nc->sram.pool,
2340 (unsigned long)hsmc_nc->sram.virt,
2341 ATMEL_NFC_SRAM_SIZE);
2342
2343 if (hsmc_nc->clk) {
2344 clk_disable_unprepare(hsmc_nc->clk);
2345 clk_put(hsmc_nc->clk);
2346 }
2347
2348 atmel_nand_controller_cleanup(nc);
2349
2350 return 0;
2351 }
2352
atmel_hsmc_nand_controller_probe(struct platform_device * pdev,const struct atmel_nand_controller_caps * caps)2353 static int atmel_hsmc_nand_controller_probe(struct platform_device *pdev,
2354 const struct atmel_nand_controller_caps *caps)
2355 {
2356 struct device *dev = &pdev->dev;
2357 struct atmel_hsmc_nand_controller *nc;
2358 int ret;
2359
2360 nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
2361 if (!nc)
2362 return -ENOMEM;
2363
2364 ret = atmel_nand_controller_init(&nc->base, pdev, caps);
2365 if (ret)
2366 return ret;
2367
2368 if (caps->legacy_of_bindings)
2369 ret = atmel_hsmc_nand_controller_legacy_init(nc);
2370 else
2371 ret = atmel_hsmc_nand_controller_init(nc);
2372
2373 if (ret)
2374 return ret;
2375
2376 /* Make sure all irqs are masked before registering our IRQ handler. */
2377 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
2378 ret = devm_request_irq(dev, nc->irq, atmel_nfc_interrupt,
2379 IRQF_SHARED, "nfc", nc);
2380 if (ret) {
2381 dev_err(dev,
2382 "Could not get register NFC interrupt handler (err = %d)\n",
2383 ret);
2384 goto err;
2385 }
2386
2387 /* Initial NFC configuration. */
2388 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG,
2389 ATMEL_HSMC_NFC_CFG_DTO_MAX);
2390 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
2391 ATMEL_HSMC_NFC_CTRL_EN);
2392
2393 ret = atmel_nand_controller_add_nands(&nc->base);
2394 if (ret)
2395 goto err;
2396
2397 return 0;
2398
2399 err:
2400 atmel_hsmc_nand_controller_remove(&nc->base);
2401
2402 return ret;
2403 }
2404
/* Controller operations for the HSMC (sama5+) flavour of the IP. */
static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = {
	.probe = atmel_hsmc_nand_controller_probe,
	.remove = atmel_hsmc_nand_controller_remove,
	.ecc_init = atmel_hsmc_nand_ecc_init,
	.nand_init = atmel_nand_init,
	.setup_interface = atmel_hsmc_nand_setup_interface,
	.exec_op = atmel_hsmc_nand_exec_op,
};

/* sama5 HSMC capabilities (new DT bindings): DMA capable, ALE=A21, CLE=A22. */
static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_hsmc_nc_ops,
};

/* Only used to parse old bindings. */
static const struct atmel_nand_controller_caps atmel_sama5_nand_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_hsmc_nc_ops,
	.legacy_of_bindings = true,
};
2429
atmel_smc_nand_controller_probe(struct platform_device * pdev,const struct atmel_nand_controller_caps * caps)2430 static int atmel_smc_nand_controller_probe(struct platform_device *pdev,
2431 const struct atmel_nand_controller_caps *caps)
2432 {
2433 struct device *dev = &pdev->dev;
2434 struct atmel_smc_nand_controller *nc;
2435 int ret;
2436
2437 nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
2438 if (!nc)
2439 return -ENOMEM;
2440
2441 ret = atmel_nand_controller_init(&nc->base, pdev, caps);
2442 if (ret)
2443 return ret;
2444
2445 ret = atmel_smc_nand_controller_init(nc);
2446 if (ret)
2447 return ret;
2448
2449 return atmel_nand_controller_add_nands(&nc->base);
2450 }
2451
/*
 * Tear down an SMC controller: unregister the NAND chips, then run the
 * common cleanup (DMA channel, clocks, ...).
 */
static int
atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc)
{
	int err = atmel_nand_controller_remove_nands(nc);

	if (err)
		return err;

	atmel_nand_controller_cleanup(nc);
	return 0;
}
2465
/*
 * The SMC reg layout of at91rm9200 is completely different which prevents us
 * from re-using atmel_smc_nand_setup_interface() for the
 * ->setup_interface() hook.
 * At this point, there's no support for the at91rm9200 SMC IP, so we leave
 * ->setup_interface() unassigned.
 */
static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
	.probe = atmel_smc_nand_controller_probe,
	.remove = atmel_smc_nand_controller_remove,
	.ecc_init = atmel_nand_ecc_init,
	.nand_init = atmel_smc_nand_init,
	.exec_op = atmel_smc_nand_exec_op,
};

/* at91rm9200 capabilities: no DMA, CS routing via the bus matrix syscon. */
static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ebi_csa_regmap_name = "atmel,matrix",
	.ops = &at91rm9200_nc_ops,
};

/* Controller operations for all SMC-based SoCs newer than at91rm9200. */
static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
	.probe = atmel_smc_nand_controller_probe,
	.remove = atmel_smc_nand_controller_remove,
	.ecc_init = atmel_nand_ecc_init,
	.nand_init = atmel_smc_nand_init,
	.setup_interface = atmel_smc_nand_setup_interface,
	.exec_op = atmel_smc_nand_exec_op,
};
2496
/* Per-SoC capabilities (new DT bindings) for the SMC-based controllers. */
static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ebi_csa_regmap_name = "atmel,matrix",
	.ops = &atmel_smc_nc_ops,
};

/* at91sam9261 is the odd one out: ALE on A22, CLE on A21. */
static const struct atmel_nand_controller_caps atmel_sam9261_nc_caps = {
	.ale_offs = BIT(22),
	.cle_offs = BIT(21),
	.ebi_csa_regmap_name = "atmel,matrix",
	.ops = &atmel_smc_nc_ops,
};

static const struct atmel_nand_controller_caps atmel_sam9g45_nc_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ebi_csa_regmap_name = "atmel,matrix",
	.ops = &atmel_smc_nc_ops,
};

/* sam9x60 routes the CS through the SFR block instead of the bus matrix. */
static const struct atmel_nand_controller_caps microchip_sam9x60_nc_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ebi_csa_regmap_name = "microchip,sfr",
	.ops = &atmel_smc_nc_ops,
};

/* Only used to parse old bindings. */
static const struct atmel_nand_controller_caps atmel_rm9200_nand_caps = {
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_smc_nc_ops,
	.legacy_of_bindings = true,
};

static const struct atmel_nand_controller_caps atmel_sam9261_nand_caps = {
	.ale_offs = BIT(22),
	.cle_offs = BIT(21),
	.ops = &atmel_smc_nc_ops,
	.legacy_of_bindings = true,
};

static const struct atmel_nand_controller_caps atmel_sam9g45_nand_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_smc_nc_ops,
	.legacy_of_bindings = true,
};
2549
2550 static const struct of_device_id atmel_nand_controller_of_ids[] = {
2551 {
2552 .compatible = "atmel,at91rm9200-nand-controller",
2553 .data = &atmel_rm9200_nc_caps,
2554 },
2555 {
2556 .compatible = "atmel,at91sam9260-nand-controller",
2557 .data = &atmel_sam9260_nc_caps,
2558 },
2559 {
2560 .compatible = "atmel,at91sam9261-nand-controller",
2561 .data = &atmel_sam9261_nc_caps,
2562 },
2563 {
2564 .compatible = "atmel,at91sam9g45-nand-controller",
2565 .data = &atmel_sam9g45_nc_caps,
2566 },
2567 {
2568 .compatible = "atmel,sama5d3-nand-controller",
2569 .data = &atmel_sama5_nc_caps,
2570 },
2571 {
2572 .compatible = "microchip,sam9x60-nand-controller",
2573 .data = µchip_sam9x60_nc_caps,
2574 },
2575 /* Support for old/deprecated bindings: */
2576 {
2577 .compatible = "atmel,at91rm9200-nand",
2578 .data = &atmel_rm9200_nand_caps,
2579 },
2580 {
2581 .compatible = "atmel,sama5d4-nand",
2582 .data = &atmel_rm9200_nand_caps,
2583 },
2584 {
2585 .compatible = "atmel,sama5d2-nand",
2586 .data = &atmel_rm9200_nand_caps,
2587 },
2588 { /* sentinel */ },
2589 };
2590 MODULE_DEVICE_TABLE(of, atmel_nand_controller_of_ids);
2591
atmel_nand_controller_probe(struct platform_device * pdev)2592 static int atmel_nand_controller_probe(struct platform_device *pdev)
2593 {
2594 const struct atmel_nand_controller_caps *caps;
2595
2596 if (pdev->id_entry)
2597 caps = (void *)pdev->id_entry->driver_data;
2598 else
2599 caps = of_device_get_match_data(&pdev->dev);
2600
2601 if (!caps) {
2602 dev_err(&pdev->dev, "Could not retrieve NFC caps\n");
2603 return -EINVAL;
2604 }
2605
2606 if (caps->legacy_of_bindings) {
2607 struct device_node *nfc_node;
2608 u32 ale_offs = 21;
2609
2610 /*
2611 * If we are parsing legacy DT props and the DT contains a
2612 * valid NFC node, forward the request to the sama5 logic.
2613 */
2614 nfc_node = of_get_compatible_child(pdev->dev.of_node,
2615 "atmel,sama5d3-nfc");
2616 if (nfc_node) {
2617 caps = &atmel_sama5_nand_caps;
2618 of_node_put(nfc_node);
2619 }
2620
2621 /*
2622 * Even if the compatible says we are dealing with an
2623 * at91rm9200 controller, the atmel,nand-has-dma specify that
2624 * this controller supports DMA, which means we are in fact
2625 * dealing with an at91sam9g45+ controller.
2626 */
2627 if (!caps->has_dma &&
2628 of_property_read_bool(pdev->dev.of_node,
2629 "atmel,nand-has-dma"))
2630 caps = &atmel_sam9g45_nand_caps;
2631
2632 /*
2633 * All SoCs except the at91sam9261 are assigning ALE to A21 and
2634 * CLE to A22. If atmel,nand-addr-offset != 21 this means we're
2635 * actually dealing with an at91sam9261 controller.
2636 */
2637 of_property_read_u32(pdev->dev.of_node,
2638 "atmel,nand-addr-offset", &ale_offs);
2639 if (ale_offs != 21)
2640 caps = &atmel_sam9261_nand_caps;
2641 }
2642
2643 return caps->ops->probe(pdev, caps);
2644 }
2645
atmel_nand_controller_remove(struct platform_device * pdev)2646 static void atmel_nand_controller_remove(struct platform_device *pdev)
2647 {
2648 struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
2649
2650 WARN_ON(nc->caps->ops->remove(nc));
2651 }
2652
atmel_nand_controller_resume(struct device * dev)2653 static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
2654 {
2655 struct atmel_nand_controller *nc = dev_get_drvdata(dev);
2656 struct atmel_nand *nand;
2657
2658 if (nc->pmecc)
2659 atmel_pmecc_reset(nc->pmecc);
2660
2661 list_for_each_entry(nand, &nc->chips, node) {
2662 int i;
2663
2664 for (i = 0; i < nand->numcs; i++)
2665 nand_reset(&nand->base, i);
2666 }
2667
2668 return 0;
2669 }
2670
2671 static SIMPLE_DEV_PM_OPS(atmel_nand_controller_pm_ops, NULL,
2672 atmel_nand_controller_resume);
2673
/* Platform driver glue; matching is done through the OF table above. */
static struct platform_driver atmel_nand_controller_driver = {
	.driver = {
		.name = "atmel-nand-controller",
		.of_match_table = atmel_nand_controller_of_ids,
		.pm = &atmel_nand_controller_pm_ops,
	},
	.probe = atmel_nand_controller_probe,
	.remove = atmel_nand_controller_remove,
};
module_platform_driver(atmel_nand_controller_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_DESCRIPTION("NAND Flash Controller driver for Atmel SoCs");
MODULE_ALIAS("platform:atmel-nand-controller");
2689