/* xref: /linux/drivers/mtd/nand/raw/atmel/nand-controller.c (revision bfd5bb6f90af092aa345b15cd78143956a13c2a8) */
1 /*
2  * Copyright 2017 ATMEL
3  * Copyright 2017 Free Electrons
4  *
5  * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
6  *
7  * Derived from the atmel_nand.c driver which contained the following
8  * copyrights:
9  *
10  *   Copyright 2003 Rick Bronson
11  *
12  *   Derived from drivers/mtd/nand/autcpu12.c (removed in v3.8)
13  *	Copyright 2001 Thomas Gleixner (gleixner@autronix.de)
14  *
15  *   Derived from drivers/mtd/spia.c (removed in v3.8)
16  *	Copyright 2000 Steven J. Hill (sjhill@cotw.com)
17  *
18  *
19  *   Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
20  *	Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007
21  *
22  *   Derived from Das U-Boot source code
23  *	(u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
24  *	Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
25  *
26  *   Add Programmable Multibit ECC support for various AT91 SoC
27  *	Copyright 2012 ATMEL, Hong Xu
28  *
29  *   Add Nand Flash Controller support for SAMA5 SoC
30  *	Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
31  *
32  * This program is free software; you can redistribute it and/or modify
33  * it under the terms of the GNU General Public License version 2 as
34  * published by the Free Software Foundation.
35  *
36  * A few words about the naming convention in this file. This convention
37  * applies to structure and function names.
38  *
39  * Prefixes:
40  *
41  * - atmel_nand_: all generic structures/functions
42  * - atmel_smc_nand_: all structures/functions specific to the SMC interface
43  *		      (at91sam9 and avr32 SoCs)
44  * - atmel_hsmc_nand_: all structures/functions specific to the HSMC interface
45  *		       (sama5 SoCs and later)
46  * - atmel_nfc_: all structures/functions used to manipulate the NFC sub-block
47  *		 that is available in the HSMC block
48  * - <soc>_nand_: all SoC specific structures/functions
49  */
50 
51 #include <linux/clk.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/dmaengine.h>
54 #include <linux/genalloc.h>
55 #include <linux/gpio.h>
56 #include <linux/gpio/consumer.h>
57 #include <linux/interrupt.h>
58 #include <linux/mfd/syscon.h>
59 #include <linux/mfd/syscon/atmel-matrix.h>
60 #include <linux/mfd/syscon/atmel-smc.h>
61 #include <linux/module.h>
62 #include <linux/mtd/rawnand.h>
63 #include <linux/of_address.h>
64 #include <linux/of_irq.h>
65 #include <linux/of_platform.h>
66 #include <linux/iopoll.h>
67 #include <linux/platform_device.h>
68 #include <linux/regmap.h>
69 
70 #include "pmecc.h"
71 
72 #define ATMEL_HSMC_NFC_CFG			0x0
73 #define ATMEL_HSMC_NFC_CFG_SPARESIZE(x)		(((x) / 4) << 24)
74 #define ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK	GENMASK(30, 24)
75 #define ATMEL_HSMC_NFC_CFG_DTO(cyc, mul)	(((cyc) << 16) | ((mul) << 20))
76 #define ATMEL_HSMC_NFC_CFG_DTO_MAX		GENMASK(22, 16)
77 #define ATMEL_HSMC_NFC_CFG_RBEDGE		BIT(13)
78 #define ATMEL_HSMC_NFC_CFG_FALLING_EDGE		BIT(12)
79 #define ATMEL_HSMC_NFC_CFG_RSPARE		BIT(9)
80 #define ATMEL_HSMC_NFC_CFG_WSPARE		BIT(8)
81 #define ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK	GENMASK(2, 0)
82 #define ATMEL_HSMC_NFC_CFG_PAGESIZE(x)		(fls((x) / 512) - 1)
83 
84 #define ATMEL_HSMC_NFC_CTRL			0x4
85 #define ATMEL_HSMC_NFC_CTRL_EN			BIT(0)
86 #define ATMEL_HSMC_NFC_CTRL_DIS			BIT(1)
87 
88 #define ATMEL_HSMC_NFC_SR			0x8
89 #define ATMEL_HSMC_NFC_IER			0xc
90 #define ATMEL_HSMC_NFC_IDR			0x10
91 #define ATMEL_HSMC_NFC_IMR			0x14
92 #define ATMEL_HSMC_NFC_SR_ENABLED		BIT(1)
93 #define ATMEL_HSMC_NFC_SR_RB_RISE		BIT(4)
94 #define ATMEL_HSMC_NFC_SR_RB_FALL		BIT(5)
95 #define ATMEL_HSMC_NFC_SR_BUSY			BIT(8)
96 #define ATMEL_HSMC_NFC_SR_WR			BIT(11)
97 #define ATMEL_HSMC_NFC_SR_CSID			GENMASK(14, 12)
98 #define ATMEL_HSMC_NFC_SR_XFRDONE		BIT(16)
99 #define ATMEL_HSMC_NFC_SR_CMDDONE		BIT(17)
100 #define ATMEL_HSMC_NFC_SR_DTOE			BIT(20)
101 #define ATMEL_HSMC_NFC_SR_UNDEF			BIT(21)
102 #define ATMEL_HSMC_NFC_SR_AWB			BIT(22)
103 #define ATMEL_HSMC_NFC_SR_NFCASE		BIT(23)
104 #define ATMEL_HSMC_NFC_SR_ERRORS		(ATMEL_HSMC_NFC_SR_DTOE | \
105 						 ATMEL_HSMC_NFC_SR_UNDEF | \
106 						 ATMEL_HSMC_NFC_SR_AWB | \
107 						 ATMEL_HSMC_NFC_SR_NFCASE)
108 #define ATMEL_HSMC_NFC_SR_RBEDGE(x)		BIT((x) + 24)
109 
110 #define ATMEL_HSMC_NFC_ADDR			0x18
111 #define ATMEL_HSMC_NFC_BANK			0x1c
112 
113 #define ATMEL_NFC_MAX_RB_ID			7
114 
115 #define ATMEL_NFC_SRAM_SIZE			0x2400
116 
117 #define ATMEL_NFC_CMD(pos, cmd)			((cmd) << (((pos) * 8) + 2))
118 #define ATMEL_NFC_VCMD2				BIT(18)
119 #define ATMEL_NFC_ACYCLE(naddrs)		((naddrs) << 19)
120 #define ATMEL_NFC_CSID(cs)			((cs) << 22)
121 #define ATMEL_NFC_DATAEN			BIT(25)
122 #define ATMEL_NFC_NFCWR				BIT(26)
123 
124 #define ATMEL_NFC_MAX_ADDR_CYCLES		5
125 
126 #define ATMEL_NAND_ALE_OFFSET			BIT(21)
127 #define ATMEL_NAND_CLE_OFFSET			BIT(22)
128 
129 #define DEFAULT_TIMEOUT_MS			1000
130 #define MIN_DMA_LEN				128
131 
/* How the NAND R/B (ready/busy) line is wired for a given chip select. */
enum atmel_nand_rb_type {
	ATMEL_NAND_NO_RB,	/* R/B not connected: rely on timeouts */
	ATMEL_NAND_NATIVE_RB,	/* wired to a native SoC R/B pin */
	ATMEL_NAND_GPIO_RB,	/* wired to a GPIO */
};
137 
/* R/B line description: which union member is valid depends on @type. */
struct atmel_nand_rb {
	enum atmel_nand_rb_type type;
	union {
		struct gpio_desc *gpio;	/* ATMEL_NAND_GPIO_RB */
		int id;			/* ATMEL_NAND_NATIVE_RB */
	};
};
145 
/*
 * Per-chip-select state: CS id, R/B wiring, optional CS GPIO, the
 * virtual/DMA addresses of the CS memory window, and the SMC timing
 * configuration applied for this CS.
 */
struct atmel_nand_cs {
	int id;
	struct atmel_nand_rb rb;
	struct gpio_desc *csgpio;
	struct {
		void __iomem *virt;
		dma_addr_t dma;
	} io;

	struct atmel_smc_cs_conf smcconf;
};
157 
/*
 * One NAND device attached to the controller.
 * @node:     entry in the controller's ->chips list
 * @base:     embedded generic NAND chip (container_of() anchor)
 * @activecs: currently selected CS, NULL when deselected
 * @pmecc:    per-chip PMECC user context
 * @cdgpio:   optional card-detect GPIO
 * @cs:       flexible array of @numcs chip-select descriptions
 */
struct atmel_nand {
	struct list_head node;
	struct device *dev;
	struct nand_chip base;
	struct atmel_nand_cs *activecs;
	struct atmel_pmecc_user *pmecc;
	struct gpio_desc *cdgpio;
	int numcs;
	struct atmel_nand_cs cs[];
};
168 
/* Get the atmel_nand embedding a generic nand_chip. */
static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip)
{
	return container_of(chip, struct atmel_nand, base);
}
173 
/* Data-transfer direction of an NFC operation. */
enum atmel_nfc_data_xfer {
	ATMEL_NFC_NO_DATA,
	ATMEL_NFC_READ_DATA,
	ATMEL_NFC_WRITE_DATA,
};
179 
/*
 * NFC operation being accumulated by the cmd_ctrl/set_op_addr hooks and
 * consumed by atmel_nfc_exec_op().
 * @wait:   status bits that must all be seen before the op is done
 * @errors: error status bits accumulated while waiting
 */
struct atmel_nfc_op {
	u8 cs;
	u8 ncmds;
	u8 cmds[2];
	u8 naddrs;
	u8 addrs[5];
	enum atmel_nfc_data_xfer data;
	u32 wait;
	u32 errors;
};
190 
191 struct atmel_nand_controller;
192 struct atmel_nand_controller_caps;
193 
/* Controller-type specific hooks (SMC vs HSMC vs legacy bindings). */
struct atmel_nand_controller_ops {
	int (*probe)(struct platform_device *pdev,
		     const struct atmel_nand_controller_caps *caps);
	int (*remove)(struct atmel_nand_controller *nc);
	void (*nand_init)(struct atmel_nand_controller *nc,
			  struct atmel_nand *nand);
	int (*ecc_init)(struct atmel_nand *nand);
	int (*setup_data_interface)(struct atmel_nand *nand, int csline,
				    const struct nand_data_interface *conf);
};
204 
/*
 * Static capabilities of a controller variant.
 * @has_dma:            DMA may be used for page transfers
 * @legacy_of_bindings: instantiated from the old DT bindings
 * @ale_offs/@cle_offs: offsets of the ALE/CLE lines inside the CS window
 * @ops:                controller-type specific hooks
 */
struct atmel_nand_controller_caps {
	bool has_dma;
	bool legacy_of_bindings;
	u32 ale_offs;
	u32 cle_offs;
	const struct atmel_nand_controller_ops *ops;
};
212 
/*
 * State common to the SMC and HSMC controller variants.
 * @base:  generic NAND controller object
 * @smc:   regmap over the (H)SMC registers
 * @dmac:  optional DMA channel used for page transfers
 * @pmecc: PMECC ECC engine, NULL when HW ECC is unavailable
 * @chips: list of atmel_nand devices attached to this controller
 * @mck:   controller clock, used when computing SMC timings
 */
struct atmel_nand_controller {
	struct nand_hw_control base;
	const struct atmel_nand_controller_caps *caps;
	struct device *dev;
	struct regmap *smc;
	struct dma_chan *dmac;
	struct atmel_pmecc *pmecc;
	struct list_head chips;
	struct clk *mck;
};
223 
/* Get the atmel_nand_controller embedding a generic nand_hw_control. */
static inline struct atmel_nand_controller *
to_nand_controller(struct nand_hw_control *ctl)
{
	return container_of(ctl, struct atmel_nand_controller, base);
}
229 
/*
 * SMC (at91sam9/avr32) controller state: the bus matrix regmap and the
 * EBI_CSA register offset are needed to route chip selects to the NAND.
 */
struct atmel_smc_nand_controller {
	struct atmel_nand_controller base;
	struct regmap *matrix;
	unsigned int ebi_csa_offs;
};
235 
/* Get the SMC controller embedding a generic nand_hw_control. */
static inline struct atmel_smc_nand_controller *
to_smc_nand_controller(struct nand_hw_control *ctl)
{
	return container_of(to_nand_controller(ctl),
			    struct atmel_smc_nand_controller, base);
}
242 
/*
 * HSMC (sama5 and later) controller state.
 * @sram:        NFC SRAM used to stage page data (genalloc pool plus the
 *               virtual/DMA addresses of the allocated chunk)
 * @hsmc_layout: HSMC register layout description
 * @io:          regmap over the NFC command space
 * @op:          NFC operation currently being built/executed
 * @complete:    completion signalled by the NFC interrupt handler
 * @irq:         NFC interrupt line
 */
struct atmel_hsmc_nand_controller {
	struct atmel_nand_controller base;
	struct {
		struct gen_pool *pool;
		void __iomem *virt;
		dma_addr_t dma;
	} sram;
	const struct atmel_hsmc_reg_layout *hsmc_layout;
	struct regmap *io;
	struct atmel_nfc_op op;
	struct completion complete;
	int irq;

	/* Only used when instantiating from legacy DT bindings. */
	struct clk *clk;
};
259 
/* Get the HSMC controller embedding a generic nand_hw_control. */
static inline struct atmel_hsmc_nand_controller *
to_hsmc_nand_controller(struct nand_hw_control *ctl)
{
	return container_of(to_nand_controller(ctl),
			    struct atmel_hsmc_nand_controller, base);
}
266 
267 static bool atmel_nfc_op_done(struct atmel_nfc_op *op, u32 status)
268 {
269 	op->errors |= status & ATMEL_HSMC_NFC_SR_ERRORS;
270 	op->wait ^= status & op->wait;
271 
272 	return !op->wait || op->errors;
273 }
274 
275 static irqreturn_t atmel_nfc_interrupt(int irq, void *data)
276 {
277 	struct atmel_hsmc_nand_controller *nc = data;
278 	u32 sr, rcvd;
279 	bool done;
280 
281 	regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &sr);
282 
283 	rcvd = sr & (nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
284 	done = atmel_nfc_op_done(&nc->op, sr);
285 
286 	if (rcvd)
287 		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, rcvd);
288 
289 	if (done)
290 		complete(&nc->complete);
291 
292 	return rcvd ? IRQ_HANDLED : IRQ_NONE;
293 }
294 
/*
 * Wait for the NFC operation described in nc->op to complete.
 *
 * @poll: busy-poll the status register instead of sleeping on the
 *	  completion armed by the NFC interrupt handler.
 * @timeout_ms: timeout in milliseconds, 0 selects DEFAULT_TIMEOUT_MS.
 *
 * Returns 0 on success, -ETIMEDOUT on wait/R-B timeout, or -EIO for any
 * other controller-reported error.
 */
static int atmel_nfc_wait(struct atmel_hsmc_nand_controller *nc, bool poll,
			  unsigned int timeout_ms)
{
	int ret;

	if (!timeout_ms)
		timeout_ms = DEFAULT_TIMEOUT_MS;

	if (poll) {
		u32 status;

		ret = regmap_read_poll_timeout(nc->base.smc,
					       ATMEL_HSMC_NFC_SR, status,
					       atmel_nfc_op_done(&nc->op,
								 status),
					       0, timeout_ms * 1000);
	} else {
		init_completion(&nc->complete);
		/* Unmask the awaited events; the IRQ handler completes us. */
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IER,
			     nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
		ret = wait_for_completion_timeout(&nc->complete,
						msecs_to_jiffies(timeout_ms));
		if (!ret)
			ret = -ETIMEDOUT;
		else
			ret = 0;

		/* Mask everything again before leaving. */
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
	}

	/* Turn accumulated error flags into an error code (last one wins). */
	if (nc->op.errors & ATMEL_HSMC_NFC_SR_DTOE) {
		dev_err(nc->base.dev, "Waiting NAND R/B Timeout\n");
		ret = -ETIMEDOUT;
	}

	if (nc->op.errors & ATMEL_HSMC_NFC_SR_UNDEF) {
		dev_err(nc->base.dev, "Access to an undefined area\n");
		ret = -EIO;
	}

	if (nc->op.errors & ATMEL_HSMC_NFC_SR_AWB) {
		dev_err(nc->base.dev, "Access while busy\n");
		ret = -EIO;
	}

	if (nc->op.errors & ATMEL_HSMC_NFC_SR_NFCASE) {
		dev_err(nc->base.dev, "Wrong access size\n");
		ret = -EIO;
	}

	return ret;
}
347 
/*
 * DMA completion callback: wake up the waiter sleeping in
 * atmel_nand_dma_transfer(). @data is the on-stack completion.
 */
static void atmel_nand_dma_transfer_finished(void *data)
{
	complete(data);
}
354 
355 static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
356 				   void *buf, dma_addr_t dev_dma, size_t len,
357 				   enum dma_data_direction dir)
358 {
359 	DECLARE_COMPLETION_ONSTACK(finished);
360 	dma_addr_t src_dma, dst_dma, buf_dma;
361 	struct dma_async_tx_descriptor *tx;
362 	dma_cookie_t cookie;
363 
364 	buf_dma = dma_map_single(nc->dev, buf, len, dir);
365 	if (dma_mapping_error(nc->dev, dev_dma)) {
366 		dev_err(nc->dev,
367 			"Failed to prepare a buffer for DMA access\n");
368 		goto err;
369 	}
370 
371 	if (dir == DMA_FROM_DEVICE) {
372 		src_dma = dev_dma;
373 		dst_dma = buf_dma;
374 	} else {
375 		src_dma = buf_dma;
376 		dst_dma = dev_dma;
377 	}
378 
379 	tx = dmaengine_prep_dma_memcpy(nc->dmac, dst_dma, src_dma, len,
380 				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
381 	if (!tx) {
382 		dev_err(nc->dev, "Failed to prepare DMA memcpy\n");
383 		goto err_unmap;
384 	}
385 
386 	tx->callback = atmel_nand_dma_transfer_finished;
387 	tx->callback_param = &finished;
388 
389 	cookie = dmaengine_submit(tx);
390 	if (dma_submit_error(cookie)) {
391 		dev_err(nc->dev, "Failed to do DMA tx_submit\n");
392 		goto err_unmap;
393 	}
394 
395 	dma_async_issue_pending(nc->dmac);
396 	wait_for_completion(&finished);
397 
398 	return 0;
399 
400 err_unmap:
401 	dma_unmap_single(nc->dev, buf_dma, len, dir);
402 
403 err:
404 	dev_dbg(nc->dev, "Fall back to CPU I/O\n");
405 
406 	return -EIO;
407 }
408 
409 static u8 atmel_nand_read_byte(struct mtd_info *mtd)
410 {
411 	struct nand_chip *chip = mtd_to_nand(mtd);
412 	struct atmel_nand *nand = to_atmel_nand(chip);
413 
414 	return ioread8(nand->activecs->io.virt);
415 }
416 
417 static u16 atmel_nand_read_word(struct mtd_info *mtd)
418 {
419 	struct nand_chip *chip = mtd_to_nand(mtd);
420 	struct atmel_nand *nand = to_atmel_nand(chip);
421 
422 	return ioread16(nand->activecs->io.virt);
423 }
424 
425 static void atmel_nand_write_byte(struct mtd_info *mtd, u8 byte)
426 {
427 	struct nand_chip *chip = mtd_to_nand(mtd);
428 	struct atmel_nand *nand = to_atmel_nand(chip);
429 
430 	if (chip->options & NAND_BUSWIDTH_16)
431 		iowrite16(byte | (byte << 8), nand->activecs->io.virt);
432 	else
433 		iowrite8(byte, nand->activecs->io.virt);
434 }
435 
436 static void atmel_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
437 {
438 	struct nand_chip *chip = mtd_to_nand(mtd);
439 	struct atmel_nand *nand = to_atmel_nand(chip);
440 	struct atmel_nand_controller *nc;
441 
442 	nc = to_nand_controller(chip->controller);
443 
444 	/*
445 	 * If the controller supports DMA, the buffer address is DMA-able and
446 	 * len is long enough to make DMA transfers profitable, let's trigger
447 	 * a DMA transfer. If it fails, fallback to PIO mode.
448 	 */
449 	if (nc->dmac && virt_addr_valid(buf) &&
450 	    len >= MIN_DMA_LEN &&
451 	    !atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
452 				     DMA_FROM_DEVICE))
453 		return;
454 
455 	if (chip->options & NAND_BUSWIDTH_16)
456 		ioread16_rep(nand->activecs->io.virt, buf, len / 2);
457 	else
458 		ioread8_rep(nand->activecs->io.virt, buf, len);
459 }
460 
461 static void atmel_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
462 {
463 	struct nand_chip *chip = mtd_to_nand(mtd);
464 	struct atmel_nand *nand = to_atmel_nand(chip);
465 	struct atmel_nand_controller *nc;
466 
467 	nc = to_nand_controller(chip->controller);
468 
469 	/*
470 	 * If the controller supports DMA, the buffer address is DMA-able and
471 	 * len is long enough to make DMA transfers profitable, let's trigger
472 	 * a DMA transfer. If it fails, fallback to PIO mode.
473 	 */
474 	if (nc->dmac && virt_addr_valid(buf) &&
475 	    len >= MIN_DMA_LEN &&
476 	    !atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
477 				     len, DMA_TO_DEVICE))
478 		return;
479 
480 	if (chip->options & NAND_BUSWIDTH_16)
481 		iowrite16_rep(nand->activecs->io.virt, buf, len / 2);
482 	else
483 		iowrite8_rep(nand->activecs->io.virt, buf, len);
484 }
485 
486 static int atmel_nand_dev_ready(struct mtd_info *mtd)
487 {
488 	struct nand_chip *chip = mtd_to_nand(mtd);
489 	struct atmel_nand *nand = to_atmel_nand(chip);
490 
491 	return gpiod_get_value(nand->activecs->rb.gpio);
492 }
493 
494 static void atmel_nand_select_chip(struct mtd_info *mtd, int cs)
495 {
496 	struct nand_chip *chip = mtd_to_nand(mtd);
497 	struct atmel_nand *nand = to_atmel_nand(chip);
498 
499 	if (cs < 0 || cs >= nand->numcs) {
500 		nand->activecs = NULL;
501 		chip->dev_ready = NULL;
502 		return;
503 	}
504 
505 	nand->activecs = &nand->cs[cs];
506 
507 	if (nand->activecs->rb.type == ATMEL_NAND_GPIO_RB)
508 		chip->dev_ready = atmel_nand_dev_ready;
509 }
510 
511 static int atmel_hsmc_nand_dev_ready(struct mtd_info *mtd)
512 {
513 	struct nand_chip *chip = mtd_to_nand(mtd);
514 	struct atmel_nand *nand = to_atmel_nand(chip);
515 	struct atmel_hsmc_nand_controller *nc;
516 	u32 status;
517 
518 	nc = to_hsmc_nand_controller(chip->controller);
519 
520 	regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &status);
521 
522 	return status & ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
523 }
524 
/*
 * HSMC chip selection: on top of the generic selection, disable the NFC
 * when deselecting and (re)program the NFC page/spare geometry for the
 * newly selected chip.
 */
static void atmel_hsmc_nand_select_chip(struct mtd_info *mtd, int cs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;

	nc = to_hsmc_nand_controller(chip->controller);

	atmel_nand_select_chip(mtd, cs);

	if (!nand->activecs) {
		/* Deselected: turn the NFC off. */
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
			     ATMEL_HSMC_NFC_CTRL_DIS);
		return;
	}

	/* Native R/B lines are reported through the NFC status register. */
	if (nand->activecs->rb.type == ATMEL_NAND_NATIVE_RB)
		chip->dev_ready = atmel_hsmc_nand_dev_ready;

	/*
	 * Program page size, spare size and automatic spare read (but not
	 * automatic spare write) for the selected chip, then enable the NFC.
	 */
	regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
			   ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
			   ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK |
			   ATMEL_HSMC_NFC_CFG_RSPARE |
			   ATMEL_HSMC_NFC_CFG_WSPARE,
			   ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
			   ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
			   ATMEL_HSMC_NFC_CFG_RSPARE);
	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
		     ATMEL_HSMC_NFC_CTRL_EN);
}
555 
/*
 * Execute the NFC operation accumulated in nc->op: build the command
 * word (command cycles, address cycle count, CS, data-transfer flags),
 * issue it through the NFC command space and wait for completion.
 *
 * @poll: forwarded to atmel_nfc_wait() (polling vs interrupt wait).
 *
 * The op state is always reset before returning. Returns 0 or a
 * negative error code from atmel_nfc_wait().
 */
static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
{
	u8 *addrs = nc->op.addrs;
	unsigned int op = 0;
	u32 addr, val;
	int i, ret;

	nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE;

	for (i = 0; i < nc->op.ncmds; i++)
		op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]);

	/* A 5th address cycle goes through the dedicated ADDR register. */
	if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++);

	op |= ATMEL_NFC_CSID(nc->op.cs) |
	      ATMEL_NFC_ACYCLE(nc->op.naddrs);

	if (nc->op.ncmds > 1)
		op |= ATMEL_NFC_VCMD2;

	/* Up to 4 address cycles are packed into the written value. */
	addr = addrs[0] | (addrs[1] << 8) | (addrs[2] << 16) |
	       (addrs[3] << 24);

	if (nc->op.data != ATMEL_NFC_NO_DATA) {
		op |= ATMEL_NFC_DATAEN;
		nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE;

		if (nc->op.data == ATMEL_NFC_WRITE_DATA)
			op |= ATMEL_NFC_NFCWR;
	}

	/* Clear all flags. */
	regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val);

	/*
	 * Send the command: on the NFC command space the register offset
	 * itself encodes the operation, the written value the address.
	 */
	regmap_write(nc->io, op, addr);

	ret = atmel_nfc_wait(nc, poll, 0);
	if (ret)
		dev_err(nc->base.dev,
			"Failed to send NAND command (err = %d)!",
			ret);

	/* Reset the op state. */
	memset(&nc->op, 0, sizeof(nc->op));

	return ret;
}
605 
/*
 * Legacy ->cmd_ctrl() hook for the HSMC/NFC path: instead of toggling
 * the bus directly, accumulate address and command cycles in nc->op and
 * fire the whole operation at once when the core signals the end of the
 * sequence (NAND_CMD_NONE).
 */
static void atmel_hsmc_nand_cmd_ctrl(struct mtd_info *mtd, int dat,
				     unsigned int ctrl)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;

	nc = to_hsmc_nand_controller(chip->controller);

	if (ctrl & NAND_ALE) {
		/* Silently drop address cycles beyond what the NFC holds. */
		if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
			return;

		nc->op.addrs[nc->op.naddrs++] = dat;
	} else if (ctrl & NAND_CLE) {
		/* The NFC can only chain two command cycles. */
		if (nc->op.ncmds > 1)
			return;

		nc->op.cmds[nc->op.ncmds++] = dat;
	}

	if (dat == NAND_CMD_NONE) {
		nc->op.cs = nand->activecs->id;
		atmel_nfc_exec_op(nc, true);
	}
}
632 
633 static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
634 				unsigned int ctrl)
635 {
636 	struct nand_chip *chip = mtd_to_nand(mtd);
637 	struct atmel_nand *nand = to_atmel_nand(chip);
638 	struct atmel_nand_controller *nc;
639 
640 	nc = to_nand_controller(chip->controller);
641 
642 	if ((ctrl & NAND_CTRL_CHANGE) && nand->activecs->csgpio) {
643 		if (ctrl & NAND_NCE)
644 			gpiod_set_value(nand->activecs->csgpio, 0);
645 		else
646 			gpiod_set_value(nand->activecs->csgpio, 1);
647 	}
648 
649 	if (ctrl & NAND_ALE)
650 		writeb(cmd, nand->activecs->io.virt + nc->caps->ale_offs);
651 	else if (ctrl & NAND_CLE)
652 		writeb(cmd, nand->activecs->io.virt + nc->caps->cle_offs);
653 }
654 
655 static void atmel_nfc_copy_to_sram(struct nand_chip *chip, const u8 *buf,
656 				   bool oob_required)
657 {
658 	struct mtd_info *mtd = nand_to_mtd(chip);
659 	struct atmel_hsmc_nand_controller *nc;
660 	int ret = -EIO;
661 
662 	nc = to_hsmc_nand_controller(chip->controller);
663 
664 	if (nc->base.dmac)
665 		ret = atmel_nand_dma_transfer(&nc->base, (void *)buf,
666 					      nc->sram.dma, mtd->writesize,
667 					      DMA_TO_DEVICE);
668 
669 	/* Falling back to CPU copy. */
670 	if (ret)
671 		memcpy_toio(nc->sram.virt, buf, mtd->writesize);
672 
673 	if (oob_required)
674 		memcpy_toio(nc->sram.virt + mtd->writesize, chip->oob_poi,
675 			    mtd->oobsize);
676 }
677 
678 static void atmel_nfc_copy_from_sram(struct nand_chip *chip, u8 *buf,
679 				     bool oob_required)
680 {
681 	struct mtd_info *mtd = nand_to_mtd(chip);
682 	struct atmel_hsmc_nand_controller *nc;
683 	int ret = -EIO;
684 
685 	nc = to_hsmc_nand_controller(chip->controller);
686 
687 	if (nc->base.dmac)
688 		ret = atmel_nand_dma_transfer(&nc->base, buf, nc->sram.dma,
689 					      mtd->writesize, DMA_FROM_DEVICE);
690 
691 	/* Falling back to CPU copy. */
692 	if (ret)
693 		memcpy_fromio(buf, nc->sram.virt, mtd->writesize);
694 
695 	if (oob_required)
696 		memcpy_fromio(chip->oob_poi, nc->sram.virt + mtd->writesize,
697 			      mtd->oobsize);
698 }
699 
700 static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
701 {
702 	struct mtd_info *mtd = nand_to_mtd(chip);
703 	struct atmel_hsmc_nand_controller *nc;
704 
705 	nc = to_hsmc_nand_controller(chip->controller);
706 
707 	if (column >= 0) {
708 		nc->op.addrs[nc->op.naddrs++] = column;
709 
710 		/*
711 		 * 2 address cycles for the column offset on large page NANDs.
712 		 */
713 		if (mtd->writesize > 512)
714 			nc->op.addrs[nc->op.naddrs++] = column >> 8;
715 	}
716 
717 	if (page >= 0) {
718 		nc->op.addrs[nc->op.naddrs++] = page;
719 		nc->op.addrs[nc->op.naddrs++] = page >> 8;
720 
721 		if (chip->options & NAND_ROW_ADDR_3)
722 			nc->op.addrs[nc->op.naddrs++] = page >> 16;
723 	}
724 }
725 
726 static int atmel_nand_pmecc_enable(struct nand_chip *chip, int op, bool raw)
727 {
728 	struct atmel_nand *nand = to_atmel_nand(chip);
729 	struct atmel_nand_controller *nc;
730 	int ret;
731 
732 	nc = to_nand_controller(chip->controller);
733 
734 	if (raw)
735 		return 0;
736 
737 	ret = atmel_pmecc_enable(nand->pmecc, op);
738 	if (ret)
739 		dev_err(nc->dev,
740 			"Failed to enable ECC engine (err = %d)\n", ret);
741 
742 	return ret;
743 }
744 
745 static void atmel_nand_pmecc_disable(struct nand_chip *chip, bool raw)
746 {
747 	struct atmel_nand *nand = to_atmel_nand(chip);
748 
749 	if (!raw)
750 		atmel_pmecc_disable(nand->pmecc);
751 }
752 
753 static int atmel_nand_pmecc_generate_eccbytes(struct nand_chip *chip, bool raw)
754 {
755 	struct atmel_nand *nand = to_atmel_nand(chip);
756 	struct mtd_info *mtd = nand_to_mtd(chip);
757 	struct atmel_nand_controller *nc;
758 	struct mtd_oob_region oobregion;
759 	void *eccbuf;
760 	int ret, i;
761 
762 	nc = to_nand_controller(chip->controller);
763 
764 	if (raw)
765 		return 0;
766 
767 	ret = atmel_pmecc_wait_rdy(nand->pmecc);
768 	if (ret) {
769 		dev_err(nc->dev,
770 			"Failed to transfer NAND page data (err = %d)\n",
771 			ret);
772 		return ret;
773 	}
774 
775 	mtd_ooblayout_ecc(mtd, 0, &oobregion);
776 	eccbuf = chip->oob_poi + oobregion.offset;
777 
778 	for (i = 0; i < chip->ecc.steps; i++) {
779 		atmel_pmecc_get_generated_eccbytes(nand->pmecc, i,
780 						   eccbuf);
781 		eccbuf += chip->ecc.bytes;
782 	}
783 
784 	return 0;
785 }
786 
/*
 * Check/correct freshly read page data with the PMECC engine.
 *
 * For each ECC sector, ask the engine to correct it against the ECC
 * bytes stored in the OOB buffer; when a sector is uncorrectable and
 * the engine cannot handle erased pages itself, re-check the sector in
 * software for the "erased page with bitflips" case. Uncorrectable
 * sectors bump mtd->ecc_stats.failed.
 *
 * Returns the maximum number of bitflips found in a sector, or a
 * negative error code. No-op (returns 0) for raw accesses.
 */
static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
					 bool raw)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand_controller *nc;
	struct mtd_oob_region oobregion;
	int ret, i, max_bitflips = 0;
	void *databuf, *eccbuf;

	nc = to_nand_controller(chip->controller);

	if (raw)
		return 0;

	ret = atmel_pmecc_wait_rdy(nand->pmecc);
	if (ret) {
		dev_err(nc->dev,
			"Failed to read NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	mtd_ooblayout_ecc(mtd, 0, &oobregion);
	eccbuf = chip->oob_poi + oobregion.offset;
	databuf = buf;

	for (i = 0; i < chip->ecc.steps; i++) {
		ret = atmel_pmecc_correct_sector(nand->pmecc, i, databuf,
						 eccbuf);
		if (ret < 0 && !atmel_pmecc_correct_erased_chunks(nand->pmecc))
			ret = nand_check_erased_ecc_chunk(databuf,
							  chip->ecc.size,
							  eccbuf,
							  chip->ecc.bytes,
							  NULL, 0,
							  chip->ecc.strength);

		if (ret >= 0)
			max_bitflips = max(ret, max_bitflips);
		else
			mtd->ecc_stats.failed++;

		databuf += chip->ecc.size;
		eccbuf += chip->ecc.bytes;
	}

	return max_bitflips;
}
836 
837 static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
838 				     bool oob_required, int page, bool raw)
839 {
840 	struct mtd_info *mtd = nand_to_mtd(chip);
841 	struct atmel_nand *nand = to_atmel_nand(chip);
842 	int ret;
843 
844 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
845 
846 	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
847 	if (ret)
848 		return ret;
849 
850 	atmel_nand_write_buf(mtd, buf, mtd->writesize);
851 
852 	ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
853 	if (ret) {
854 		atmel_pmecc_disable(nand->pmecc);
855 		return ret;
856 	}
857 
858 	atmel_nand_pmecc_disable(chip, raw);
859 
860 	atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
861 
862 	return nand_prog_page_end_op(chip);
863 }
864 
/* ->ecc.write_page() hook: PIO PMECC write, ECC enabled. */
static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
				       struct nand_chip *chip, const u8 *buf,
				       int oob_required, int page)
{
	return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, false);
}
871 
/* ->ecc.write_page_raw() hook: PIO write with the ECC engine bypassed. */
static int atmel_nand_pmecc_write_page_raw(struct mtd_info *mtd,
					   struct nand_chip *chip,
					   const u8 *buf, int oob_required,
					   int page)
{
	return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, true);
}
879 
/*
 * Read one page with PMECC protection over PIO: issue the read op,
 * enable the engine so it can process the data as it is transferred,
 * read data + OOB, then correct the data. @raw bypasses the engine.
 *
 * Returns max bitflips per sector or a negative error code.
 */
static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
				    bool oob_required, int page, bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	nand_read_page_op(chip, page, 0, NULL, 0);

	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
	if (ret)
		return ret;

	atmel_nand_read_buf(mtd, buf, mtd->writesize);
	atmel_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);

	ret = atmel_nand_pmecc_correct_data(chip, buf, raw);

	atmel_nand_pmecc_disable(chip, raw);

	return ret;
}
901 
/* ->ecc.read_page() hook: PIO PMECC read, ECC enabled. */
static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
				      struct nand_chip *chip, u8 *buf,
				      int oob_required, int page)
{
	return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, false);
}
908 
/* ->ecc.read_page_raw() hook: PIO read with the ECC engine bypassed. */
static int atmel_nand_pmecc_read_page_raw(struct mtd_info *mtd,
					  struct nand_chip *chip, u8 *buf,
					  int oob_required, int page)
{
	return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, true);
}
915 
/*
 * Program one page with PMECC protection through the NFC: the data is
 * staged in the NFC SRAM and pushed to the NAND by the controller while
 * the ECC engine computes the ECC bytes. The OOB (holding the generated
 * ECC) is then written by PIO before PAGEPROG is issued.
 *
 * Returns 0 on success, -EIO if the chip reports a program failure, or
 * another negative error code.
 */
static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
					  const u8 *buf, bool oob_required,
					  int page, bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;
	int ret, status;

	nc = to_hsmc_nand_controller(chip->controller);

	/* Stage the data area (not the OOB) in the NFC SRAM. */
	atmel_nfc_copy_to_sram(chip, buf, false);

	nc->op.cmds[0] = NAND_CMD_SEQIN;
	nc->op.ncmds = 1;
	atmel_nfc_set_op_addr(chip, page, 0x0);
	nc->op.cs = nand->activecs->id;
	nc->op.data = ATMEL_NFC_WRITE_DATA;

	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
	if (ret)
		return ret;

	ret = atmel_nfc_exec_op(nc, false);
	if (ret) {
		atmel_nand_pmecc_disable(chip, raw);
		dev_err(nc->base.dev,
			"Failed to transfer NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);

	atmel_nand_pmecc_disable(chip, raw);

	if (ret)
		return ret;

	/* Write the OOB area, now holding the generated ECC bytes. */
	atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);

	nc->op.cmds[0] = NAND_CMD_PAGEPROG;
	nc->op.ncmds = 1;
	nc->op.cs = nand->activecs->id;
	ret = atmel_nfc_exec_op(nc, false);
	if (ret)
		dev_err(nc->base.dev, "Failed to program NAND page (err = %d)\n",
			ret);

	/* Ask the chip whether the program operation actually succeeded. */
	status = chip->waitfunc(mtd, chip);
	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return ret;
}
971 
/* ->ecc.write_page() hook for the NFC path, ECC enabled. */
static int atmel_hsmc_nand_pmecc_write_page(struct mtd_info *mtd,
					    struct nand_chip *chip,
					    const u8 *buf, int oob_required,
					    int page)
{
	return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
					      false);
}
980 
/* ->ecc.write_page_raw() hook for the NFC path, ECC bypassed. */
static int atmel_hsmc_nand_pmecc_write_page_raw(struct mtd_info *mtd,
						struct nand_chip *chip,
						const u8 *buf,
						int oob_required, int page)
{
	return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
					      true);
}
989 
/*
 * Read one page with PMECC protection through the NFC: the controller
 * pulls data + OOB into its SRAM while the ECC engine processes the
 * transfer, then the buffers are copied out and corrected.
 *
 * Returns max bitflips per sector or a negative error code.
 */
static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
					 bool oob_required, int page,
					 bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;
	int ret;

	nc = to_hsmc_nand_controller(chip->controller);

	/*
	 * Optimized read page accessors only work when the NAND R/B pin is
	 * connected to a native SoC R/B pin. If that's not the case, fallback
	 * to the non-optimized one.
	 */
	if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) {
		/*
		 * NOTE(review): atmel_nand_pmecc_read_pg() issues its own
		 * nand_read_page_op(), so the read op is sent twice on this
		 * path — looks redundant; confirm before removing.
		 */
		nand_read_page_op(chip, page, 0, NULL, 0);

		return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
						raw);
	}

	nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0;

	/* Large page devices need the READSTART second command cycle. */
	if (mtd->writesize > 512)
		nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READSTART;

	atmel_nfc_set_op_addr(chip, page, 0x0);
	nc->op.cs = nand->activecs->id;
	nc->op.data = ATMEL_NFC_READ_DATA;

	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
	if (ret)
		return ret;

	ret = atmel_nfc_exec_op(nc, false);
	if (ret) {
		atmel_nand_pmecc_disable(chip, raw);
		dev_err(nc->base.dev,
			"Failed to load NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	/* Data and OOB both sit in the NFC SRAM after the transfer. */
	atmel_nfc_copy_from_sram(chip, buf, true);

	ret = atmel_nand_pmecc_correct_data(chip, buf, raw);

	atmel_nand_pmecc_disable(chip, raw);

	return ret;
}
1043 
/* ->ecc.read_page() hook for the NFC path, ECC enabled. */
static int atmel_hsmc_nand_pmecc_read_page(struct mtd_info *mtd,
					   struct nand_chip *chip, u8 *buf,
					   int oob_required, int page)
{
	return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
					     false);
}
1051 
/* ->ecc.read_page_raw() hook for the NFC path, ECC bypassed. */
static int atmel_hsmc_nand_pmecc_read_page_raw(struct mtd_info *mtd,
					       struct nand_chip *chip,
					       u8 *buf, int oob_required,
					       int page)
{
	return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
					     true);
}
1060 
/*
 * Configure the PMECC engine for this chip and register the resulting
 * ECC geometry (sector size, strength, bytes per step) with the core.
 *
 * Strength and sector size are taken from, in order of precedence: the
 * NAND_ECC_MAXIMIZE flag, already-set ecc params, legacy DT properties,
 * the chip's datasheet requirements, then PMECC auto/maximize modes.
 *
 * Returns 0 on success or a negative error code.
 */
static int atmel_nand_pmecc_init(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_nand_controller *nc;
	struct atmel_pmecc_user_req req;

	nc = to_nand_controller(chip->controller);

	if (!nc->pmecc) {
		dev_err(nc->dev, "HW ECC not supported\n");
		return -ENOTSUPP;
	}

	/* Legacy bindings carry the ECC requirements on the parent node. */
	if (nc->caps->legacy_of_bindings) {
		u32 val;

		if (!of_property_read_u32(nc->dev->of_node, "atmel,pmecc-cap",
					  &val))
			chip->ecc.strength = val;

		if (!of_property_read_u32(nc->dev->of_node,
					  "atmel,pmecc-sector-size",
					  &val))
			chip->ecc.size = val;
	}

	if (chip->ecc.options & NAND_ECC_MAXIMIZE)
		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
	else if (chip->ecc.strength)
		req.ecc.strength = chip->ecc.strength;
	else if (chip->ecc_strength_ds)
		req.ecc.strength = chip->ecc_strength_ds;
	else
		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;

	if (chip->ecc.size)
		req.ecc.sectorsize = chip->ecc.size;
	else if (chip->ecc_step_ds)
		req.ecc.sectorsize = chip->ecc_step_ds;
	else
		req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;

	req.pagesize = mtd->writesize;
	req.oobsize = mtd->oobsize;

	if (mtd->writesize <= 512) {
		/* Small page: 4 ECC bytes at the start of the OOB area. */
		req.ecc.bytes = 4;
		req.ecc.ooboffset = 0;
	} else {
		/* Large page: all OOB minus the 2 bad-block marker bytes. */
		req.ecc.bytes = mtd->oobsize - 2;
		req.ecc.ooboffset = ATMEL_PMECC_OOBOFFSET_AUTO;
	}

	nand->pmecc = atmel_pmecc_create_user(nc->pmecc, &req);
	if (IS_ERR(nand->pmecc))
		return PTR_ERR(nand->pmecc);

	chip->ecc.algo = NAND_ECC_BCH;
	chip->ecc.size = req.ecc.sectorsize;
	chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors;
	chip->ecc.strength = req.ecc.strength;

	/* PMECC covers whole sectors: subpage writes can't be supported. */
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);

	return 0;
}
1130 
1131 static int atmel_nand_ecc_init(struct atmel_nand *nand)
1132 {
1133 	struct nand_chip *chip = &nand->base;
1134 	struct atmel_nand_controller *nc;
1135 	int ret;
1136 
1137 	nc = to_nand_controller(chip->controller);
1138 
1139 	switch (chip->ecc.mode) {
1140 	case NAND_ECC_NONE:
1141 	case NAND_ECC_SOFT:
1142 		/*
1143 		 * Nothing to do, the core will initialize everything for us.
1144 		 */
1145 		break;
1146 
1147 	case NAND_ECC_HW:
1148 		ret = atmel_nand_pmecc_init(chip);
1149 		if (ret)
1150 			return ret;
1151 
1152 		chip->ecc.read_page = atmel_nand_pmecc_read_page;
1153 		chip->ecc.write_page = atmel_nand_pmecc_write_page;
1154 		chip->ecc.read_page_raw = atmel_nand_pmecc_read_page_raw;
1155 		chip->ecc.write_page_raw = atmel_nand_pmecc_write_page_raw;
1156 		break;
1157 
1158 	default:
1159 		/* Other modes are not supported. */
1160 		dev_err(nc->dev, "Unsupported ECC mode: %d\n",
1161 			chip->ecc.mode);
1162 		return -ENOTSUPP;
1163 	}
1164 
1165 	return 0;
1166 }
1167 
1168 static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand)
1169 {
1170 	struct nand_chip *chip = &nand->base;
1171 	int ret;
1172 
1173 	ret = atmel_nand_ecc_init(nand);
1174 	if (ret)
1175 		return ret;
1176 
1177 	if (chip->ecc.mode != NAND_ECC_HW)
1178 		return 0;
1179 
1180 	/* Adjust the ECC operations for the HSMC IP. */
1181 	chip->ecc.read_page = atmel_hsmc_nand_pmecc_read_page;
1182 	chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
1183 	chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
1184 	chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
1185 
1186 	return 0;
1187 }
1188 
/*
 * Translate a generic SDR NAND timing description into SMC chip-select
 * register values (setup/pulse/cycle/timing fields in @smcconf).
 *
 * Returns 0 on success, -ENOTSUPP for DDR interfaces or EDO-rate timings
 * (tRC < 30ns), or a negative error code when a computed cycle count does
 * not fit in its register field (except for tADL, see below).
 */
static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
					const struct nand_data_interface *conf,
					struct atmel_smc_cs_conf *smcconf)
{
	u32 ncycles, totalcycles, timeps, mckperiodps;
	struct atmel_nand_controller *nc;
	int ret;

	nc = to_nand_controller(nand->base.controller);

	/* DDR interface not supported. */
	if (conf->type != NAND_SDR_IFACE)
		return -ENOTSUPP;

	/*
	 * tRC < 30ns implies EDO mode. This controller does not support this
	 * mode.
	 */
	if (conf->timings.sdr.tRC_min < 30000)
		return -ENOTSUPP;

	atmel_smc_cs_conf_init(smcconf);

	/*
	 * Master clock period in picoseconds.
	 * NOTE(review): the division is done at ns resolution before the
	 * *1000, so the result is truncated to a multiple of 1000ps —
	 * confirm this rounding is acceptable at high MCK rates.
	 */
	mckperiodps = NSEC_PER_SEC / clk_get_rate(nc->mck);
	mckperiodps *= 1000;

	/*
	 * Set write pulse timing. This one is easy to extract:
	 *
	 * NWE_PULSE = tWP
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWP_min, mckperiodps);
	totalcycles = ncycles;
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NWE_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * The write setup timing depends on the operation done on the NAND.
	 * All operations goes through the same data bus, but the operation
	 * type depends on the address we are writing to (ALE/CLE address
	 * lines).
	 * Since we have no way to differentiate the different operations at
	 * the SMC level, we must consider the worst case (the biggest setup
	 * time among all operation types):
	 *
	 * NWE_SETUP = max(tCLS, tCS, tALS, tDS) - NWE_PULSE
	 */
	timeps = max3(conf->timings.sdr.tCLS_min, conf->timings.sdr.tCS_min,
		      conf->timings.sdr.tALS_min);
	timeps = max(timeps, conf->timings.sdr.tDS_min);
	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
	ncycles = ncycles > totalcycles ? ncycles - totalcycles : 0;
	totalcycles += ncycles;
	ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NWE_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * As for the write setup timing, the write hold timing depends on the
	 * operation done on the NAND:
	 *
	 * NWE_HOLD = max(tCLH, tCH, tALH, tDH, tWH)
	 */
	timeps = max3(conf->timings.sdr.tCLH_min, conf->timings.sdr.tCH_min,
		      conf->timings.sdr.tALH_min);
	timeps = max3(timeps, conf->timings.sdr.tDH_min,
		      conf->timings.sdr.tWH_min);
	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
	totalcycles += ncycles;

	/*
	 * The write cycle timing is directly matching tWC, but is also
	 * dependent on the other timings on the setup and hold timings we
	 * calculated earlier, which gives:
	 *
	 * NWE_CYCLE = max(tWC, NWE_SETUP + NWE_PULSE + NWE_HOLD)
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWC_min, mckperiodps);
	ncycles = max(totalcycles, ncycles);
	ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NWE_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * We don't want the CS line to be toggled between each byte/word
	 * transfer to the NAND. The only way to guarantee that is to have the
	 * NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
	 *
	 * NCS_WR_PULSE = NWE_CYCLE
	 */
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_WR_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * As for the write setup timing, the read hold timing depends on the
	 * operation done on the NAND:
	 *
	 * NRD_HOLD = max(tREH, tRHOH)
	 */
	timeps = max(conf->timings.sdr.tREH_min, conf->timings.sdr.tRHOH_min);
	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
	totalcycles = ncycles;

	/*
	 * TDF = tRHZ - NRD_HOLD
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRHZ_max, mckperiodps);
	ncycles -= totalcycles;

	/*
	 * In ONFI 4.0 specs, tRHZ has been increased to support EDO NANDs and
	 * we might end up with a config that does not fit in the TDF field.
	 * Just take the max value in this case and hope that the NAND is more
	 * tolerant than advertised.
	 */
	if (ncycles > ATMEL_SMC_MODE_TDF_MAX)
		ncycles = ATMEL_SMC_MODE_TDF_MAX;
	else if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
		ncycles = ATMEL_SMC_MODE_TDF_MIN;

	smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles) |
			 ATMEL_SMC_MODE_TDFMODE_OPTIMIZED;

	/*
	 * Read pulse timing directly matches tRP:
	 *
	 * NRD_PULSE = tRP
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps);
	totalcycles += ncycles;
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * The read cycle timing is directly matching tRC, but is also
	 * dependent on the setup and hold timings we calculated earlier,
	 * which gives:
	 *
	 * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
	 *
	 * NRD_SETUP is always 0.
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
	ncycles = max(totalcycles, ncycles);
	ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NRD_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * We don't want the CS line to be toggled between each byte/word
	 * transfer from the NAND. The only way to guarantee that is to have
	 * the NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
	 *
	 * NCS_RD_PULSE = NRD_CYCLE
	 */
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_RD_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/* Txxx timings are directly matching tXXX ones. */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tCLR_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TCLR_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tADL_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TADL_SHIFT,
					   ncycles);
	/*
	 * Version 4 of the ONFI spec mandates that tADL be at least 400
	 * nanoseconds, but, depending on the master clock rate, 400 ns may not
	 * fit in the tADL field of the SMC reg. We need to relax the check and
	 * accept the -ERANGE return code.
	 *
	 * Note that previous versions of the ONFI spec had a lower tADL_min
	 * (100 or 200 ns). It's not clear why this timing constraint got
	 * increased but it seems most NANDs are fine with values lower than
	 * 400ns, so we should be safe.
	 */
	if (ret && ret != -ERANGE)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TAR_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRR_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TRR_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWB_max, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TWB_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	/* Attach the CS line to the NFC logic. */
	smcconf->timings |= ATMEL_HSMC_TIMINGS_NFSEL;

	/* Set the appropriate data bus width. */
	if (nand->base.options & NAND_BUSWIDTH_16)
		smcconf->mode |= ATMEL_SMC_MODE_DBW_16;

	/* Operate in NRD/NWE READ/WRITEMODE. */
	smcconf->mode |= ATMEL_SMC_MODE_READMODE_NRD |
			 ATMEL_SMC_MODE_WRITEMODE_NWE;

	return 0;
}
1418 
1419 static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand,
1420 					int csline,
1421 					const struct nand_data_interface *conf)
1422 {
1423 	struct atmel_nand_controller *nc;
1424 	struct atmel_smc_cs_conf smcconf;
1425 	struct atmel_nand_cs *cs;
1426 	int ret;
1427 
1428 	nc = to_nand_controller(nand->base.controller);
1429 
1430 	ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
1431 	if (ret)
1432 		return ret;
1433 
1434 	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1435 		return 0;
1436 
1437 	cs = &nand->cs[csline];
1438 	cs->smcconf = smcconf;
1439 	atmel_smc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
1440 
1441 	return 0;
1442 }
1443 
1444 static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand,
1445 					int csline,
1446 					const struct nand_data_interface *conf)
1447 {
1448 	struct atmel_hsmc_nand_controller *nc;
1449 	struct atmel_smc_cs_conf smcconf;
1450 	struct atmel_nand_cs *cs;
1451 	int ret;
1452 
1453 	nc = to_hsmc_nand_controller(nand->base.controller);
1454 
1455 	ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
1456 	if (ret)
1457 		return ret;
1458 
1459 	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1460 		return 0;
1461 
1462 	cs = &nand->cs[csline];
1463 	cs->smcconf = smcconf;
1464 
1465 	if (cs->rb.type == ATMEL_NAND_NATIVE_RB)
1466 		cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id);
1467 
1468 	atmel_hsmc_cs_conf_apply(nc->base.smc, nc->hsmc_layout, cs->id,
1469 				 &cs->smcconf);
1470 
1471 	return 0;
1472 }
1473 
1474 static int atmel_nand_setup_data_interface(struct mtd_info *mtd, int csline,
1475 					const struct nand_data_interface *conf)
1476 {
1477 	struct nand_chip *chip = mtd_to_nand(mtd);
1478 	struct atmel_nand *nand = to_atmel_nand(chip);
1479 	struct atmel_nand_controller *nc;
1480 
1481 	nc = to_nand_controller(nand->base.controller);
1482 
1483 	if (csline >= nand->numcs ||
1484 	    (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY))
1485 		return -EINVAL;
1486 
1487 	return nc->caps->ops->setup_data_interface(nand, csline, conf);
1488 }
1489 
/*
 * Common chip setup shared by the SMC and HSMC controllers: wire the
 * controller-specific IO accessors into the nand_chip and set default
 * options.
 */
static void atmel_nand_init(struct atmel_nand_controller *nc,
			    struct atmel_nand *nand)
{
	struct nand_chip *chip = &nand->base;
	struct mtd_info *mtd = nand_to_mtd(chip);

	mtd->dev.parent = nc->dev;
	nand->base.controller = &nc->base;

	chip->cmd_ctrl = atmel_nand_cmd_ctrl;
	chip->read_byte = atmel_nand_read_byte;
	chip->read_word = atmel_nand_read_word;
	chip->write_byte = atmel_nand_write_byte;
	chip->read_buf = atmel_nand_read_buf;
	chip->write_buf = atmel_nand_write_buf;
	chip->select_chip = atmel_nand_select_chip;

	/* Timing tuning only makes sense when the master clock is known. */
	if (nc->mck && nc->caps->ops->setup_data_interface)
		chip->setup_data_interface = atmel_nand_setup_data_interface;

	/* Some NANDs require a longer delay than the default one (20us). */
	chip->chip_delay = 40;

	/*
	 * Use a bounce buffer when the buffer passed by the MTD user is not
	 * suitable for DMA.
	 */
	if (nc->dmac)
		chip->options |= NAND_USE_BOUNCE_BUFFER;

	/* Default to HW ECC if pmecc is available. */
	if (nc->pmecc)
		chip->ecc.mode = NAND_ECC_HW;
}
1524 
/*
 * SMC-specific chip init: common setup, then, when an EBI matrix syscon is
 * available, assign the chip's CS lines to the NAND Flash logic.
 */
static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
				struct atmel_nand *nand)
{
	struct nand_chip *chip = &nand->base;
	struct atmel_smc_nand_controller *smc_nc;
	int i;

	atmel_nand_init(nc, nand);

	/* Legacy bindings do not provide the matrix syscon. */
	smc_nc = to_smc_nand_controller(chip->controller);
	if (!smc_nc->matrix)
		return;

	/* Attach the CS to the NAND Flash logic. */
	for (i = 0; i < nand->numcs; i++)
		regmap_update_bits(smc_nc->matrix, smc_nc->ebi_csa_offs,
				   BIT(nand->cs[i].id), BIT(nand->cs[i].id));
}
1543 
/*
 * HSMC-specific chip init: common setup plus NFC-aware command and
 * chip-select handlers.
 */
static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
				 struct atmel_nand *nand)
{
	struct nand_chip *chip = &nand->base;

	atmel_nand_init(nc, nand);

	/* Overload some methods for the HSMC controller. */
	chip->cmd_ctrl = atmel_hsmc_nand_cmd_ctrl;
	chip->select_chip = atmel_hsmc_nand_select_chip;
}
1555 
1556 static int atmel_nand_detect(struct atmel_nand *nand)
1557 {
1558 	struct nand_chip *chip = &nand->base;
1559 	struct mtd_info *mtd = nand_to_mtd(chip);
1560 	struct atmel_nand_controller *nc;
1561 	int ret;
1562 
1563 	nc = to_nand_controller(chip->controller);
1564 
1565 	ret = nand_scan_ident(mtd, nand->numcs, NULL);
1566 	if (ret)
1567 		dev_err(nc->dev, "nand_scan_ident() failed: %d\n", ret);
1568 
1569 	return ret;
1570 }
1571 
1572 static int atmel_nand_unregister(struct atmel_nand *nand)
1573 {
1574 	struct nand_chip *chip = &nand->base;
1575 	struct mtd_info *mtd = nand_to_mtd(chip);
1576 	int ret;
1577 
1578 	ret = mtd_device_unregister(mtd);
1579 	if (ret)
1580 		return ret;
1581 
1582 	nand_cleanup(chip);
1583 	list_del(&nand->node);
1584 
1585 	return 0;
1586 }
1587 
/*
 * Pick the MTD name, finish NAND initialization (nand_scan_tail) and
 * register the MTD device, adding the chip to the controller's list.
 *
 * Returns 0 on success or a negative error code; on registration failure
 * the NAND core resources are released before returning.
 */
static int atmel_nand_register(struct atmel_nand *nand)
{
	struct nand_chip *chip = &nand->base;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand_controller *nc;
	int ret;

	nc = to_nand_controller(chip->controller);

	if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
		/*
		 * We keep the MTD name unchanged to avoid breaking platforms
		 * where the MTD cmdline parser is used and the bootloader
		 * has not been updated to use the new naming scheme.
		 */
		mtd->name = "atmel_nand";
	} else if (!mtd->name) {
		/*
		 * If the new bindings are used and the bootloader has not been
		 * updated to pass a new mtdparts parameter on the cmdline, you
		 * should define the following property in your nand node:
		 *
		 *	label = "atmel_nand";
		 *
		 * This way, mtd->name will be set by the core when
		 * nand_set_flash_node() is called.
		 */
		mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
					   "%s:nand.%d", dev_name(nc->dev),
					   nand->cs[0].id);
		if (!mtd->name) {
			dev_err(nc->dev, "Failed to allocate mtd->name\n");
			return -ENOMEM;
		}
	}

	ret = nand_scan_tail(mtd);
	if (ret) {
		dev_err(nc->dev, "nand_scan_tail() failed: %d\n", ret);
		return ret;
	}

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
		nand_cleanup(chip);
		return ret;
	}

	list_add_tail(&nand->node, &nc->chips);

	return 0;
}
1641 
/*
 * Allocate and populate an atmel_nand object from a DT child node (new
 * bindings): one entry per chip-select, with the CS id, IO window, and
 * optional ready/busy, CS and card-detect GPIOs.
 *
 * Returns the new object or an ERR_PTR() on failure. All allocations are
 * devm-managed, so there is no dedicated destroy path.
 */
static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
					    struct device_node *np,
					    int reg_cells)
{
	struct atmel_nand *nand;
	struct gpio_desc *gpio;
	int numcs, ret, i;

	/* One "reg" entry (reg_cells u32s) per chip-select. */
	numcs = of_property_count_elems_of_size(np, "reg",
						reg_cells * sizeof(u32));
	if (numcs < 1) {
		dev_err(nc->dev, "Missing or invalid reg property\n");
		return ERR_PTR(-EINVAL);
	}

	nand = devm_kzalloc(nc->dev,
			    sizeof(*nand) + (numcs * sizeof(*nand->cs)),
			    GFP_KERNEL);
	if (!nand) {
		dev_err(nc->dev, "Failed to allocate NAND object\n");
		return ERR_PTR(-ENOMEM);
	}

	nand->numcs = numcs;

	/* Optional SmartMedia card-detect GPIO (-ENOENT means "not wired"). */
	gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "det", 0,
						      &np->fwnode, GPIOD_IN,
						      "nand-det");
	if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
		dev_err(nc->dev,
			"Failed to get detect gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return ERR_CAST(gpio);
	}

	if (!IS_ERR(gpio))
		nand->cdgpio = gpio;

	for (i = 0; i < numcs; i++) {
		struct resource res;
		u32 val;

		/*
		 * NOTE(review): resource index 0 is used on every loop
		 * iteration, so all chip-selects get the same IO window and
		 * DMA address even though the CS id below is read per-entry.
		 * Looks like this should be index i — verify multi-CS setups.
		 */
		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			dev_err(nc->dev, "Invalid reg property (err = %d)\n",
				ret);
			return ERR_PTR(ret);
		}

		/* First cell of each reg entry is the CS id. */
		ret = of_property_read_u32_index(np, "reg", i * reg_cells,
						 &val);
		if (ret) {
			dev_err(nc->dev, "Invalid reg property (err = %d)\n",
				ret);
			return ERR_PTR(ret);
		}

		nand->cs[i].id = val;

		nand->cs[i].io.dma = res.start;
		nand->cs[i].io.virt = devm_ioremap_resource(nc->dev, &res);
		if (IS_ERR(nand->cs[i].io.virt))
			return ERR_CAST(nand->cs[i].io.virt);

		/* Native ready/busy line takes precedence over a GPIO. */
		if (!of_property_read_u32(np, "atmel,rb", &val)) {
			if (val > ATMEL_NFC_MAX_RB_ID)
				return ERR_PTR(-EINVAL);

			nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
			nand->cs[i].rb.id = val;
		} else {
			gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev,
							"rb", i, &np->fwnode,
							GPIOD_IN, "nand-rb");
			if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
				dev_err(nc->dev,
					"Failed to get R/B gpio (err = %ld)\n",
					PTR_ERR(gpio));
				return ERR_CAST(gpio);
			}

			if (!IS_ERR(gpio)) {
				nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB;
				nand->cs[i].rb.gpio = gpio;
			}
		}

		/* Optional GPIO-driven chip-select (deasserted = high). */
		gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "cs",
							      i, &np->fwnode,
							      GPIOD_OUT_HIGH,
							      "nand-cs");
		if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
			dev_err(nc->dev,
				"Failed to get CS gpio (err = %ld)\n",
				PTR_ERR(gpio));
			return ERR_CAST(gpio);
		}

		if (!IS_ERR(gpio))
			nand->cs[i].csgpio = gpio;
	}

	nand_set_flash_node(&nand->base, np);

	return nand;
}
1748 
1749 static int
1750 atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
1751 			       struct atmel_nand *nand)
1752 {
1753 	int ret;
1754 
1755 	/* No card inserted, skip this NAND. */
1756 	if (nand->cdgpio && gpiod_get_value(nand->cdgpio)) {
1757 		dev_info(nc->dev, "No SmartMedia card inserted.\n");
1758 		return 0;
1759 	}
1760 
1761 	nc->caps->ops->nand_init(nc, nand);
1762 
1763 	ret = atmel_nand_detect(nand);
1764 	if (ret)
1765 		return ret;
1766 
1767 	ret = nc->caps->ops->ecc_init(nand);
1768 	if (ret)
1769 		return ret;
1770 
1771 	return atmel_nand_register(nand);
1772 }
1773 
1774 static int
1775 atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc)
1776 {
1777 	struct atmel_nand *nand, *tmp;
1778 	int ret;
1779 
1780 	list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
1781 		ret = atmel_nand_unregister(nand);
1782 		if (ret)
1783 			return ret;
1784 	}
1785 
1786 	return 0;
1787 }
1788 
/*
 * Instantiate the single NAND chip described by the legacy DT bindings:
 * one CS (hardcoded id 3), IO window from the platform resource, and
 * optional R/B, CS and card-detect GPIOs at fixed indices 0/1/2.
 */
static int
atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
{
	struct device *dev = nc->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct atmel_nand *nand;
	struct gpio_desc *gpio;
	struct resource *res;

	/*
	 * Legacy bindings only allow connecting a single NAND with a unique CS
	 * line to the controller.
	 */
	nand = devm_kzalloc(nc->dev, sizeof(*nand) + sizeof(*nand->cs),
			    GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	nand->numcs = 1;

	/* A NULL res is caught by devm_ioremap_resource() below. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nand->cs[0].io.virt = devm_ioremap_resource(dev, res);
	if (IS_ERR(nand->cs[0].io.virt))
		return PTR_ERR(nand->cs[0].io.virt);

	nand->cs[0].io.dma = res->start;

	/*
	 * The old driver was hardcoding the CS id to 3 for all sama5
	 * controllers. Since this id is only meaningful for the sama5
	 * controller we can safely assign this id to 3 no matter the
	 * controller.
	 * If one wants to connect a NAND to a different CS line, he will
	 * have to use the new bindings.
	 */
	nand->cs[0].id = 3;

	/* R/B GPIO. */
	gpio = devm_gpiod_get_index_optional(dev, NULL, 0,  GPIOD_IN);
	if (IS_ERR(gpio)) {
		dev_err(dev, "Failed to get R/B gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	if (gpio) {
		nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB;
		nand->cs[0].rb.gpio = gpio;
	}

	/* CS GPIO. */
	gpio = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_HIGH);
	if (IS_ERR(gpio)) {
		dev_err(dev, "Failed to get CS gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	nand->cs[0].csgpio = gpio;

	/* Card detect GPIO. */
	gpio = devm_gpiod_get_index_optional(nc->dev, NULL, 2, GPIOD_IN);
	if (IS_ERR(gpio)) {
		dev_err(dev,
			"Failed to get detect gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	nand->cdgpio = gpio;

	nand_set_flash_node(&nand->base, nc->dev->of_node);

	return atmel_nand_controller_add_nand(nc, nand);
}
1864 
1865 static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
1866 {
1867 	struct device_node *np, *nand_np;
1868 	struct device *dev = nc->dev;
1869 	int ret, reg_cells;
1870 	u32 val;
1871 
1872 	/* We do not retrieve the SMC syscon when parsing old DTs. */
1873 	if (nc->caps->legacy_of_bindings)
1874 		return atmel_nand_controller_legacy_add_nands(nc);
1875 
1876 	np = dev->of_node;
1877 
1878 	ret = of_property_read_u32(np, "#address-cells", &val);
1879 	if (ret) {
1880 		dev_err(dev, "missing #address-cells property\n");
1881 		return ret;
1882 	}
1883 
1884 	reg_cells = val;
1885 
1886 	ret = of_property_read_u32(np, "#size-cells", &val);
1887 	if (ret) {
1888 		dev_err(dev, "missing #address-cells property\n");
1889 		return ret;
1890 	}
1891 
1892 	reg_cells += val;
1893 
1894 	for_each_child_of_node(np, nand_np) {
1895 		struct atmel_nand *nand;
1896 
1897 		nand = atmel_nand_create(nc, nand_np, reg_cells);
1898 		if (IS_ERR(nand)) {
1899 			ret = PTR_ERR(nand);
1900 			goto err;
1901 		}
1902 
1903 		ret = atmel_nand_controller_add_nand(nc, nand);
1904 		if (ret)
1905 			goto err;
1906 	}
1907 
1908 	return 0;
1909 
1910 err:
1911 	atmel_nand_controller_remove_nands(nc);
1912 
1913 	return ret;
1914 }
1915 
/* Release the DMA channel and MCK clock grabbed at controller init. */
static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc)
{
	if (nc->dmac)
		dma_release_channel(nc->dmac);

	clk_put(nc->mck);
}
1923 
/*
 * Bus matrix compatibles mapped to the offset of the EBI CS assignment
 * register in the matrix regmap; consumed by
 * atmel_smc_nand_controller_init() to attach CS lines to the NAND logic.
 */
static const struct of_device_id atmel_matrix_of_ids[] = {
	{
		.compatible = "atmel,at91sam9260-matrix",
		.data = (void *)AT91SAM9260_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9261-matrix",
		.data = (void *)AT91SAM9261_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9263-matrix",
		.data = (void *)AT91SAM9263_MATRIX_EBI0CSA,
	},
	{
		.compatible = "atmel,at91sam9rl-matrix",
		.data = (void *)AT91SAM9RL_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9g45-matrix",
		.data = (void *)AT91SAM9G45_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9n12-matrix",
		.data = (void *)AT91SAM9N12_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9x5-matrix",
		.data = (void *)AT91SAM9X5_MATRIX_EBICSA,
	},
	{ /* sentinel */ },
};
1955 
/*
 * Base controller init shared by SMC and HSMC probes: grab the PMECC
 * block, an optional memcpy DMA channel, and (new bindings only) the MCK
 * clock and SMC syscon regmap.
 *
 * Returns 0 on success or a negative error code (-EPROBE_DEFER is
 * propagated silently for the PMECC lookup).
 */
static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
				struct platform_device *pdev,
				const struct atmel_nand_controller_caps *caps)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int ret;

	nand_hw_control_init(&nc->base);
	INIT_LIST_HEAD(&nc->chips);
	nc->dev = dev;
	nc->caps = caps;

	platform_set_drvdata(pdev, nc);

	nc->pmecc = devm_atmel_pmecc_get(dev);
	if (IS_ERR(nc->pmecc)) {
		ret = PTR_ERR(nc->pmecc);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Could not get PMECC object (err = %d)\n",
				ret);
		return ret;
	}

	/* DMA is optional: fall back to PIO if no channel is available. */
	if (nc->caps->has_dma) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		nc->dmac = dma_request_channel(mask, NULL, NULL);
		if (!nc->dmac)
			dev_err(nc->dev, "Failed to request DMA channel\n");
	}

	/* We do not retrieve the SMC syscon when parsing old DTs. */
	if (nc->caps->legacy_of_bindings)
		return 0;

	nc->mck = of_clk_get(dev->parent->of_node, 0);
	if (IS_ERR(nc->mck)) {
		dev_err(dev, "Failed to retrieve MCK clk\n");
		return PTR_ERR(nc->mck);
	}

	/*
	 * NOTE(review): nc->mck is only released in
	 * atmel_nand_controller_cleanup(); the error paths below return with
	 * the clk reference still held — verify the probe failure path calls
	 * the cleanup helper.
	 */
	np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
	if (!np) {
		dev_err(dev, "Missing or invalid atmel,smc property\n");
		return -EINVAL;
	}

	nc->smc = syscon_node_to_regmap(np);
	of_node_put(np);
	if (IS_ERR(nc->smc)) {
		ret = PTR_ERR(nc->smc);
		dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret);
		return ret;
	}

	return 0;
}
2017 
/*
 * SMC-specific controller init: resolve the optional EBI bus matrix
 * syscon and the offset of its CS assignment register. A missing or
 * unrecognized matrix node is not an error (nc->matrix stays NULL and
 * atmel_smc_nand_init() skips the CS assignment).
 */
static int
atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
{
	struct device *dev = nc->base.dev;
	const struct of_device_id *match;
	struct device_node *np;
	int ret;

	/* We do not retrieve the matrix syscon when parsing old DTs. */
	if (nc->base.caps->legacy_of_bindings)
		return 0;

	np = of_parse_phandle(dev->parent->of_node, "atmel,matrix", 0);
	if (!np)
		return 0;

	match = of_match_node(atmel_matrix_of_ids, np);
	if (!match) {
		of_node_put(np);
		return 0;
	}

	nc->matrix = syscon_node_to_regmap(np);
	of_node_put(np);
	if (IS_ERR(nc->matrix)) {
		ret = PTR_ERR(nc->matrix);
		dev_err(dev, "Could not get Matrix regmap (err = %d)\n", ret);
		return ret;
	}

	/* Register offset is stashed in the match data (see table above).
	 * NOTE(review): cast via unsigned int truncates on 64-bit — the
	 * values fit, but uintptr_t would be cleaner; confirm.
	 */
	nc->ebi_csa_offs = (unsigned int)match->data;

	/*
	 * The at91sam9263 has 2 EBIs, if the NAND controller is under EBI1
	 * add 4 to ->ebi_csa_offs.
	 */
	if (of_device_is_compatible(dev->parent->of_node,
				    "atmel,at91sam9263-ebi1"))
		nc->ebi_csa_offs += 4;

	return 0;
}
2060 
2061 static int
2062 atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
2063 {
2064 	struct regmap_config regmap_conf = {
2065 		.reg_bits = 32,
2066 		.val_bits = 32,
2067 		.reg_stride = 4,
2068 	};
2069 
2070 	struct device *dev = nc->base.dev;
2071 	struct device_node *nand_np, *nfc_np;
2072 	void __iomem *iomem;
2073 	struct resource res;
2074 	int ret;
2075 
2076 	nand_np = dev->of_node;
2077 	nfc_np = of_find_compatible_node(dev->of_node, NULL,
2078 					 "atmel,sama5d3-nfc");
2079 
2080 	nc->clk = of_clk_get(nfc_np, 0);
2081 	if (IS_ERR(nc->clk)) {
2082 		ret = PTR_ERR(nc->clk);
2083 		dev_err(dev, "Failed to retrieve HSMC clock (err = %d)\n",
2084 			ret);
2085 		goto out;
2086 	}
2087 
2088 	ret = clk_prepare_enable(nc->clk);
2089 	if (ret) {
2090 		dev_err(dev, "Failed to enable the HSMC clock (err = %d)\n",
2091 			ret);
2092 		goto out;
2093 	}
2094 
2095 	nc->irq = of_irq_get(nand_np, 0);
2096 	if (nc->irq <= 0) {
2097 		ret = nc->irq ?: -ENXIO;
2098 		if (ret != -EPROBE_DEFER)
2099 			dev_err(dev, "Failed to get IRQ number (err = %d)\n",
2100 				ret);
2101 		goto out;
2102 	}
2103 
2104 	ret = of_address_to_resource(nfc_np, 0, &res);
2105 	if (ret) {
2106 		dev_err(dev, "Invalid or missing NFC IO resource (err = %d)\n",
2107 			ret);
2108 		goto out;
2109 	}
2110 
2111 	iomem = devm_ioremap_resource(dev, &res);
2112 	if (IS_ERR(iomem)) {
2113 		ret = PTR_ERR(iomem);
2114 		goto out;
2115 	}
2116 
2117 	regmap_conf.name = "nfc-io";
2118 	regmap_conf.max_register = resource_size(&res) - 4;
2119 	nc->io = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
2120 	if (IS_ERR(nc->io)) {
2121 		ret = PTR_ERR(nc->io);
2122 		dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
2123 			ret);
2124 		goto out;
2125 	}
2126 
2127 	ret = of_address_to_resource(nfc_np, 1, &res);
2128 	if (ret) {
2129 		dev_err(dev, "Invalid or missing HSMC resource (err = %d)\n",
2130 			ret);
2131 		goto out;
2132 	}
2133 
2134 	iomem = devm_ioremap_resource(dev, &res);
2135 	if (IS_ERR(iomem)) {
2136 		ret = PTR_ERR(iomem);
2137 		goto out;
2138 	}
2139 
2140 	regmap_conf.name = "smc";
2141 	regmap_conf.max_register = resource_size(&res) - 4;
2142 	nc->base.smc = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
2143 	if (IS_ERR(nc->base.smc)) {
2144 		ret = PTR_ERR(nc->base.smc);
2145 		dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
2146 			ret);
2147 		goto out;
2148 	}
2149 
2150 	ret = of_address_to_resource(nfc_np, 2, &res);
2151 	if (ret) {
2152 		dev_err(dev, "Invalid or missing SRAM resource (err = %d)\n",
2153 			ret);
2154 		goto out;
2155 	}
2156 
2157 	nc->sram.virt = devm_ioremap_resource(dev, &res);
2158 	if (IS_ERR(nc->sram.virt)) {
2159 		ret = PTR_ERR(nc->sram.virt);
2160 		goto out;
2161 	}
2162 
2163 	nc->sram.dma = res.start;
2164 
2165 out:
2166 	of_node_put(nfc_np);
2167 
2168 	return ret;
2169 }
2170 
static int
atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc)
{
	struct device *dev = nc->base.dev;
	struct device_node *np;
	int ret;

	/*
	 * The HSMC block is the parent of this controller in the device
	 * hierarchy; its node carries the register layout and the IRQ.
	 */
	np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
	if (!np) {
		dev_err(dev, "Missing or invalid atmel,smc property\n");
		return -EINVAL;
	}

	nc->hsmc_layout = atmel_hsmc_get_reg_layout(np);

	/* Grab the IRQ before releasing the node reference taken above. */
	nc->irq = of_irq_get(np, 0);
	of_node_put(np);
	if (nc->irq <= 0) {
		/* of_irq_get() may return 0 on failure; map that to -ENXIO. */
		ret = nc->irq ?: -ENXIO;
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get IRQ number (err = %d)\n",
				ret);
		return ret;
	}

	np = of_parse_phandle(dev->of_node, "atmel,nfc-io", 0);
	if (!np) {
		dev_err(dev, "Missing or invalid atmel,nfc-io property\n");
		return -EINVAL;
	}

	/* The NFC IO registers are exposed through a syscon regmap. */
	nc->io = syscon_node_to_regmap(np);
	of_node_put(np);
	if (IS_ERR(nc->io)) {
		ret = PTR_ERR(nc->io);
		dev_err(dev, "Could not get NFC IO regmap (err = %d)\n", ret);
		return ret;
	}

	/* The NFC SRAM is shared; carve our buffer out of the gen_pool. */
	nc->sram.pool = of_gen_pool_get(nc->base.dev->of_node,
					 "atmel,nfc-sram", 0);
	if (!nc->sram.pool) {
		dev_err(nc->base.dev, "Missing SRAM\n");
		return -ENOMEM;
	}

	/* Freed in atmel_hsmc_nand_controller_remove() via gen_pool_free(). */
	nc->sram.virt = gen_pool_dma_alloc(nc->sram.pool,
					    ATMEL_NFC_SRAM_SIZE,
					    &nc->sram.dma);
	if (!nc->sram.virt) {
		dev_err(nc->base.dev,
			"Could not allocate memory from the NFC SRAM pool\n");
		return -ENOMEM;
	}

	return 0;
}
2228 
2229 static int
2230 atmel_hsmc_nand_controller_remove(struct atmel_nand_controller *nc)
2231 {
2232 	struct atmel_hsmc_nand_controller *hsmc_nc;
2233 	int ret;
2234 
2235 	ret = atmel_nand_controller_remove_nands(nc);
2236 	if (ret)
2237 		return ret;
2238 
2239 	hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base);
2240 	if (hsmc_nc->sram.pool)
2241 		gen_pool_free(hsmc_nc->sram.pool,
2242 			      (unsigned long)hsmc_nc->sram.virt,
2243 			      ATMEL_NFC_SRAM_SIZE);
2244 
2245 	if (hsmc_nc->clk) {
2246 		clk_disable_unprepare(hsmc_nc->clk);
2247 		clk_put(hsmc_nc->clk);
2248 	}
2249 
2250 	atmel_nand_controller_cleanup(nc);
2251 
2252 	return 0;
2253 }
2254 
2255 static int atmel_hsmc_nand_controller_probe(struct platform_device *pdev,
2256 				const struct atmel_nand_controller_caps *caps)
2257 {
2258 	struct device *dev = &pdev->dev;
2259 	struct atmel_hsmc_nand_controller *nc;
2260 	int ret;
2261 
2262 	nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
2263 	if (!nc)
2264 		return -ENOMEM;
2265 
2266 	ret = atmel_nand_controller_init(&nc->base, pdev, caps);
2267 	if (ret)
2268 		return ret;
2269 
2270 	if (caps->legacy_of_bindings)
2271 		ret = atmel_hsmc_nand_controller_legacy_init(nc);
2272 	else
2273 		ret = atmel_hsmc_nand_controller_init(nc);
2274 
2275 	if (ret)
2276 		return ret;
2277 
2278 	/* Make sure all irqs are masked before registering our IRQ handler. */
2279 	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
2280 	ret = devm_request_irq(dev, nc->irq, atmel_nfc_interrupt,
2281 			       IRQF_SHARED, "nfc", nc);
2282 	if (ret) {
2283 		dev_err(dev,
2284 			"Could not get register NFC interrupt handler (err = %d)\n",
2285 			ret);
2286 		goto err;
2287 	}
2288 
2289 	/* Initial NFC configuration. */
2290 	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG,
2291 		     ATMEL_HSMC_NFC_CFG_DTO_MAX);
2292 
2293 	ret = atmel_nand_controller_add_nands(&nc->base);
2294 	if (ret)
2295 		goto err;
2296 
2297 	return 0;
2298 
2299 err:
2300 	atmel_hsmc_nand_controller_remove(&nc->base);
2301 
2302 	return ret;
2303 }
2304 
/* Controller hooks for the HSMC (sama5 and later) interface. */
static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = {
	.probe = atmel_hsmc_nand_controller_probe,
	.remove = atmel_hsmc_nand_controller_remove,
	.ecc_init = atmel_hsmc_nand_ecc_init,
	.nand_init = atmel_hsmc_nand_init,
	.setup_data_interface = atmel_hsmc_nand_setup_data_interface,
};

/* sama5 capabilities: DMA-capable, ALE on A21, CLE on A22. */
static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_hsmc_nc_ops,
};

/* Only used to parse old bindings. */
static const struct atmel_nand_controller_caps atmel_sama5_nand_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_hsmc_nc_ops,
	.legacy_of_bindings = true,
};
2328 
2329 static int atmel_smc_nand_controller_probe(struct platform_device *pdev,
2330 				const struct atmel_nand_controller_caps *caps)
2331 {
2332 	struct device *dev = &pdev->dev;
2333 	struct atmel_smc_nand_controller *nc;
2334 	int ret;
2335 
2336 	nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
2337 	if (!nc)
2338 		return -ENOMEM;
2339 
2340 	ret = atmel_nand_controller_init(&nc->base, pdev, caps);
2341 	if (ret)
2342 		return ret;
2343 
2344 	ret = atmel_smc_nand_controller_init(nc);
2345 	if (ret)
2346 		return ret;
2347 
2348 	return atmel_nand_controller_add_nands(&nc->base);
2349 }
2350 
static int
atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc)
{
	int ret;

	/* Detach the chips; only clean up once that has succeeded. */
	ret = atmel_nand_controller_remove_nands(nc);
	if (!ret)
		atmel_nand_controller_cleanup(nc);

	return ret;
}
2364 
2365 /*
2366  * The SMC reg layout of at91rm9200 is completely different which prevents us
2367  * from re-using atmel_smc_nand_setup_data_interface() for the
2368  * ->setup_data_interface() hook.
2369  * At this point, there's no support for the at91rm9200 SMC IP, so we leave
2370  * ->setup_data_interface() unassigned.
2371  */
2372 static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
2373 	.probe = atmel_smc_nand_controller_probe,
2374 	.remove = atmel_smc_nand_controller_remove,
2375 	.ecc_init = atmel_nand_ecc_init,
2376 	.nand_init = atmel_smc_nand_init,
2377 };
2378 
2379 static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
2380 	.ale_offs = BIT(21),
2381 	.cle_offs = BIT(22),
2382 	.ops = &at91rm9200_nc_ops,
2383 };
2384 
2385 static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
2386 	.probe = atmel_smc_nand_controller_probe,
2387 	.remove = atmel_smc_nand_controller_remove,
2388 	.ecc_init = atmel_nand_ecc_init,
2389 	.nand_init = atmel_smc_nand_init,
2390 	.setup_data_interface = atmel_smc_nand_setup_data_interface,
2391 };
2392 
2393 static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
2394 	.ale_offs = BIT(21),
2395 	.cle_offs = BIT(22),
2396 	.ops = &atmel_smc_nc_ops,
2397 };
2398 
2399 static const struct atmel_nand_controller_caps atmel_sam9261_nc_caps = {
2400 	.ale_offs = BIT(22),
2401 	.cle_offs = BIT(21),
2402 	.ops = &atmel_smc_nc_ops,
2403 };
2404 
2405 static const struct atmel_nand_controller_caps atmel_sam9g45_nc_caps = {
2406 	.has_dma = true,
2407 	.ale_offs = BIT(21),
2408 	.cle_offs = BIT(22),
2409 	.ops = &atmel_smc_nc_ops,
2410 };
2411 
2412 /* Only used to parse old bindings. */
2413 static const struct atmel_nand_controller_caps atmel_rm9200_nand_caps = {
2414 	.ale_offs = BIT(21),
2415 	.cle_offs = BIT(22),
2416 	.ops = &atmel_smc_nc_ops,
2417 	.legacy_of_bindings = true,
2418 };
2419 
2420 static const struct atmel_nand_controller_caps atmel_sam9261_nand_caps = {
2421 	.ale_offs = BIT(22),
2422 	.cle_offs = BIT(21),
2423 	.ops = &atmel_smc_nc_ops,
2424 	.legacy_of_bindings = true,
2425 };
2426 
2427 static const struct atmel_nand_controller_caps atmel_sam9g45_nand_caps = {
2428 	.has_dma = true,
2429 	.ale_offs = BIT(21),
2430 	.cle_offs = BIT(22),
2431 	.ops = &atmel_smc_nc_ops,
2432 	.legacy_of_bindings = true,
2433 };
2434 
/*
 * DT match table. Current bindings use the *-nand-controller compatibles;
 * the deprecated *-nand entries are refined further at probe time (see
 * atmel_nand_controller_probe()).
 */
static const struct of_device_id atmel_nand_controller_of_ids[] = {
	{
		.compatible = "atmel,at91rm9200-nand-controller",
		.data = &atmel_rm9200_nc_caps,
	},
	{
		.compatible = "atmel,at91sam9260-nand-controller",
		.data = &atmel_sam9260_nc_caps,
	},
	{
		.compatible = "atmel,at91sam9261-nand-controller",
		.data = &atmel_sam9261_nc_caps,
	},
	{
		.compatible = "atmel,at91sam9g45-nand-controller",
		.data = &atmel_sam9g45_nc_caps,
	},
	{
		.compatible = "atmel,sama5d3-nand-controller",
		.data = &atmel_sama5_nc_caps,
	},
	/* Support for old/deprecated bindings: */
	{
		.compatible = "atmel,at91rm9200-nand",
		.data = &atmel_rm9200_nand_caps,
	},
	{
		.compatible = "atmel,sama5d4-nand",
		.data = &atmel_rm9200_nand_caps,
	},
	{
		.compatible = "atmel,sama5d2-nand",
		.data = &atmel_rm9200_nand_caps,
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, atmel_nand_controller_of_ids);
2472 
2473 static int atmel_nand_controller_probe(struct platform_device *pdev)
2474 {
2475 	const struct atmel_nand_controller_caps *caps;
2476 
2477 	if (pdev->id_entry)
2478 		caps = (void *)pdev->id_entry->driver_data;
2479 	else
2480 		caps = of_device_get_match_data(&pdev->dev);
2481 
2482 	if (!caps) {
2483 		dev_err(&pdev->dev, "Could not retrieve NFC caps\n");
2484 		return -EINVAL;
2485 	}
2486 
2487 	if (caps->legacy_of_bindings) {
2488 		u32 ale_offs = 21;
2489 
2490 		/*
2491 		 * If we are parsing legacy DT props and the DT contains a
2492 		 * valid NFC node, forward the request to the sama5 logic.
2493 		 */
2494 		if (of_find_compatible_node(pdev->dev.of_node, NULL,
2495 					    "atmel,sama5d3-nfc"))
2496 			caps = &atmel_sama5_nand_caps;
2497 
2498 		/*
2499 		 * Even if the compatible says we are dealing with an
2500 		 * at91rm9200 controller, the atmel,nand-has-dma specify that
2501 		 * this controller supports DMA, which means we are in fact
2502 		 * dealing with an at91sam9g45+ controller.
2503 		 */
2504 		if (!caps->has_dma &&
2505 		    of_property_read_bool(pdev->dev.of_node,
2506 					  "atmel,nand-has-dma"))
2507 			caps = &atmel_sam9g45_nand_caps;
2508 
2509 		/*
2510 		 * All SoCs except the at91sam9261 are assigning ALE to A21 and
2511 		 * CLE to A22. If atmel,nand-addr-offset != 21 this means we're
2512 		 * actually dealing with an at91sam9261 controller.
2513 		 */
2514 		of_property_read_u32(pdev->dev.of_node,
2515 				     "atmel,nand-addr-offset", &ale_offs);
2516 		if (ale_offs != 21)
2517 			caps = &atmel_sam9261_nand_caps;
2518 	}
2519 
2520 	return caps->ops->probe(pdev, caps);
2521 }
2522 
2523 static int atmel_nand_controller_remove(struct platform_device *pdev)
2524 {
2525 	struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
2526 
2527 	return nc->caps->ops->remove(nc);
2528 }
2529 
2530 static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
2531 {
2532 	struct atmel_nand_controller *nc = dev_get_drvdata(dev);
2533 	struct atmel_nand *nand;
2534 
2535 	if (nc->pmecc)
2536 		atmel_pmecc_reset(nc->pmecc);
2537 
2538 	list_for_each_entry(nand, &nc->chips, node) {
2539 		int i;
2540 
2541 		for (i = 0; i < nand->numcs; i++)
2542 			nand_reset(&nand->base, i);
2543 	}
2544 
2545 	return 0;
2546 }
2547 
/* No suspend hook is needed; only the resume-time reset matters here. */
static SIMPLE_DEV_PM_OPS(atmel_nand_controller_pm_ops, NULL,
			 atmel_nand_controller_resume);

static struct platform_driver atmel_nand_controller_driver = {
	.driver = {
		.name = "atmel-nand-controller",
		.of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
		.pm = &atmel_nand_controller_pm_ops,
	},
	.probe = atmel_nand_controller_probe,
	.remove = atmel_nand_controller_remove,
};
module_platform_driver(atmel_nand_controller_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_DESCRIPTION("NAND Flash Controller driver for Atmel SoCs");
MODULE_ALIAS("platform:atmel-nand-controller");
2566