xref: /linux/drivers/mtd/nand/raw/atmel/nand-controller.c (revision 320475fbd590dc94a0a3d9173f81e0797ee1a232)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2017 ATMEL
4  * Copyright 2017 Free Electrons
5  *
6  * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
7  *
8  * Derived from the atmel_nand.c driver which contained the following
9  * copyrights:
10  *
11  *   Copyright 2003 Rick Bronson
12  *
13  *   Derived from drivers/mtd/nand/autcpu12.c (removed in v3.8)
14  *	Copyright 2001 Thomas Gleixner (gleixner@autronix.de)
15  *
16  *   Derived from drivers/mtd/spia.c (removed in v3.8)
17  *	Copyright 2000 Steven J. Hill (sjhill@cotw.com)
18  *
19  *
20  *   Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
21  *	Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007
22  *
23  *   Derived from Das U-Boot source code
24  *	(u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
25  *	Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
26  *
27  *   Add Programmable Multibit ECC support for various AT91 SoC
28  *	Copyright 2012 ATMEL, Hong Xu
29  *
30  *   Add Nand Flash Controller support for SAMA5 SoC
31  *	Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
32  *
33  * A few words about the naming convention in this file. This convention
34  * applies to structure and function names.
35  *
36  * Prefixes:
37  *
38  * - atmel_nand_: all generic structures/functions
39  * - atmel_smc_nand_: all structures/functions specific to the SMC interface
40  *		      (at91sam9 and avr32 SoCs)
41  * - atmel_hsmc_nand_: all structures/functions specific to the HSMC interface
42  *		       (sama5 SoCs and later)
43  * - atmel_nfc_: all structures/functions used to manipulate the NFC sub-block
44  *		 that is available in the HSMC block
45  * - <soc>_nand_: all SoC specific structures/functions
46  */
47 
48 #include <linux/clk.h>
49 #include <linux/dma-mapping.h>
50 #include <linux/dmaengine.h>
51 #include <linux/genalloc.h>
52 #include <linux/gpio/consumer.h>
53 #include <linux/interrupt.h>
54 #include <linux/mfd/syscon.h>
55 #include <linux/mfd/syscon/atmel-matrix.h>
56 #include <linux/mfd/syscon/atmel-smc.h>
57 #include <linux/module.h>
58 #include <linux/mtd/rawnand.h>
59 #include <linux/of_address.h>
60 #include <linux/of_irq.h>
61 #include <linux/of_platform.h>
62 #include <linux/iopoll.h>
63 #include <linux/platform_device.h>
64 #include <linux/regmap.h>
65 #include <soc/at91/atmel-sfr.h>
66 
67 #include "pmecc.h"
68 
/* NFC configuration register and its fields. */
#define ATMEL_HSMC_NFC_CFG			0x0
#define ATMEL_HSMC_NFC_CFG_SPARESIZE(x)		(((x) / 4) << 24)
#define ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK	GENMASK(30, 24)
#define ATMEL_HSMC_NFC_CFG_DTO(cyc, mul)	(((cyc) << 16) | ((mul) << 20))
#define ATMEL_HSMC_NFC_CFG_DTO_MAX		GENMASK(22, 16)
#define ATMEL_HSMC_NFC_CFG_RBEDGE		BIT(13)
#define ATMEL_HSMC_NFC_CFG_FALLING_EDGE		BIT(12)
#define ATMEL_HSMC_NFC_CFG_RSPARE		BIT(9)
#define ATMEL_HSMC_NFC_CFG_WSPARE		BIT(8)
#define ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK	GENMASK(2, 0)
#define ATMEL_HSMC_NFC_CFG_PAGESIZE(x)		(fls((x) / 512) - 1)

/* NFC control register: enable/disable the NFC sub-block. */
#define ATMEL_HSMC_NFC_CTRL			0x4
#define ATMEL_HSMC_NFC_CTRL_EN			BIT(0)
#define ATMEL_HSMC_NFC_CTRL_DIS			BIT(1)

/* NFC status register and interrupt enable/disable/mask registers. */
#define ATMEL_HSMC_NFC_SR			0x8
#define ATMEL_HSMC_NFC_IER			0xc
#define ATMEL_HSMC_NFC_IDR			0x10
#define ATMEL_HSMC_NFC_IMR			0x14
#define ATMEL_HSMC_NFC_SR_ENABLED		BIT(1)
#define ATMEL_HSMC_NFC_SR_RB_RISE		BIT(4)
#define ATMEL_HSMC_NFC_SR_RB_FALL		BIT(5)
#define ATMEL_HSMC_NFC_SR_BUSY			BIT(8)
#define ATMEL_HSMC_NFC_SR_WR			BIT(11)
#define ATMEL_HSMC_NFC_SR_CSID			GENMASK(14, 12)
#define ATMEL_HSMC_NFC_SR_XFRDONE		BIT(16)
#define ATMEL_HSMC_NFC_SR_CMDDONE		BIT(17)
#define ATMEL_HSMC_NFC_SR_DTOE			BIT(20)
#define ATMEL_HSMC_NFC_SR_UNDEF			BIT(21)
#define ATMEL_HSMC_NFC_SR_AWB			BIT(22)
#define ATMEL_HSMC_NFC_SR_NFCASE		BIT(23)
/* All error conditions reported in the status register. */
#define ATMEL_HSMC_NFC_SR_ERRORS		(ATMEL_HSMC_NFC_SR_DTOE | \
						 ATMEL_HSMC_NFC_SR_UNDEF | \
						 ATMEL_HSMC_NFC_SR_AWB | \
						 ATMEL_HSMC_NFC_SR_NFCASE)
#define ATMEL_HSMC_NFC_SR_RBEDGE(x)		BIT((x) + 24)

#define ATMEL_HSMC_NFC_ADDR			0x18
#define ATMEL_HSMC_NFC_BANK			0x1c

#define ATMEL_NFC_MAX_RB_ID			7

#define ATMEL_NFC_SRAM_SIZE			0x2400

/* Fields of the NFC command word composed in atmel_nfc_exec_op(). */
#define ATMEL_NFC_CMD(pos, cmd)			((cmd) << (((pos) * 8) + 2))
#define ATMEL_NFC_VCMD2				BIT(18)
#define ATMEL_NFC_ACYCLE(naddrs)		((naddrs) << 19)
#define ATMEL_NFC_CSID(cs)			((cs) << 22)
#define ATMEL_NFC_DATAEN			BIT(25)
#define ATMEL_NFC_NFCWR				BIT(26)

#define ATMEL_NFC_MAX_ADDR_CYCLES		5

/* Address-bus offsets that toggle ALE/CLE on SMC controllers. */
#define ATMEL_NAND_ALE_OFFSET			BIT(21)
#define ATMEL_NAND_CLE_OFFSET			BIT(22)

#define DEFAULT_TIMEOUT_MS			1000
/* Transfers shorter than this are done with the CPU rather than DMA. */
#define MIN_DMA_LEN				128

/* Module parameter: set avoiddma=1 to force PIO transfers. */
static bool atmel_nand_avoid_dma __read_mostly;

MODULE_PARM_DESC(avoiddma, "Avoid using DMA");
module_param_named(avoiddma, atmel_nand_avoid_dma, bool, 0400);
133 
/* How a NAND chip's ready/busy line is wired to the controller. */
enum atmel_nand_rb_type {
	ATMEL_NAND_NO_RB,
	ATMEL_NAND_NATIVE_RB,
	ATMEL_NAND_GPIO_RB,
};

/* Ready/busy line description: either a native NFC R/B id or a GPIO. */
struct atmel_nand_rb {
	enum atmel_nand_rb_type type;
	union {
		struct gpio_desc *gpio;
		int id;
	};
};
147 
/*
 * Per chip-select state: CS id, R/B description, optional CS GPIO and
 * the memory window (virtual + DMA address) used for data cycles.
 */
struct atmel_nand_cs {
	int id;
	struct atmel_nand_rb rb;
	struct gpio_desc *csgpio;
	struct {
		void __iomem *virt;
		dma_addr_t dma;
	} io;

	/* SMC timing configuration applied when this CS is selected. */
	struct atmel_smc_cs_conf smcconf;
};
159 
/*
 * Per-chip state: wraps the generic nand_chip and tracks the currently
 * selected CS, the PMECC user context and an optional card-detect GPIO.
 */
struct atmel_nand {
	struct list_head node;
	struct device *dev;
	struct nand_chip base;
	struct atmel_nand_cs *activecs;
	struct atmel_pmecc_user *pmecc;
	struct gpio_desc *cdgpio;
	int numcs;
	struct atmel_nand_cs cs[] __counted_by(numcs);
};
170 
to_atmel_nand(struct nand_chip * chip)171 static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip)
172 {
173 	return container_of(chip, struct atmel_nand, base);
174 }
175 
/* Direction of the optional data phase of an NFC operation. */
enum atmel_nfc_data_xfer {
	ATMEL_NFC_NO_DATA,
	ATMEL_NFC_READ_DATA,
	ATMEL_NFC_WRITE_DATA,
};

/*
 * NFC operation under construction: target CS, up to two command
 * cycles, up to five address cycles, the data phase direction, the
 * status bits to wait for and the error bits collected so far.
 */
struct atmel_nfc_op {
	u8 cs;
	u8 ncmds;
	u8 cmds[2];
	u8 naddrs;
	u8 addrs[5];
	enum atmel_nfc_data_xfer data;
	u32 wait;
	u32 errors;
};
192 
struct atmel_nand_controller;
struct atmel_nand_controller_caps;

/* Controller-variant hooks (SMC vs HSMC implementations). */
struct atmel_nand_controller_ops {
	int (*probe)(struct platform_device *pdev,
		     const struct atmel_nand_controller_caps *caps);
	int (*remove)(struct atmel_nand_controller *nc);
	void (*nand_init)(struct atmel_nand_controller *nc,
			  struct atmel_nand *nand);
	int (*ecc_init)(struct nand_chip *chip);
	int (*setup_interface)(struct atmel_nand *nand, int csline,
			       const struct nand_interface_config *conf);
	int (*exec_op)(struct atmel_nand *nand,
		       const struct nand_operation *op, bool check_only);
};

/* Static per-SoC capabilities selected from the compatible string. */
struct atmel_nand_controller_caps {
	bool has_dma;
	bool legacy_of_bindings;
	u32 ale_offs;
	u32 cle_offs;
	const char *ebi_csa_regmap_name;
	const struct atmel_nand_controller_ops *ops;
};
217 
/* Base controller state shared by the SMC and HSMC variants. */
struct atmel_nand_controller {
	struct nand_controller base;
	const struct atmel_nand_controller_caps *caps;
	struct device *dev;
	struct regmap *smc;
	struct dma_chan *dmac;
	struct atmel_pmecc *pmecc;
	struct list_head chips;
	struct clk *mck;
};
228 
229 static inline struct atmel_nand_controller *
to_nand_controller(struct nand_controller * ctl)230 to_nand_controller(struct nand_controller *ctl)
231 {
232 	return container_of(ctl, struct atmel_nand_controller, base);
233 }
234 
/* EBI chip-select assignment register description. */
struct atmel_smc_nand_ebi_csa_cfg {
	u32 offs;
	u32 nfd0_on_d16;
};

/* SMC (at91sam9/avr32) flavour of the controller. */
struct atmel_smc_nand_controller {
	struct atmel_nand_controller base;
	struct regmap *ebi_csa_regmap;
	struct atmel_smc_nand_ebi_csa_cfg *ebi_csa;
};
245 
246 static inline struct atmel_smc_nand_controller *
to_smc_nand_controller(struct nand_controller * ctl)247 to_smc_nand_controller(struct nand_controller *ctl)
248 {
249 	return container_of(to_nand_controller(ctl),
250 			    struct atmel_smc_nand_controller, base);
251 }
252 
/*
 * HSMC (sama5 and later) flavour of the controller, using the NFC
 * sub-block and its internal SRAM for page transfers.
 */
struct atmel_hsmc_nand_controller {
	struct atmel_nand_controller base;
	struct {
		struct gen_pool *pool;
		void __iomem *virt;
		dma_addr_t dma;
	} sram;
	const struct atmel_hsmc_reg_layout *hsmc_layout;
	struct regmap *io;
	/* Operation currently being built/executed (one at a time). */
	struct atmel_nfc_op op;
	struct completion complete;
	/* Cached copy of the last value written to ATMEL_HSMC_NFC_CFG. */
	u32 cfg;
	int irq;

	/* Only used when instantiating from legacy DT bindings. */
	struct clk *clk;
};
270 
271 static inline struct atmel_hsmc_nand_controller *
to_hsmc_nand_controller(struct nand_controller * ctl)272 to_hsmc_nand_controller(struct nand_controller *ctl)
273 {
274 	return container_of(to_nand_controller(ctl),
275 			    struct atmel_hsmc_nand_controller, base);
276 }
277 
atmel_nfc_op_done(struct atmel_nfc_op * op,u32 status)278 static bool atmel_nfc_op_done(struct atmel_nfc_op *op, u32 status)
279 {
280 	op->errors |= status & ATMEL_HSMC_NFC_SR_ERRORS;
281 	op->wait ^= status & op->wait;
282 
283 	return !op->wait || op->errors;
284 }
285 
/*
 * NFC interrupt handler: fold the status register into the in-flight
 * op, mask the interrupt sources that fired, and complete the waiter
 * once every awaited bit has been seen or an error was raised.
 */
static irqreturn_t atmel_nfc_interrupt(int irq, void *data)
{
	struct atmel_hsmc_nand_controller *nc = data;
	u32 sr, rcvd;
	bool done;

	regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &sr);

	/* Bits this operation was actually waiting for, plus errors. */
	rcvd = sr & (nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
	done = atmel_nfc_op_done(&nc->op, sr);

	if (rcvd)
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, rcvd);

	if (done)
		complete(&nc->complete);

	return rcvd ? IRQ_HANDLED : IRQ_NONE;
}
305 
/*
 * Wait for the in-flight NFC operation to complete, either by polling
 * the status register or by sleeping on the completion filled by the
 * interrupt handler. Errors accumulated in nc->op.errors are then
 * logged and translated to an errno.
 */
static int atmel_nfc_wait(struct atmel_hsmc_nand_controller *nc, bool poll,
			  unsigned int timeout_ms)
{
	int ret;

	if (!timeout_ms)
		timeout_ms = DEFAULT_TIMEOUT_MS;

	if (poll) {
		u32 status;

		ret = regmap_read_poll_timeout(nc->base.smc,
					       ATMEL_HSMC_NFC_SR, status,
					       atmel_nfc_op_done(&nc->op,
								 status),
					       0, timeout_ms * 1000);
	} else {
		init_completion(&nc->complete);
		/* Unmask the awaited and error interrupt sources. */
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IER,
			     nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
		ret = wait_for_completion_timeout(&nc->complete,
						msecs_to_jiffies(timeout_ms));
		if (!ret)
			ret = -ETIMEDOUT;
		else
			ret = 0;

		/* Mask all NFC interrupts until the next operation. */
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
	}

	if (nc->op.errors & ATMEL_HSMC_NFC_SR_DTOE) {
		dev_err(nc->base.dev, "Waiting NAND R/B Timeout\n");
		ret = -ETIMEDOUT;
	}

	if (nc->op.errors & ATMEL_HSMC_NFC_SR_UNDEF) {
		dev_err(nc->base.dev, "Access to an undefined area\n");
		ret = -EIO;
	}

	if (nc->op.errors & ATMEL_HSMC_NFC_SR_AWB) {
		dev_err(nc->base.dev, "Access while busy\n");
		ret = -EIO;
	}

	if (nc->op.errors & ATMEL_HSMC_NFC_SR_NFCASE) {
		dev_err(nc->base.dev, "Wrong access size\n");
		ret = -EIO;
	}

	return ret;
}
358 
/* dmaengine callback: wake up the waiter of a memcpy transfer. */
static void atmel_nand_dma_transfer_finished(void *data)
{
	complete((struct completion *)data);
}
365 
/*
 * Move @len bytes between @buf and the device window at @dev_dma using
 * the controller's memcpy DMA channel, synchronously. Returns 0 on
 * success or -EIO, in which case the caller falls back to PIO.
 */
static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
				   void *buf, dma_addr_t dev_dma, size_t len,
				   enum dma_data_direction dir)
{
	DECLARE_COMPLETION_ONSTACK(finished);
	dma_addr_t src_dma, dst_dma, buf_dma;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	buf_dma = dma_map_single(nc->dev, buf, len, dir);
	if (dma_mapping_error(nc->dev, buf_dma)) {
		dev_err(nc->dev,
			"Failed to prepare a buffer for DMA access\n");
		goto err;
	}

	/* Pick source/destination according to the transfer direction. */
	if (dir == DMA_FROM_DEVICE) {
		src_dma = dev_dma;
		dst_dma = buf_dma;
	} else {
		src_dma = buf_dma;
		dst_dma = dev_dma;
	}

	tx = dmaengine_prep_dma_memcpy(nc->dmac, dst_dma, src_dma, len,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(nc->dev, "Failed to prepare DMA memcpy\n");
		goto err_unmap;
	}

	tx->callback = atmel_nand_dma_transfer_finished;
	tx->callback_param = &finished;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(nc->dev, "Failed to do DMA tx_submit\n");
		goto err_unmap;
	}

	dma_async_issue_pending(nc->dmac);
	wait_for_completion(&finished);
	dma_unmap_single(nc->dev, buf_dma, len, dir);

	return 0;

err_unmap:
	dma_unmap_single(nc->dev, buf_dma, len, dir);

err:
	dev_dbg(nc->dev, "Fall back to CPU I/O\n");

	return -EIO;
}
420 
/*
 * Fire the NFC operation described in nc->op: compose the command word
 * (command cycles, CS, address cycle count, data phase flags), program
 * the 5th address cycle if present, trigger the command, then wait for
 * completion (polling or IRQ-driven). nc->op is reset afterwards.
 */
static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
{
	u8 *addrs = nc->op.addrs;
	unsigned int op = 0;
	u32 addr, val;
	int i, ret;

	nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE;

	for (i = 0; i < nc->op.ncmds; i++)
		op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]);

	/* Only 4 address cycles fit in the trigger; the 5th goes to ADDR. */
	if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++);

	op |= ATMEL_NFC_CSID(nc->op.cs) |
	      ATMEL_NFC_ACYCLE(nc->op.naddrs);

	if (nc->op.ncmds > 1)
		op |= ATMEL_NFC_VCMD2;

	addr = addrs[0] | (addrs[1] << 8) | (addrs[2] << 16) |
	       (addrs[3] << 24);

	if (nc->op.data != ATMEL_NFC_NO_DATA) {
		op |= ATMEL_NFC_DATAEN;
		nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE;

		if (nc->op.data == ATMEL_NFC_WRITE_DATA)
			op |= ATMEL_NFC_NFCWR;
	}

	/* Clear all flags (SR is read-to-clear). */
	regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val);

	/*
	 * Send the command: in the NFC command space (nc->io) the command
	 * word acts as the register offset and the address cycles are the
	 * written value.
	 */
	regmap_write(nc->io, op, addr);

	ret = atmel_nfc_wait(nc, poll, 0);
	if (ret)
		dev_err(nc->base.dev,
			"Failed to send NAND command (err = %d)!",
			ret);

	/* Reset the op state. */
	memset(&nc->op, 0, sizeof(nc->op));

	return ret;
}
470 
/*
 * Read @len bytes from the active CS data window into @buf, through
 * DMA when possible, otherwise with 8/16-bit PIO accesses.
 */
static void atmel_nand_data_in(struct atmel_nand *nand, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct atmel_nand_controller *nc;

	nc = to_nand_controller(nand->base.controller);

	/*
	 * If the controller supports DMA, the buffer address is DMA-able and
	 * len is long enough to make DMA transfers profitable, let's trigger
	 * a DMA transfer. If it fails, fallback to PIO mode.
	 */
	if (nc->dmac && virt_addr_valid(buf) &&
	    len >= MIN_DMA_LEN && !force_8bit &&
	    !atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
				     DMA_FROM_DEVICE))
		return;

	/* PIO path: 16-bit accesses on 16-bit buses unless forced to 8. */
	if ((nand->base.options & NAND_BUSWIDTH_16) && !force_8bit)
		ioread16_rep(nand->activecs->io.virt, buf, len / 2);
	else
		ioread8_rep(nand->activecs->io.virt, buf, len);
}
494 
/*
 * Write @len bytes from @buf to the active CS data window, through
 * DMA when possible, otherwise with 8/16-bit PIO accesses.
 */
static void atmel_nand_data_out(struct atmel_nand *nand, const void *buf,
				unsigned int len, bool force_8bit)
{
	struct atmel_nand_controller *nc;

	nc = to_nand_controller(nand->base.controller);

	/*
	 * If the controller supports DMA, the buffer address is DMA-able and
	 * len is long enough to make DMA transfers profitable, let's trigger
	 * a DMA transfer. If it fails, fallback to PIO mode.
	 */
	if (nc->dmac && virt_addr_valid(buf) &&
	    len >= MIN_DMA_LEN && !force_8bit &&
	    !atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
				     len, DMA_TO_DEVICE))
		return;

	/* PIO path: 16-bit accesses on 16-bit buses unless forced to 8. */
	if ((nand->base.options & NAND_BUSWIDTH_16) && !force_8bit)
		iowrite16_rep(nand->activecs->io.virt, buf, len / 2);
	else
		iowrite8_rep(nand->activecs->io.virt, buf, len);
}
518 
atmel_nand_waitrdy(struct atmel_nand * nand,unsigned int timeout_ms)519 static int atmel_nand_waitrdy(struct atmel_nand *nand, unsigned int timeout_ms)
520 {
521 	if (nand->activecs->rb.type == ATMEL_NAND_NO_RB)
522 		return nand_soft_waitrdy(&nand->base, timeout_ms);
523 
524 	return nand_gpio_waitrdy(&nand->base, nand->activecs->rb.gpio,
525 				 timeout_ms);
526 }
527 
/*
 * Wait for R/B using the NFC's native edge detection when the chip has
 * a native R/B line; otherwise defer to the generic GPIO/soft method.
 */
static int atmel_hsmc_nand_waitrdy(struct atmel_nand *nand,
				   unsigned int timeout_ms)
{
	struct atmel_hsmc_nand_controller *nc;
	u32 status, mask;

	if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
		return atmel_nand_waitrdy(nand, timeout_ms);

	nc = to_hsmc_nand_controller(nand->base.controller);
	/* RBEDGEx bit in SR set when the selected R/B line toggled. */
	mask = ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
	return regmap_read_poll_timeout_atomic(nc->base.smc, ATMEL_HSMC_NFC_SR,
					       status, status & mask,
					       10, timeout_ms * 1000);
}
543 
atmel_nand_select_target(struct atmel_nand * nand,unsigned int cs)544 static void atmel_nand_select_target(struct atmel_nand *nand,
545 				     unsigned int cs)
546 {
547 	nand->activecs = &nand->cs[cs];
548 }
549 
/*
 * Make @cs the active chip select and program the NFC page size, spare
 * size and spare-read configuration matching the chip's layout. The
 * last applied value is cached in nc->cfg to skip redundant writes.
 */
static void atmel_hsmc_nand_select_target(struct atmel_nand *nand,
					  unsigned int cs)
{
	struct mtd_info *mtd = nand_to_mtd(&nand->base);
	struct atmel_hsmc_nand_controller *nc;
	u32 cfg = ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
		  ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
		  ATMEL_HSMC_NFC_CFG_RSPARE;

	nand->activecs = &nand->cs[cs];
	nc = to_hsmc_nand_controller(nand->base.controller);
	if (nc->cfg == cfg)
		return;

	regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
			   ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
			   ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK |
			   ATMEL_HSMC_NFC_CFG_RSPARE |
			   ATMEL_HSMC_NFC_CFG_WSPARE,
			   cfg);
	nc->cfg = cfg;
}
572 
/*
 * Execute one NAND instruction on an SMC controller. Command and
 * address cycles are issued by writing the data window with the CLE or
 * ALE address offset set; data phases go through the PIO/DMA helpers.
 */
static int atmel_smc_nand_exec_instr(struct atmel_nand *nand,
				     const struct nand_op_instr *instr)
{
	struct atmel_nand_controller *nc;
	unsigned int i;

	nc = to_nand_controller(nand->base.controller);
	switch (instr->type) {
	case NAND_OP_CMD_INSTR:
		writeb(instr->ctx.cmd.opcode,
		       nand->activecs->io.virt + nc->caps->cle_offs);
		return 0;
	case NAND_OP_ADDR_INSTR:
		for (i = 0; i < instr->ctx.addr.naddrs; i++)
			writeb(instr->ctx.addr.addrs[i],
			       nand->activecs->io.virt + nc->caps->ale_offs);
		return 0;
	case NAND_OP_DATA_IN_INSTR:
		atmel_nand_data_in(nand, instr->ctx.data.buf.in,
				   instr->ctx.data.len,
				   instr->ctx.data.force_8bit);
		return 0;
	case NAND_OP_DATA_OUT_INSTR:
		atmel_nand_data_out(nand, instr->ctx.data.buf.out,
				    instr->ctx.data.len,
				    instr->ctx.data.force_8bit);
		return 0;
	case NAND_OP_WAITRDY_INSTR:
		return atmel_nand_waitrdy(nand,
					  instr->ctx.waitrdy.timeout_ms);
	default:
		break;
	}

	return -EINVAL;
}
609 
/*
 * exec_op hook for SMC controllers: run each instruction in sequence,
 * stopping at the first failure. The CS GPIO (active low) is asserted
 * around the whole operation.
 */
static int atmel_smc_nand_exec_op(struct atmel_nand *nand,
				  const struct nand_operation *op,
				  bool check_only)
{
	unsigned int i;
	int ret = 0;

	if (check_only)
		return 0;

	atmel_nand_select_target(nand, op->cs);
	gpiod_set_value(nand->activecs->csgpio, 0);

	for (i = 0; !ret && i < op->ninstrs; i++)
		ret = atmel_smc_nand_exec_instr(nand, &op->instrs[i]);

	gpiod_set_value(nand->activecs->csgpio, 1);

	return ret;
}
631 
/*
 * Pack the CMD/ADDR cycles of @subop into nc->op and execute the
 * resulting NFC operation synchronously (polling mode).
 */
static int atmel_hsmc_exec_cmd_addr(struct nand_chip *chip,
				    const struct nand_subop *subop)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;
	unsigned int i, j;

	nc = to_hsmc_nand_controller(chip->controller);

	nc->op.cs = nand->activecs->id;
	for (i = 0; i < subop->ninstrs; i++) {
		const struct nand_op_instr *instr = &subop->instrs[i];

		if (instr->type == NAND_OP_CMD_INSTR) {
			nc->op.cmds[nc->op.ncmds++] = instr->ctx.cmd.opcode;
			continue;
		}

		/*
		 * NOTE(review): the loop bound is the cycle count, not
		 * start_off + count; this is only correct when the subop
		 * starts at address offset 0 — confirm against the
		 * nand_subop_get_addr_start_off() semantics.
		 */
		for (j = nand_subop_get_addr_start_off(subop, i);
		     j < nand_subop_get_num_addr_cyc(subop, i); j++) {
			nc->op.addrs[nc->op.naddrs] = instr->ctx.addr.addrs[j];
			nc->op.naddrs++;
		}
	}

	return atmel_nfc_exec_op(nc, true);
}
659 
atmel_hsmc_exec_rw(struct nand_chip * chip,const struct nand_subop * subop)660 static int atmel_hsmc_exec_rw(struct nand_chip *chip,
661 			      const struct nand_subop *subop)
662 {
663 	const struct nand_op_instr *instr = subop->instrs;
664 	struct atmel_nand *nand = to_atmel_nand(chip);
665 
666 	if (instr->type == NAND_OP_DATA_IN_INSTR)
667 		atmel_nand_data_in(nand, instr->ctx.data.buf.in,
668 				   instr->ctx.data.len,
669 				   instr->ctx.data.force_8bit);
670 	else
671 		atmel_nand_data_out(nand, instr->ctx.data.buf.out,
672 				    instr->ctx.data.len,
673 				    instr->ctx.data.force_8bit);
674 
675 	return 0;
676 }
677 
atmel_hsmc_exec_waitrdy(struct nand_chip * chip,const struct nand_subop * subop)678 static int atmel_hsmc_exec_waitrdy(struct nand_chip *chip,
679 				   const struct nand_subop *subop)
680 {
681 	const struct nand_op_instr *instr = subop->instrs;
682 	struct atmel_nand *nand = to_atmel_nand(chip);
683 
684 	return atmel_hsmc_nand_waitrdy(nand, instr->ctx.waitrdy.timeout_ms);
685 }
686 
/*
 * Operation patterns the NFC can execute natively: an optional
 * CMD/ADDR(up to 5)/CMD sequence, a single data-in or data-out
 * transfer, or a wait-ready.
 */
static const struct nand_op_parser atmel_hsmc_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_cmd_addr,
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
		NAND_OP_PARSER_PAT_CMD_ELEM(true)),
	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0)),
	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_waitrdy,
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
);
699 
/*
 * exec_op hook for HSMC controllers: select the target CS and let the
 * generic parser split @op into NFC-executable subops.
 */
static int atmel_hsmc_nand_exec_op(struct atmel_nand *nand,
				   const struct nand_operation *op,
				   bool check_only)
{
	if (check_only)
		return nand_op_parser_exec_op(&nand->base,
					      &atmel_hsmc_op_parser, op, true);

	atmel_hsmc_nand_select_target(nand, op->cs);

	return nand_op_parser_exec_op(&nand->base, &atmel_hsmc_op_parser, op,
				      false);
}
716 
/*
 * Copy a page (and optionally the OOB area) into the NFC SRAM, using
 * DMA for the page data when a channel is available.
 */
static void atmel_nfc_copy_to_sram(struct nand_chip *chip, const u8 *buf,
				   bool oob_required)
{
	struct atmel_hsmc_nand_controller *nc =
		to_hsmc_nand_controller(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	bool dma_ok = false;

	if (nc->base.dmac)
		dma_ok = !atmel_nand_dma_transfer(&nc->base, (void *)buf,
						  nc->sram.dma, mtd->writesize,
						  DMA_TO_DEVICE);

	/* No DMA channel or DMA failed: copy the page with the CPU. */
	if (!dma_ok)
		memcpy_toio(nc->sram.virt, buf, mtd->writesize);

	if (oob_required)
		memcpy_toio(nc->sram.virt + mtd->writesize, chip->oob_poi,
			    mtd->oobsize);
}
739 
/*
 * Copy a page (and optionally the OOB area) out of the NFC SRAM, using
 * DMA for the page data when a channel is available.
 */
static void atmel_nfc_copy_from_sram(struct nand_chip *chip, u8 *buf,
				     bool oob_required)
{
	struct atmel_hsmc_nand_controller *nc =
		to_hsmc_nand_controller(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	bool dma_ok = false;

	if (nc->base.dmac)
		dma_ok = !atmel_nand_dma_transfer(&nc->base, buf, nc->sram.dma,
						  mtd->writesize,
						  DMA_FROM_DEVICE);

	/* No DMA channel or DMA failed: copy the page with the CPU. */
	if (!dma_ok)
		memcpy_fromio(buf, nc->sram.virt, mtd->writesize);

	if (oob_required)
		memcpy_fromio(chip->oob_poi, nc->sram.virt + mtd->writesize,
			      mtd->oobsize);
}
761 
/*
 * Append the column/row address cycles for (@page, @column) to the
 * pending NFC op. Negative values skip the corresponding cycles.
 */
static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_hsmc_nand_controller *nc;

	nc = to_hsmc_nand_controller(chip->controller);

	if (column >= 0) {
		nc->op.addrs[nc->op.naddrs++] = column;

		/*
		 * 2 address cycles for the column offset on large page NANDs.
		 */
		if (mtd->writesize > 512)
			nc->op.addrs[nc->op.naddrs++] = column >> 8;
	}

	if (page >= 0) {
		nc->op.addrs[nc->op.naddrs++] = page;
		nc->op.addrs[nc->op.naddrs++] = page >> 8;

		/* Third row cycle for chips that need 3 row addresses. */
		if (chip->options & NAND_ROW_ADDR_3)
			nc->op.addrs[nc->op.naddrs++] = page >> 16;
	}
}
787 
/*
 * Enable the PMECC engine for @op (read or write). A raw access is a
 * no-op. Returns 0 on success or the atmel_pmecc_enable() error.
 */
static int atmel_nand_pmecc_enable(struct nand_chip *chip, int op, bool raw)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	int ret;

	if (raw)
		return 0;

	ret = atmel_pmecc_enable(nand->pmecc, op);
	if (!ret)
		return 0;

	dev_err(to_nand_controller(chip->controller)->dev,
		"Failed to enable ECC engine (err = %d)\n", ret);

	return ret;
}
806 
/* Disable the PMECC engine; a raw access is a no-op. */
static void atmel_nand_pmecc_disable(struct nand_chip *chip, bool raw)
{
	if (raw)
		return;

	atmel_pmecc_disable(to_atmel_nand(chip)->pmecc);
}
814 
/*
 * Wait for the PMECC engine to finish, then copy the ECC bytes it
 * computed into the ECC region of chip->oob_poi, one step at a time.
 * A raw access is a no-op.
 */
static int atmel_nand_pmecc_generate_eccbytes(struct nand_chip *chip, bool raw)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand_controller *nc;
	struct mtd_oob_region oobregion;
	void *eccbuf;
	int ret, i;

	nc = to_nand_controller(chip->controller);

	if (raw)
		return 0;

	ret = atmel_pmecc_wait_rdy(nand->pmecc);
	if (ret) {
		dev_err(nc->dev,
			"Failed to transfer NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	/* Locate the ECC area inside the OOB buffer. */
	mtd_ooblayout_ecc(mtd, 0, &oobregion);
	eccbuf = chip->oob_poi + oobregion.offset;

	for (i = 0; i < chip->ecc.steps; i++) {
		atmel_pmecc_get_generated_eccbytes(nand->pmecc, i,
						   eccbuf);
		eccbuf += chip->ecc.bytes;
	}

	return 0;
}
848 
/*
 * Run the PMECC correction pass over @buf, sector by sector. Sectors
 * the engine cannot fix are re-checked as potentially erased chunks
 * before being counted as ECC failures. Returns the maximum number of
 * bitflips observed in a single sector (0 for a raw access).
 */
static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
					 bool raw)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand_controller *nc;
	struct mtd_oob_region oobregion;
	int ret, i, max_bitflips = 0;
	void *databuf, *eccbuf;

	nc = to_nand_controller(chip->controller);

	if (raw)
		return 0;

	ret = atmel_pmecc_wait_rdy(nand->pmecc);
	if (ret) {
		dev_err(nc->dev,
			"Failed to read NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	mtd_ooblayout_ecc(mtd, 0, &oobregion);
	eccbuf = chip->oob_poi + oobregion.offset;
	databuf = buf;

	for (i = 0; i < chip->ecc.steps; i++) {
		ret = atmel_pmecc_correct_sector(nand->pmecc, i, databuf,
						 eccbuf);
		/*
		 * When the engine cannot handle erased pages itself, check
		 * whether the uncorrectable sector is simply erased.
		 */
		if (ret < 0 && !atmel_pmecc_correct_erased_chunks(nand->pmecc))
			ret = nand_check_erased_ecc_chunk(databuf,
							  chip->ecc.size,
							  eccbuf,
							  chip->ecc.bytes,
							  NULL, 0,
							  chip->ecc.strength);

		if (ret >= 0) {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max(ret, max_bitflips);
		} else {
			mtd->ecc_stats.failed++;
		}

		databuf += chip->ecc.size;
		eccbuf += chip->ecc.bytes;
	}

	return max_bitflips;
}
900 
/*
 * Core ECC-protected page program (SMC path): start the program op,
 * stream the page data through the PMECC engine, append the generated
 * ECC bytes in the OOB, then confirm the program operation.
 *
 * Fix: the return values of nand_prog_page_begin_op() and
 * nand_write_data_op() were silently ignored, so bus-level failures
 * were dropped; they are now propagated to the caller.
 */
static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
				     bool oob_required, int page, bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, buf, mtd->writesize, false);
	if (ret) {
		atmel_nand_pmecc_disable(chip, raw);
		return ret;
	}

	/* Harvest the ECC bytes computed while the data went through. */
	ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
	if (ret) {
		atmel_nand_pmecc_disable(chip, raw);
		return ret;
	}

	atmel_nand_pmecc_disable(chip, raw);

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
928 
atmel_nand_pmecc_write_page(struct nand_chip * chip,const u8 * buf,int oob_required,int page)929 static int atmel_nand_pmecc_write_page(struct nand_chip *chip, const u8 *buf,
930 				       int oob_required, int page)
931 {
932 	return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, false);
933 }
934 
atmel_nand_pmecc_write_page_raw(struct nand_chip * chip,const u8 * buf,int oob_required,int page)935 static int atmel_nand_pmecc_write_page_raw(struct nand_chip *chip,
936 					   const u8 *buf, int oob_required,
937 					   int page)
938 {
939 	return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, true);
940 }
941 
/*
 * Core ECC-protected page read (SMC path): read data + OOB with the
 * PMECC engine enabled (unless @raw), then run the correction pass.
 * Returns max bitflips on success or a negative errno.
 */
static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
				    bool oob_required, int page, bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	nand_read_page_op(chip, page, 0, NULL, 0);

	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
	if (ret)
		return ret;

	ret = nand_read_data_op(chip, buf, mtd->writesize, false, false);
	if (ret)
		goto out_disable;

	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, false);
	if (ret)
		goto out_disable;

	ret = atmel_nand_pmecc_correct_data(chip, buf, raw);

out_disable:
	atmel_nand_pmecc_disable(chip, raw);

	return ret;
}
969 
/* ecc.read_page hook: ECC-protected page read. */
static int atmel_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
				      int oob_required, int page)
{
	const bool raw = false;

	return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, raw);
}
975 
/* ecc.read_page_raw hook: read without engaging the ECC engine. */
static int atmel_nand_pmecc_read_page_raw(struct nand_chip *chip, u8 *buf,
					  int oob_required, int page)
{
	const bool raw = true;

	return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, raw);
}
981 
/*
 * Program one page through the NFC: the page data is staged in the NFC
 * SRAM, the NFC pushes it to the NAND while the PMECC engine computes the
 * ECC bytes, then the OOB area is written and the operation is confirmed.
 *
 * Fix over the previous version: the return value of the OOB
 * nand_write_data_op() transfer is now checked, so a failed OOB transfer
 * is reported instead of being silently followed by the program-confirm
 * command.
 */
static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
					  const u8 *buf, bool oob_required,
					  int page, bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;
	int ret;

	atmel_hsmc_nand_select_target(nand, chip->cur_cs);
	nc = to_hsmc_nand_controller(chip->controller);

	/* Stage the page data in the NFC SRAM. */
	atmel_nfc_copy_to_sram(chip, buf, false);

	nc->op.cmds[0] = NAND_CMD_SEQIN;
	nc->op.ncmds = 1;
	atmel_nfc_set_op_addr(chip, page, 0x0);
	nc->op.cs = nand->activecs->id;
	nc->op.data = ATMEL_NFC_WRITE_DATA;

	/* Arm the ECC engine before the transfer so it sees the data. */
	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
	if (ret)
		return ret;

	ret = atmel_nfc_exec_op(nc, false);
	if (ret) {
		atmel_nand_pmecc_disable(chip, raw);
		dev_err(nc->base.dev,
			"Failed to transfer NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);

	atmel_nand_pmecc_disable(chip, raw);

	if (ret)
		return ret;

	/*
	 * Flush the OOB buffer; bail out on a transfer error rather than
	 * blindly confirming the program operation.
	 */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
1026 
atmel_hsmc_nand_pmecc_write_page(struct nand_chip * chip,const u8 * buf,int oob_required,int page)1027 static int atmel_hsmc_nand_pmecc_write_page(struct nand_chip *chip,
1028 					    const u8 *buf, int oob_required,
1029 					    int page)
1030 {
1031 	return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
1032 					      false);
1033 }
1034 
atmel_hsmc_nand_pmecc_write_page_raw(struct nand_chip * chip,const u8 * buf,int oob_required,int page)1035 static int atmel_hsmc_nand_pmecc_write_page_raw(struct nand_chip *chip,
1036 						const u8 *buf,
1037 						int oob_required, int page)
1038 {
1039 	return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
1040 					      true);
1041 }
1042 
/*
 * Read one page through the NFC: the controller loads the page into its
 * internal SRAM while the PMECC engine computes syndromes, then the data
 * is copied out of the SRAM and the correction step is run.
 */
static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
					 bool oob_required, int page,
					 bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;
	int ret;

	atmel_hsmc_nand_select_target(nand, chip->cur_cs);
	nc = to_hsmc_nand_controller(chip->controller);

	/*
	 * Optimized read page accessors only work when the NAND R/B pin is
	 * connected to a native SoC R/B pin. If that's not the case, fallback
	 * to the non-optimized one.
	 */
	if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
		return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
						raw);

	/*
	 * NOTE(review): nc->op.ncmds is post-incremented without an explicit
	 * reset here — presumably nc->op is cleared by the previous
	 * atmel_nfc_exec_op()/helper; confirm against those definitions.
	 */
	nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0;

	/* Large-page devices need the second (READSTART) command cycle. */
	if (mtd->writesize > 512)
		nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READSTART;

	atmel_nfc_set_op_addr(chip, page, 0x0);
	nc->op.cs = nand->activecs->id;
	nc->op.data = ATMEL_NFC_READ_DATA;

	/* Arm the ECC engine before the NFC transfer starts. */
	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
	if (ret)
		return ret;

	ret = atmel_nfc_exec_op(nc, false);
	if (ret) {
		atmel_nand_pmecc_disable(chip, raw);
		dev_err(nc->base.dev,
			"Failed to load NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	/* Retrieve data (and OOB) from the NFC SRAM. */
	atmel_nfc_copy_from_sram(chip, buf, true);

	ret = atmel_nand_pmecc_correct_data(chip, buf, raw);

	atmel_nand_pmecc_disable(chip, raw);

	return ret;
}
1094 
/* HSMC ecc.read_page hook: NFC-assisted page read (non-raw path). */
static int atmel_hsmc_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
					   int oob_required, int page)
{
	return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
					     false);
}
1101 
/* HSMC ecc.read_page_raw hook: same path as read_page, raw == true. */
static int atmel_hsmc_nand_pmecc_read_page_raw(struct nand_chip *chip,
					       u8 *buf, int oob_required,
					       int page)
{
	return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
					     true);
}
1109 
/*
 * Configure the PMECC engine for @chip and register the resulting ECC
 * parameters (algo, step size, strength, bytes-per-step) on the chip.
 *
 * Returns 0 on success, -ENOTSUPP when the controller has no PMECC block,
 * or the error from atmel_pmecc_create_user().
 */
static int atmel_nand_pmecc_init(struct nand_chip *chip)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_nand_controller *nc;
	struct atmel_pmecc_user_req req;

	nc = to_nand_controller(chip->controller);

	if (!nc->pmecc) {
		dev_err(nc->dev, "HW ECC not supported\n");
		return -ENOTSUPP;
	}

	/* Legacy bindings carry the ECC config on the controller node. */
	if (nc->caps->legacy_of_bindings) {
		u32 val;

		if (!of_property_read_u32(nc->dev->of_node, "atmel,pmecc-cap",
					  &val))
			chip->ecc.strength = val;

		if (!of_property_read_u32(nc->dev->of_node,
					  "atmel,pmecc-sector-size",
					  &val))
			chip->ecc.size = val;
	}

	/*
	 * Strength selection, in decreasing priority: explicit "maximize"
	 * request, user/DT-provided value, NAND-advertised requirement,
	 * maximize as a last resort.
	 */
	if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
	else if (chip->ecc.strength)
		req.ecc.strength = chip->ecc.strength;
	else if (requirements->strength)
		req.ecc.strength = requirements->strength;
	else
		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;

	/* Sector size: user/DT value, chip requirement, or auto-select. */
	if (chip->ecc.size)
		req.ecc.sectorsize = chip->ecc.size;
	else if (requirements->step_size)
		req.ecc.sectorsize = requirements->step_size;
	else
		req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;

	req.pagesize = mtd->writesize;
	req.oobsize = mtd->oobsize;

	if (mtd->writesize <= 512) {
		req.ecc.bytes = 4;
		req.ecc.ooboffset = 0;
	} else {
		/* Leave 2 OOB bytes unused (presumably the BBM) — confirm. */
		req.ecc.bytes = mtd->oobsize - 2;
		req.ecc.ooboffset = ATMEL_PMECC_OOBOFFSET_AUTO;
	}

	nand->pmecc = atmel_pmecc_create_user(nc->pmecc, &req);
	if (IS_ERR(nand->pmecc))
		return PTR_ERR(nand->pmecc);

	/* Publish the negotiated config back to the NAND core. */
	chip->ecc.algo = NAND_ECC_ALGO_BCH;
	chip->ecc.size = req.ecc.sectorsize;
	chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors;
	chip->ecc.strength = req.ecc.strength;

	chip->options |= NAND_NO_SUBPAGE_WRITE;

	mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());

	return 0;
}
1182 
atmel_nand_ecc_init(struct nand_chip * chip)1183 static int atmel_nand_ecc_init(struct nand_chip *chip)
1184 {
1185 	struct atmel_nand_controller *nc;
1186 	int ret;
1187 
1188 	nc = to_nand_controller(chip->controller);
1189 
1190 	switch (chip->ecc.engine_type) {
1191 	case NAND_ECC_ENGINE_TYPE_NONE:
1192 	case NAND_ECC_ENGINE_TYPE_SOFT:
1193 		/*
1194 		 * Nothing to do, the core will initialize everything for us.
1195 		 */
1196 		break;
1197 
1198 	case NAND_ECC_ENGINE_TYPE_ON_HOST:
1199 		ret = atmel_nand_pmecc_init(chip);
1200 		if (ret)
1201 			return ret;
1202 
1203 		chip->ecc.read_page = atmel_nand_pmecc_read_page;
1204 		chip->ecc.write_page = atmel_nand_pmecc_write_page;
1205 		chip->ecc.read_page_raw = atmel_nand_pmecc_read_page_raw;
1206 		chip->ecc.write_page_raw = atmel_nand_pmecc_write_page_raw;
1207 		break;
1208 
1209 	default:
1210 		/* Other modes are not supported. */
1211 		dev_err(nc->dev, "Unsupported ECC mode: %d\n",
1212 			chip->ecc.engine_type);
1213 		return -ENOTSUPP;
1214 	}
1215 
1216 	return 0;
1217 }
1218 
atmel_hsmc_nand_ecc_init(struct nand_chip * chip)1219 static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip)
1220 {
1221 	int ret;
1222 
1223 	ret = atmel_nand_ecc_init(chip);
1224 	if (ret)
1225 		return ret;
1226 
1227 	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
1228 		return 0;
1229 
1230 	/* Adjust the ECC operations for the HSMC IP. */
1231 	chip->ecc.read_page = atmel_hsmc_nand_pmecc_read_page;
1232 	chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
1233 	chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
1234 	chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
1235 
1236 	return 0;
1237 }
1238 
/*
 * Translate the SDR timings in @conf into an SMC CS configuration
 * (setup/pulse/cycle/hold/TDF values expressed in master-clock cycles)
 * stored in @smcconf.
 *
 * Returns 0 on success, -ENOTSUPP for interfaces this controller cannot
 * implement (DDR, EDO), or the error returned by an
 * atmel_smc_cs_conf_set_*() helper when a computed value does not fit in
 * its register field (except for tADL, where -ERANGE is tolerated — see
 * below).
 */
static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
					const struct nand_interface_config *conf,
					struct atmel_smc_cs_conf *smcconf)
{
	u32 ncycles, totalcycles, timeps, mckperiodps;
	struct atmel_nand_controller *nc;
	int ret;

	nc = to_nand_controller(nand->base.controller);

	/* DDR interface not supported. */
	if (!nand_interface_is_sdr(conf))
		return -ENOTSUPP;

	/*
	 * tRC < 30ns implies EDO mode. This controller does not support this
	 * mode.
	 */
	if (conf->timings.sdr.tRC_min < 30000)
		return -ENOTSUPP;

	atmel_smc_cs_conf_init(smcconf);

	/* Master clock period in picoseconds (the timings are in ps too). */
	mckperiodps = NSEC_PER_SEC / clk_get_rate(nc->mck);
	mckperiodps *= 1000;

	/*
	 * Set write pulse timing. This one is easy to extract:
	 *
	 * NWE_PULSE = tWP
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWP_min, mckperiodps);
	totalcycles = ncycles;
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NWE_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * The write setup timing depends on the operation done on the NAND.
	 * All operations goes through the same data bus, but the operation
	 * type depends on the address we are writing to (ALE/CLE address
	 * lines).
	 * Since we have no way to differentiate the different operations at
	 * the SMC level, we must consider the worst case (the biggest setup
	 * time among all operation types):
	 *
	 * NWE_SETUP = max(tCLS, tCS, tALS, tDS) - NWE_PULSE
	 */
	timeps = max3(conf->timings.sdr.tCLS_min, conf->timings.sdr.tCS_min,
		      conf->timings.sdr.tALS_min);
	timeps = max(timeps, conf->timings.sdr.tDS_min);
	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
	/* Clamp at 0: the pulse may already cover the whole setup time. */
	ncycles = ncycles > totalcycles ? ncycles - totalcycles : 0;
	totalcycles += ncycles;
	ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NWE_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * As for the write setup timing, the write hold timing depends on the
	 * operation done on the NAND:
	 *
	 * NWE_HOLD = max(tCLH, tCH, tALH, tDH, tWH)
	 */
	timeps = max3(conf->timings.sdr.tCLH_min, conf->timings.sdr.tCH_min,
		      conf->timings.sdr.tALH_min);
	timeps = max3(timeps, conf->timings.sdr.tDH_min,
		      conf->timings.sdr.tWH_min);
	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
	totalcycles += ncycles;

	/*
	 * The write cycle timing is directly matching tWC, but is also
	 * dependent on the other timings on the setup and hold timings we
	 * calculated earlier, which gives:
	 *
	 * NWE_CYCLE = max(tWC, NWE_SETUP + NWE_PULSE + NWE_HOLD)
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWC_min, mckperiodps);
	ncycles = max(totalcycles, ncycles);
	ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NWE_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * We don't want the CS line to be toggled between each byte/word
	 * transfer to the NAND. The only way to guarantee that is to have the
	 * NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
	 *
	 * NCS_WR_PULSE = NWE_CYCLE
	 */
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_WR_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * As for the write setup timing, the read hold timing depends on the
	 * operation done on the NAND:
	 *
	 * NRD_HOLD = max(tREH, tRHOH)
	 */
	timeps = max(conf->timings.sdr.tREH_min, conf->timings.sdr.tRHOH_min);
	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
	totalcycles = ncycles;

	/*
	 * TDF = tRHZ - NRD_HOLD
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRHZ_max, mckperiodps);
	ncycles -= totalcycles;

	/*
	 * In ONFI 4.0 specs, tRHZ has been increased to support EDO NANDs and
	 * we might end up with a config that does not fit in the TDF field.
	 * Just take the max value in this case and hope that the NAND is more
	 * tolerant than advertised.
	 */
	if (ncycles > ATMEL_SMC_MODE_TDF_MAX)
		ncycles = ATMEL_SMC_MODE_TDF_MAX;
	else if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
		ncycles = ATMEL_SMC_MODE_TDF_MIN;

	smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles) |
			 ATMEL_SMC_MODE_TDFMODE_OPTIMIZED;

	/*
	 * Read pulse timing directly matches tRP:
	 *
	 * NRD_PULSE = tRP
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps);
	totalcycles += ncycles;
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * Read setup timing depends on the operation done on the NAND:
	 *
	 * NRD_SETUP = max(tAR, tCLR)
	 */
	timeps = max(conf->timings.sdr.tAR_min, conf->timings.sdr.tCLR_min);
	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
	totalcycles += ncycles;
	ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NRD_SHIFT, ncycles);
	if (ret)
		return ret;

	/*
	 * The read cycle timing is directly matching tRC, but is also
	 * dependent on the setup and hold timings we calculated earlier,
	 * which gives:
	 *
	 * NRD_CYCLE = max(tRC, NRD_SETUP + NRD_PULSE + NRD_HOLD)
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
	ncycles = max(totalcycles, ncycles);
	ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NRD_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * We don't want the CS line to be toggled between each byte/word
	 * transfer from the NAND. The only way to guarantee that is to have
	 * the NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
	 *
	 * NCS_RD_PULSE = NRD_CYCLE
	 */
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_RD_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/* Txxx timings are directly matching tXXX ones. */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tCLR_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TCLR_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tADL_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TADL_SHIFT,
					   ncycles);
	/*
	 * Version 4 of the ONFI spec mandates that tADL be at least 400
	 * nanoseconds, but, depending on the master clock rate, 400 ns may not
	 * fit in the tADL field of the SMC reg. We need to relax the check and
	 * accept the -ERANGE return code.
	 *
	 * Note that previous versions of the ONFI spec had a lower tADL_min
	 * (100 or 200 ns). It's not clear why this timing constraint got
	 * increased but it seems most NANDs are fine with values lower than
	 * 400ns, so we should be safe.
	 */
	if (ret && ret != -ERANGE)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TAR_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRR_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TRR_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWB_max, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TWB_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	/* Attach the CS line to the NFC logic. */
	smcconf->timings |= ATMEL_HSMC_TIMINGS_NFSEL;

	/* Set the appropriate data bus width. */
	if (nand->base.options & NAND_BUSWIDTH_16)
		smcconf->mode |= ATMEL_SMC_MODE_DBW_16;

	/* Operate in NRD/NWE READ/WRITEMODE. */
	smcconf->mode |= ATMEL_SMC_MODE_READMODE_NRD |
			 ATMEL_SMC_MODE_WRITEMODE_NWE;

	return 0;
}
1478 
atmel_smc_nand_setup_interface(struct atmel_nand * nand,int csline,const struct nand_interface_config * conf)1479 static int atmel_smc_nand_setup_interface(struct atmel_nand *nand,
1480 					int csline,
1481 					const struct nand_interface_config *conf)
1482 {
1483 	struct atmel_nand_controller *nc;
1484 	struct atmel_smc_cs_conf smcconf;
1485 	struct atmel_nand_cs *cs;
1486 	int ret;
1487 
1488 	nc = to_nand_controller(nand->base.controller);
1489 
1490 	ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
1491 	if (ret)
1492 		return ret;
1493 
1494 	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1495 		return 0;
1496 
1497 	cs = &nand->cs[csline];
1498 	cs->smcconf = smcconf;
1499 	atmel_smc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
1500 
1501 	return 0;
1502 }
1503 
atmel_hsmc_nand_setup_interface(struct atmel_nand * nand,int csline,const struct nand_interface_config * conf)1504 static int atmel_hsmc_nand_setup_interface(struct atmel_nand *nand,
1505 					int csline,
1506 					const struct nand_interface_config *conf)
1507 {
1508 	struct atmel_hsmc_nand_controller *nc;
1509 	struct atmel_smc_cs_conf smcconf;
1510 	struct atmel_nand_cs *cs;
1511 	int ret;
1512 
1513 	nc = to_hsmc_nand_controller(nand->base.controller);
1514 
1515 	ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
1516 	if (ret)
1517 		return ret;
1518 
1519 	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1520 		return 0;
1521 
1522 	cs = &nand->cs[csline];
1523 	cs->smcconf = smcconf;
1524 
1525 	if (cs->rb.type == ATMEL_NAND_NATIVE_RB)
1526 		cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id);
1527 
1528 	atmel_hsmc_cs_conf_apply(nc->base.smc, nc->hsmc_layout, cs->id,
1529 				 &cs->smcconf);
1530 
1531 	return 0;
1532 }
1533 
atmel_nand_setup_interface(struct nand_chip * chip,int csline,const struct nand_interface_config * conf)1534 static int atmel_nand_setup_interface(struct nand_chip *chip, int csline,
1535 				      const struct nand_interface_config *conf)
1536 {
1537 	struct atmel_nand *nand = to_atmel_nand(chip);
1538 	const struct nand_sdr_timings *sdr;
1539 	struct atmel_nand_controller *nc;
1540 
1541 	sdr = nand_get_sdr_timings(conf);
1542 	if (IS_ERR(sdr))
1543 		return PTR_ERR(sdr);
1544 
1545 	nc = to_nand_controller(nand->base.controller);
1546 
1547 	if (csline >= nand->numcs ||
1548 	    (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY))
1549 		return -EINVAL;
1550 
1551 	return nc->caps->ops->setup_interface(nand, csline, conf);
1552 }
1553 
/* Dispatch a NAND operation to the controller-specific exec_op handler. */
static int atmel_nand_exec_op(struct nand_chip *chip,
			      const struct nand_operation *op,
			      bool check_only)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_nand_controller *nc =
		to_nand_controller(nand->base.controller);

	return nc->caps->ops->exec_op(nand, op, check_only);
}
1565 
atmel_nand_init(struct atmel_nand_controller * nc,struct atmel_nand * nand)1566 static void atmel_nand_init(struct atmel_nand_controller *nc,
1567 			    struct atmel_nand *nand)
1568 {
1569 	struct nand_chip *chip = &nand->base;
1570 	struct mtd_info *mtd = nand_to_mtd(chip);
1571 
1572 	mtd->dev.parent = nc->dev;
1573 	nand->base.controller = &nc->base;
1574 
1575 	if (!nc->mck || !nc->caps->ops->setup_interface)
1576 		chip->options |= NAND_KEEP_TIMINGS;
1577 
1578 	/*
1579 	 * Use a bounce buffer when the buffer passed by the MTD user is not
1580 	 * suitable for DMA.
1581 	 */
1582 	if (nc->dmac)
1583 		chip->options |= NAND_USES_DMA;
1584 
1585 	/* Default to HW ECC if pmecc is available. */
1586 	if (nc->pmecc)
1587 		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
1588 }
1589 
atmel_smc_nand_init(struct atmel_nand_controller * nc,struct atmel_nand * nand)1590 static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
1591 				struct atmel_nand *nand)
1592 {
1593 	struct nand_chip *chip = &nand->base;
1594 	struct atmel_smc_nand_controller *smc_nc;
1595 	int i;
1596 
1597 	atmel_nand_init(nc, nand);
1598 
1599 	smc_nc = to_smc_nand_controller(chip->controller);
1600 	if (!smc_nc->ebi_csa_regmap)
1601 		return;
1602 
1603 	/* Attach the CS to the NAND Flash logic. */
1604 	for (i = 0; i < nand->numcs; i++)
1605 		regmap_update_bits(smc_nc->ebi_csa_regmap,
1606 				   smc_nc->ebi_csa->offs,
1607 				   BIT(nand->cs[i].id), BIT(nand->cs[i].id));
1608 
1609 	if (smc_nc->ebi_csa->nfd0_on_d16)
1610 		regmap_update_bits(smc_nc->ebi_csa_regmap,
1611 				   smc_nc->ebi_csa->offs,
1612 				   smc_nc->ebi_csa->nfd0_on_d16,
1613 				   smc_nc->ebi_csa->nfd0_on_d16);
1614 }
1615 
atmel_nand_controller_remove_nand(struct atmel_nand * nand)1616 static int atmel_nand_controller_remove_nand(struct atmel_nand *nand)
1617 {
1618 	struct nand_chip *chip = &nand->base;
1619 	struct mtd_info *mtd = nand_to_mtd(chip);
1620 	int ret;
1621 
1622 	ret = mtd_device_unregister(mtd);
1623 	if (ret)
1624 		return ret;
1625 
1626 	nand_cleanup(chip);
1627 	list_del(&nand->node);
1628 
1629 	return 0;
1630 }
1631 
/*
 * Allocate and populate an atmel_nand from DT node @np (new-style
 * bindings): one CS entry per "reg" element, plus optional detect, R/B and
 * CS GPIOs. Missing GPIOs (-ENOENT) are silently accepted.
 *
 * Returns the new atmel_nand (devm-allocated) or an ERR_PTR().
 */
static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
					    struct device_node *np,
					    int reg_cells)
{
	struct atmel_nand *nand;
	struct gpio_desc *gpio;
	int numcs, ret, i;

	numcs = of_property_count_elems_of_size(np, "reg",
						reg_cells * sizeof(u32));
	if (numcs < 1) {
		dev_err(nc->dev, "Missing or invalid reg property\n");
		return ERR_PTR(-EINVAL);
	}

	nand = devm_kzalloc(nc->dev, struct_size(nand, cs, numcs), GFP_KERNEL);
	if (!nand)
		return ERR_PTR(-ENOMEM);

	nand->numcs = numcs;

	/* Optional card-detect GPIO. */
	gpio = devm_fwnode_gpiod_get(nc->dev, of_fwnode_handle(np),
				     "det", GPIOD_IN, "nand-det");
	if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
		dev_err(nc->dev,
			"Failed to get detect gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return ERR_CAST(gpio);
	}

	if (!IS_ERR(gpio))
		nand->cdgpio = gpio;

	for (i = 0; i < numcs; i++) {
		struct resource res;
		u32 val;

		/*
		 * NOTE(review): resource index 0 is translated for every CS,
		 * while the CS id below comes from "reg" entry i — confirm
		 * whether multi-CS setups should translate entry i here.
		 */
		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			dev_err(nc->dev, "Invalid reg property (err = %d)\n",
				ret);
			return ERR_PTR(ret);
		}

		/* First cell of "reg" entry i is the CS id. */
		ret = of_property_read_u32_index(np, "reg", i * reg_cells,
						 &val);
		if (ret) {
			dev_err(nc->dev, "Invalid reg property (err = %d)\n",
				ret);
			return ERR_PTR(ret);
		}

		nand->cs[i].id = val;

		nand->cs[i].io.dma = res.start;
		nand->cs[i].io.virt = devm_ioremap_resource(nc->dev, &res);
		if (IS_ERR(nand->cs[i].io.virt))
			return ERR_CAST(nand->cs[i].io.virt);

		/* Native R/B line when "atmel,rb" is set, GPIO otherwise. */
		if (!of_property_read_u32(np, "atmel,rb", &val)) {
			if (val > ATMEL_NFC_MAX_RB_ID)
				return ERR_PTR(-EINVAL);

			nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
			nand->cs[i].rb.id = val;
		} else {
			gpio = devm_fwnode_gpiod_get_index(nc->dev,
							   of_fwnode_handle(np),
							   "rb", i, GPIOD_IN,
							   "nand-rb");
			if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
				dev_err(nc->dev,
					"Failed to get R/B gpio (err = %ld)\n",
					PTR_ERR(gpio));
				return ERR_CAST(gpio);
			}

			if (!IS_ERR(gpio)) {
				nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB;
				nand->cs[i].rb.gpio = gpio;
			}
		}

		/* Optional per-CS chip-select GPIO, driven high (inactive). */
		gpio = devm_fwnode_gpiod_get_index(nc->dev,
						   of_fwnode_handle(np),
						   "cs", i, GPIOD_OUT_HIGH,
						   "nand-cs");
		if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
			dev_err(nc->dev,
				"Failed to get CS gpio (err = %ld)\n",
				PTR_ERR(gpio));
			return ERR_CAST(gpio);
		}

		if (!IS_ERR(gpio))
			nand->cs[i].csgpio = gpio;
	}

	nand_set_flash_node(&nand->base, np);

	return nand;
}
1734 
1735 static int
atmel_nand_controller_add_nand(struct atmel_nand_controller * nc,struct atmel_nand * nand)1736 atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
1737 			       struct atmel_nand *nand)
1738 {
1739 	struct nand_chip *chip = &nand->base;
1740 	struct mtd_info *mtd = nand_to_mtd(chip);
1741 	int ret;
1742 
1743 	/* No card inserted, skip this NAND. */
1744 	if (nand->cdgpio && gpiod_get_value(nand->cdgpio)) {
1745 		dev_info(nc->dev, "No SmartMedia card inserted.\n");
1746 		return 0;
1747 	}
1748 
1749 	nc->caps->ops->nand_init(nc, nand);
1750 
1751 	ret = nand_scan(chip, nand->numcs);
1752 	if (ret) {
1753 		dev_err(nc->dev, "NAND scan failed: %d\n", ret);
1754 		return ret;
1755 	}
1756 
1757 	ret = mtd_device_register(mtd, NULL, 0);
1758 	if (ret) {
1759 		dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
1760 		nand_cleanup(chip);
1761 		return ret;
1762 	}
1763 
1764 	list_add_tail(&nand->node, &nc->chips);
1765 
1766 	return 0;
1767 }
1768 
1769 static int
atmel_nand_controller_remove_nands(struct atmel_nand_controller * nc)1770 atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc)
1771 {
1772 	struct atmel_nand *nand, *tmp;
1773 	int ret;
1774 
1775 	list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
1776 		ret = atmel_nand_controller_remove_nand(nand);
1777 		if (ret)
1778 			return ret;
1779 	}
1780 
1781 	return 0;
1782 }
1783 
/*
 * Legacy bindings: the single NAND chip is described directly on the
 * controller node, with R/B, CS and card-detect GPIOs referenced by index
 * (0, 1 and 2 respectively) in an unnamed gpios property.
 */
static int
atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
{
	struct device *dev = nc->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct atmel_nand *nand;
	struct gpio_desc *gpio;
	struct resource *res;

	/*
	 * Legacy bindings only allow connecting a single NAND with a unique CS
	 * line to the controller.
	 */
	nand = devm_kzalloc(nc->dev, sizeof(*nand) + sizeof(*nand->cs),
			    GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	nand->numcs = 1;

	nand->cs[0].io.virt = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(nand->cs[0].io.virt))
		return PTR_ERR(nand->cs[0].io.virt);

	nand->cs[0].io.dma = res->start;

	/*
	 * The old driver was hardcoding the CS id to 3 for all sama5
	 * controllers. Since this id is only meaningful for the sama5
	 * controller we can safely assign this id to 3 no matter the
	 * controller.
	 * If one wants to connect a NAND to a different CS line, he will
	 * have to use the new bindings.
	 */
	nand->cs[0].id = 3;

	/* R/B GPIO. */
	gpio = devm_gpiod_get_index_optional(dev, NULL, 0,  GPIOD_IN);
	if (IS_ERR(gpio)) {
		dev_err(dev, "Failed to get R/B gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	if (gpio) {
		nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB;
		nand->cs[0].rb.gpio = gpio;
	}

	/* CS GPIO. */
	gpio = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_HIGH);
	if (IS_ERR(gpio)) {
		dev_err(dev, "Failed to get CS gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	nand->cs[0].csgpio = gpio;

	/* Card detect GPIO. */
	gpio = devm_gpiod_get_index_optional(nc->dev, NULL, 2, GPIOD_IN);
	if (IS_ERR(gpio)) {
		dev_err(dev,
			"Failed to get detect gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	nand->cdgpio = gpio;

	nand_set_flash_node(&nand->base, nc->dev->of_node);

	return atmel_nand_controller_add_nand(nc, nand);
}
1858 
atmel_nand_controller_add_nands(struct atmel_nand_controller * nc)1859 static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
1860 {
1861 	struct device_node *np, *nand_np;
1862 	struct device *dev = nc->dev;
1863 	int ret, reg_cells;
1864 	u32 val;
1865 
1866 	/* We do not retrieve the SMC syscon when parsing old DTs. */
1867 	if (nc->caps->legacy_of_bindings)
1868 		return atmel_nand_controller_legacy_add_nands(nc);
1869 
1870 	np = dev->of_node;
1871 
1872 	ret = of_property_read_u32(np, "#address-cells", &val);
1873 	if (ret) {
1874 		dev_err(dev, "missing #address-cells property\n");
1875 		return ret;
1876 	}
1877 
1878 	reg_cells = val;
1879 
1880 	ret = of_property_read_u32(np, "#size-cells", &val);
1881 	if (ret) {
1882 		dev_err(dev, "missing #size-cells property\n");
1883 		return ret;
1884 	}
1885 
1886 	reg_cells += val;
1887 
1888 	for_each_child_of_node(np, nand_np) {
1889 		struct atmel_nand *nand;
1890 
1891 		nand = atmel_nand_create(nc, nand_np, reg_cells);
1892 		if (IS_ERR(nand)) {
1893 			ret = PTR_ERR(nand);
1894 			goto err;
1895 		}
1896 
1897 		ret = atmel_nand_controller_add_nand(nc, nand);
1898 		if (ret)
1899 			goto err;
1900 	}
1901 
1902 	return 0;
1903 
1904 err:
1905 	atmel_nand_controller_remove_nands(nc);
1906 
1907 	return ret;
1908 }
1909 
/* Release the DMA channel and clock reference held by the controller. */
static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc)
{
	if (nc->dmac)
		dma_release_channel(nc->dmac);

	clk_put(nc->mck);
}
1917 
/*
 * Per-SoC EBI CSA (chip-select assignment) register descriptions: the
 * register offset inside the matrix/SFR syscon, plus — for sam9x60 only —
 * the bit that muxes NFD0 onto D16.
 */
static const struct atmel_smc_nand_ebi_csa_cfg at91sam9260_ebi_csa = {
	.offs = AT91SAM9260_MATRIX_EBICSA,
};

static const struct atmel_smc_nand_ebi_csa_cfg at91sam9261_ebi_csa = {
	.offs = AT91SAM9261_MATRIX_EBICSA,
};

static const struct atmel_smc_nand_ebi_csa_cfg at91sam9263_ebi_csa = {
	.offs = AT91SAM9263_MATRIX_EBI0CSA,
};

static const struct atmel_smc_nand_ebi_csa_cfg at91sam9rl_ebi_csa = {
	.offs = AT91SAM9RL_MATRIX_EBICSA,
};

static const struct atmel_smc_nand_ebi_csa_cfg at91sam9g45_ebi_csa = {
	.offs = AT91SAM9G45_MATRIX_EBICSA,
};

static const struct atmel_smc_nand_ebi_csa_cfg at91sam9n12_ebi_csa = {
	.offs = AT91SAM9N12_MATRIX_EBICSA,
};

static const struct atmel_smc_nand_ebi_csa_cfg at91sam9x5_ebi_csa = {
	.offs = AT91SAM9X5_MATRIX_EBICSA,
};

static const struct atmel_smc_nand_ebi_csa_cfg sam9x60_ebi_csa = {
	.offs = AT91_SFR_CCFG_EBICSA,
	.nfd0_on_d16 = AT91_SFR_CCFG_NFD0_ON_D16,
};
1950 
/* Syscon compatibles providing the EBI CSA register, with their layouts. */
static const struct of_device_id __maybe_unused atmel_ebi_csa_regmap_of_ids[] = {
	{
		.compatible = "atmel,at91sam9260-matrix",
		.data = &at91sam9260_ebi_csa,
	},
	{
		.compatible = "atmel,at91sam9261-matrix",
		.data = &at91sam9261_ebi_csa,
	},
	{
		.compatible = "atmel,at91sam9263-matrix",
		.data = &at91sam9263_ebi_csa,
	},
	{
		.compatible = "atmel,at91sam9rl-matrix",
		.data = &at91sam9rl_ebi_csa,
	},
	{
		.compatible = "atmel,at91sam9g45-matrix",
		.data = &at91sam9g45_ebi_csa,
	},
	{
		.compatible = "atmel,at91sam9n12-matrix",
		.data = &at91sam9n12_ebi_csa,
	},
	{
		.compatible = "atmel,at91sam9x5-matrix",
		.data = &at91sam9x5_ebi_csa,
	},
	{
		.compatible = "microchip,sam9x60-sfr",
		.data = &sam9x60_ebi_csa,
	},
	{ /* sentinel */ },
};
1986 
atmel_nand_attach_chip(struct nand_chip * chip)1987 static int atmel_nand_attach_chip(struct nand_chip *chip)
1988 {
1989 	struct atmel_nand_controller *nc = to_nand_controller(chip->controller);
1990 	struct atmel_nand *nand = to_atmel_nand(chip);
1991 	struct mtd_info *mtd = nand_to_mtd(chip);
1992 	int ret;
1993 
1994 	ret = nc->caps->ops->ecc_init(chip);
1995 	if (ret)
1996 		return ret;
1997 
1998 	if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
1999 		/*
2000 		 * We keep the MTD name unchanged to avoid breaking platforms
2001 		 * where the MTD cmdline parser is used and the bootloader
2002 		 * has not been updated to use the new naming scheme.
2003 		 */
2004 		mtd->name = "atmel_nand";
2005 	} else if (!mtd->name) {
2006 		/*
2007 		 * If the new bindings are used and the bootloader has not been
2008 		 * updated to pass a new mtdparts parameter on the cmdline, you
2009 		 * should define the following property in your nand node:
2010 		 *
2011 		 *	label = "atmel_nand";
2012 		 *
2013 		 * This way, mtd->name will be set by the core when
2014 		 * nand_set_flash_node() is called.
2015 		 */
2016 		mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
2017 					   "%s:nand.%d", dev_name(nc->dev),
2018 					   nand->cs[0].id);
2019 		if (!mtd->name) {
2020 			dev_err(nc->dev, "Failed to allocate mtd->name\n");
2021 			return -ENOMEM;
2022 		}
2023 	}
2024 
2025 	return 0;
2026 }
2027 
/* nand_controller hooks shared by all flavors of this controller. */
static const struct nand_controller_ops atmel_nand_controller_ops = {
	.attach_chip = atmel_nand_attach_chip,
	.setup_interface = atmel_nand_setup_interface,
	.exec_op = atmel_nand_exec_op,
};
2033 
atmel_nand_controller_init(struct atmel_nand_controller * nc,struct platform_device * pdev,const struct atmel_nand_controller_caps * caps)2034 static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
2035 				struct platform_device *pdev,
2036 				const struct atmel_nand_controller_caps *caps)
2037 {
2038 	struct device *dev = &pdev->dev;
2039 	struct device_node *np = dev->of_node;
2040 	int ret;
2041 
2042 	nand_controller_init(&nc->base);
2043 	nc->base.ops = &atmel_nand_controller_ops;
2044 	INIT_LIST_HEAD(&nc->chips);
2045 	nc->dev = dev;
2046 	nc->caps = caps;
2047 
2048 	platform_set_drvdata(pdev, nc);
2049 
2050 	nc->pmecc = devm_atmel_pmecc_get(dev);
2051 	if (IS_ERR(nc->pmecc))
2052 		return dev_err_probe(dev, PTR_ERR(nc->pmecc),
2053 				     "Could not get PMECC object\n");
2054 
2055 	if (nc->caps->has_dma && !atmel_nand_avoid_dma) {
2056 		dma_cap_mask_t mask;
2057 
2058 		dma_cap_zero(mask);
2059 		dma_cap_set(DMA_MEMCPY, mask);
2060 
2061 		nc->dmac = dma_request_channel(mask, NULL, NULL);
2062 		if (nc->dmac)
2063 			dev_info(nc->dev, "using %s for DMA transfers\n",
2064 				 dma_chan_name(nc->dmac));
2065 		else
2066 			dev_err(nc->dev, "Failed to request DMA channel\n");
2067 	}
2068 
2069 	/* We do not retrieve the SMC syscon when parsing old DTs. */
2070 	if (nc->caps->legacy_of_bindings)
2071 		return 0;
2072 
2073 	nc->mck = of_clk_get(dev->parent->of_node, 0);
2074 	if (IS_ERR(nc->mck)) {
2075 		dev_err(dev, "Failed to retrieve MCK clk\n");
2076 		ret = PTR_ERR(nc->mck);
2077 		goto out_release_dma;
2078 	}
2079 
2080 	np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
2081 	if (!np) {
2082 		dev_err(dev, "Missing or invalid atmel,smc property\n");
2083 		ret = -EINVAL;
2084 		goto out_release_dma;
2085 	}
2086 
2087 	nc->smc = syscon_node_to_regmap(np);
2088 	of_node_put(np);
2089 	if (IS_ERR(nc->smc)) {
2090 		ret = PTR_ERR(nc->smc);
2091 		dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret);
2092 		goto out_release_dma;
2093 	}
2094 
2095 	return 0;
2096 
2097 out_release_dma:
2098 	if (nc->dmac)
2099 		dma_release_channel(nc->dmac);
2100 
2101 	return ret;
2102 }
2103 
/*
 * SMC flavor specific init: retrieve the optional EBI CSA regmap and the
 * matching per-SoC CSA register description.
 */
static int
atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
{
	struct device *dev = nc->base.dev;
	const struct of_device_id *match;
	struct device_node *np;
	int ret;

	/* We do not retrieve the EBICSA regmap when parsing old DTs. */
	if (nc->base.caps->legacy_of_bindings)
		return 0;

	/* The CSA regmap reference is optional: bail out silently if absent. */
	np = of_parse_phandle(dev->parent->of_node,
			      nc->base.caps->ebi_csa_regmap_name, 0);
	if (!np)
		return 0;

	/* Unknown syscon compatible: treat as "no CSA support". */
	match = of_match_node(atmel_ebi_csa_regmap_of_ids, np);
	if (!match) {
		of_node_put(np);
		return 0;
	}

	nc->ebi_csa_regmap = syscon_node_to_regmap(np);
	of_node_put(np);
	if (IS_ERR(nc->ebi_csa_regmap)) {
		ret = PTR_ERR(nc->ebi_csa_regmap);
		dev_err(dev, "Could not get EBICSA regmap (err = %d)\n", ret);
		return ret;
	}

	/*
	 * NOTE(review): match->data points to a static const config and the
	 * const qualifier is cast away here; the "+= 4" below then writes
	 * to that shared object (and would do so again on re-probe).
	 * Consider copying the config instead — verify this is intentional.
	 */
	nc->ebi_csa = (struct atmel_smc_nand_ebi_csa_cfg *)match->data;

	/*
	 * The at91sam9263 has 2 EBIs, if the NAND controller is under EBI1
	 * add 4 to ->ebi_csa->offs.
	 */
	if (of_device_is_compatible(dev->parent->of_node,
				    "atmel,at91sam9263-ebi1"))
		nc->ebi_csa->offs += 4;

	return 0;
}
2147 
2148 static int
atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller * nc)2149 atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
2150 {
2151 	struct regmap_config regmap_conf = {
2152 		.reg_bits = 32,
2153 		.val_bits = 32,
2154 		.reg_stride = 4,
2155 	};
2156 
2157 	struct device *dev = nc->base.dev;
2158 	struct device_node *nand_np, *nfc_np;
2159 	void __iomem *iomem;
2160 	struct resource res;
2161 	int ret;
2162 
2163 	nand_np = dev->of_node;
2164 	nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
2165 	if (!nfc_np) {
2166 		dev_err(dev, "Could not find device node for sama5d3-nfc\n");
2167 		return -ENODEV;
2168 	}
2169 
2170 	nc->clk = of_clk_get(nfc_np, 0);
2171 	if (IS_ERR(nc->clk)) {
2172 		ret = PTR_ERR(nc->clk);
2173 		dev_err(dev, "Failed to retrieve HSMC clock (err = %d)\n",
2174 			ret);
2175 		goto out;
2176 	}
2177 
2178 	ret = clk_prepare_enable(nc->clk);
2179 	if (ret) {
2180 		dev_err(dev, "Failed to enable the HSMC clock (err = %d)\n",
2181 			ret);
2182 		goto out;
2183 	}
2184 
2185 	nc->irq = of_irq_get(nand_np, 0);
2186 	if (nc->irq <= 0) {
2187 		ret = nc->irq ?: -ENXIO;
2188 		if (ret != -EPROBE_DEFER)
2189 			dev_err(dev, "Failed to get IRQ number (err = %d)\n",
2190 				ret);
2191 		goto out;
2192 	}
2193 
2194 	ret = of_address_to_resource(nfc_np, 0, &res);
2195 	if (ret) {
2196 		dev_err(dev, "Invalid or missing NFC IO resource (err = %d)\n",
2197 			ret);
2198 		goto out;
2199 	}
2200 
2201 	iomem = devm_ioremap_resource(dev, &res);
2202 	if (IS_ERR(iomem)) {
2203 		ret = PTR_ERR(iomem);
2204 		goto out;
2205 	}
2206 
2207 	regmap_conf.name = "nfc-io";
2208 	regmap_conf.max_register = resource_size(&res) - 4;
2209 	nc->io = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
2210 	if (IS_ERR(nc->io)) {
2211 		ret = PTR_ERR(nc->io);
2212 		dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
2213 			ret);
2214 		goto out;
2215 	}
2216 
2217 	ret = of_address_to_resource(nfc_np, 1, &res);
2218 	if (ret) {
2219 		dev_err(dev, "Invalid or missing HSMC resource (err = %d)\n",
2220 			ret);
2221 		goto out;
2222 	}
2223 
2224 	iomem = devm_ioremap_resource(dev, &res);
2225 	if (IS_ERR(iomem)) {
2226 		ret = PTR_ERR(iomem);
2227 		goto out;
2228 	}
2229 
2230 	regmap_conf.name = "smc";
2231 	regmap_conf.max_register = resource_size(&res) - 4;
2232 	nc->base.smc = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
2233 	if (IS_ERR(nc->base.smc)) {
2234 		ret = PTR_ERR(nc->base.smc);
2235 		dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
2236 			ret);
2237 		goto out;
2238 	}
2239 
2240 	ret = of_address_to_resource(nfc_np, 2, &res);
2241 	if (ret) {
2242 		dev_err(dev, "Invalid or missing SRAM resource (err = %d)\n",
2243 			ret);
2244 		goto out;
2245 	}
2246 
2247 	nc->sram.virt = devm_ioremap_resource(dev, &res);
2248 	if (IS_ERR(nc->sram.virt)) {
2249 		ret = PTR_ERR(nc->sram.virt);
2250 		goto out;
2251 	}
2252 
2253 	nc->sram.dma = res.start;
2254 
2255 out:
2256 	of_node_put(nfc_np);
2257 
2258 	return ret;
2259 }
2260 
/*
 * New-bindings init for the HSMC flavor: the SMC regmap/IRQ, the NFC IO
 * area and the NFC SRAM pool are referenced through phandles/properties
 * instead of a legacy "atmel,sama5d3-nfc" child node.
 */
static int
atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc)
{
	struct device *dev = nc->base.dev;
	struct device_node *np;
	int ret;

	np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
	if (!np) {
		dev_err(dev, "Missing or invalid atmel,smc property\n");
		return -EINVAL;
	}

	/* NOTE(review): return value is not checked — confirm it can't fail. */
	nc->hsmc_layout = atmel_hsmc_get_reg_layout(np);

	/* The NFC interrupt line is provided by the HSMC node. */
	nc->irq = of_irq_get(np, 0);
	of_node_put(np);
	if (nc->irq <= 0) {
		ret = nc->irq ?: -ENXIO;
		/* Stay silent on probe deferral: the IRQ may appear later. */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get IRQ number (err = %d)\n",
				ret);
		return ret;
	}

	np = of_parse_phandle(dev->of_node, "atmel,nfc-io", 0);
	if (!np) {
		dev_err(dev, "Missing or invalid atmel,nfc-io property\n");
		return -EINVAL;
	}

	nc->io = syscon_node_to_regmap(np);
	of_node_put(np);
	if (IS_ERR(nc->io)) {
		ret = PTR_ERR(nc->io);
		dev_err(dev, "Could not get NFC IO regmap (err = %d)\n", ret);
		return ret;
	}

	nc->sram.pool = of_gen_pool_get(nc->base.dev->of_node,
					 "atmel,nfc-sram", 0);
	if (!nc->sram.pool) {
		dev_err(nc->base.dev, "Missing SRAM\n");
		return -ENOMEM;
	}

	/* Grab a DMA-able chunk of NFC SRAM (freed by ->remove()). */
	nc->sram.virt = (void __iomem *)gen_pool_dma_alloc(nc->sram.pool,
							   ATMEL_NFC_SRAM_SIZE,
							   &nc->sram.dma);
	if (!nc->sram.virt) {
		dev_err(nc->base.dev,
			"Could not allocate memory from the NFC SRAM pool\n");
		return -ENOMEM;
	}

	return 0;
}
2318 
/*
 * Tear down an HSMC controller: unregister the NAND chips, disable the NFC
 * block, release the SRAM allocation and the HSMC clock, then run the
 * common cleanup.
 */
static int
atmel_hsmc_nand_controller_remove(struct atmel_nand_controller *nc)
{
	struct atmel_hsmc_nand_controller *hsmc_nc;
	int ret;

	ret = atmel_nand_controller_remove_nands(nc);
	if (ret)
		return ret;

	hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base);
	/* Disable the NFC block before releasing its resources. */
	regmap_write(hsmc_nc->base.smc, ATMEL_HSMC_NFC_CTRL,
		     ATMEL_HSMC_NFC_CTRL_DIS);

	/* ->sram.pool is only set by the new-bindings init path. */
	if (hsmc_nc->sram.pool)
		gen_pool_free(hsmc_nc->sram.pool,
			      (unsigned long)hsmc_nc->sram.virt,
			      ATMEL_NFC_SRAM_SIZE);

	/* ->clk is only set by the legacy-bindings init path. */
	if (hsmc_nc->clk) {
		clk_disable_unprepare(hsmc_nc->clk);
		clk_put(hsmc_nc->clk);
	}

	atmel_nand_controller_cleanup(nc);

	return 0;
}
2347 
atmel_hsmc_nand_controller_probe(struct platform_device * pdev,const struct atmel_nand_controller_caps * caps)2348 static int atmel_hsmc_nand_controller_probe(struct platform_device *pdev,
2349 				const struct atmel_nand_controller_caps *caps)
2350 {
2351 	struct device *dev = &pdev->dev;
2352 	struct atmel_hsmc_nand_controller *nc;
2353 	int ret;
2354 
2355 	nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
2356 	if (!nc)
2357 		return -ENOMEM;
2358 
2359 	ret = atmel_nand_controller_init(&nc->base, pdev, caps);
2360 	if (ret)
2361 		return ret;
2362 
2363 	if (caps->legacy_of_bindings)
2364 		ret = atmel_hsmc_nand_controller_legacy_init(nc);
2365 	else
2366 		ret = atmel_hsmc_nand_controller_init(nc);
2367 
2368 	if (ret)
2369 		return ret;
2370 
2371 	/* Make sure all irqs are masked before registering our IRQ handler. */
2372 	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
2373 	ret = devm_request_irq(dev, nc->irq, atmel_nfc_interrupt,
2374 			       IRQF_SHARED, "nfc", nc);
2375 	if (ret) {
2376 		dev_err(dev,
2377 			"Could not get register NFC interrupt handler (err = %d)\n",
2378 			ret);
2379 		goto err;
2380 	}
2381 
2382 	/* Initial NFC configuration. */
2383 	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG,
2384 		     ATMEL_HSMC_NFC_CFG_DTO_MAX);
2385 	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
2386 		     ATMEL_HSMC_NFC_CTRL_EN);
2387 
2388 	ret = atmel_nand_controller_add_nands(&nc->base);
2389 	if (ret)
2390 		goto err;
2391 
2392 	return 0;
2393 
2394 err:
2395 	atmel_hsmc_nand_controller_remove(&nc->base);
2396 
2397 	return ret;
2398 }
2399 
/* SoC specific hooks for HSMC (sama5 and later) based controllers. */
static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = {
	.probe = atmel_hsmc_nand_controller_probe,
	.remove = atmel_hsmc_nand_controller_remove,
	.ecc_init = atmel_hsmc_nand_ecc_init,
	.nand_init = atmel_nand_init,
	.setup_interface = atmel_hsmc_nand_setup_interface,
	.exec_op = atmel_hsmc_nand_exec_op,
};

static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_hsmc_nc_ops,
};

/* Only used to parse old bindings. */
static const struct atmel_nand_controller_caps atmel_sama5_nand_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_hsmc_nc_ops,
	.legacy_of_bindings = true,
};
2424 
atmel_smc_nand_controller_probe(struct platform_device * pdev,const struct atmel_nand_controller_caps * caps)2425 static int atmel_smc_nand_controller_probe(struct platform_device *pdev,
2426 				const struct atmel_nand_controller_caps *caps)
2427 {
2428 	struct device *dev = &pdev->dev;
2429 	struct atmel_smc_nand_controller *nc;
2430 	int ret;
2431 
2432 	nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
2433 	if (!nc)
2434 		return -ENOMEM;
2435 
2436 	ret = atmel_nand_controller_init(&nc->base, pdev, caps);
2437 	if (ret)
2438 		return ret;
2439 
2440 	ret = atmel_smc_nand_controller_init(nc);
2441 	if (ret)
2442 		return ret;
2443 
2444 	return atmel_nand_controller_add_nands(&nc->base);
2445 }
2446 
/* Unregister the NAND chips, then release the controller resources. */
static int
atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc)
{
	int ret = atmel_nand_controller_remove_nands(nc);

	if (!ret)
		atmel_nand_controller_cleanup(nc);

	return ret;
}
2460 
2461 /*
2462  * The SMC reg layout of at91rm9200 is completely different which prevents us
2463  * from re-using atmel_smc_nand_setup_interface() for the
2464  * ->setup_interface() hook.
2465  * At this point, there's no support for the at91rm9200 SMC IP, so we leave
2466  * ->setup_interface() unassigned.
2467  */
2468 static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
2469 	.probe = atmel_smc_nand_controller_probe,
2470 	.remove = atmel_smc_nand_controller_remove,
2471 	.ecc_init = atmel_nand_ecc_init,
2472 	.nand_init = atmel_smc_nand_init,
2473 	.exec_op = atmel_smc_nand_exec_op,
2474 };
2475 
2476 static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
2477 	.ale_offs = BIT(21),
2478 	.cle_offs = BIT(22),
2479 	.ebi_csa_regmap_name = "atmel,matrix",
2480 	.ops = &at91rm9200_nc_ops,
2481 };
2482 
2483 static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
2484 	.probe = atmel_smc_nand_controller_probe,
2485 	.remove = atmel_smc_nand_controller_remove,
2486 	.ecc_init = atmel_nand_ecc_init,
2487 	.nand_init = atmel_smc_nand_init,
2488 	.setup_interface = atmel_smc_nand_setup_interface,
2489 	.exec_op = atmel_smc_nand_exec_op,
2490 };
2491 
2492 static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
2493 	.ale_offs = BIT(21),
2494 	.cle_offs = BIT(22),
2495 	.ebi_csa_regmap_name = "atmel,matrix",
2496 	.ops = &atmel_smc_nc_ops,
2497 };
2498 
2499 static const struct atmel_nand_controller_caps atmel_sam9261_nc_caps = {
2500 	.ale_offs = BIT(22),
2501 	.cle_offs = BIT(21),
2502 	.ebi_csa_regmap_name = "atmel,matrix",
2503 	.ops = &atmel_smc_nc_ops,
2504 };
2505 
2506 static const struct atmel_nand_controller_caps atmel_sam9g45_nc_caps = {
2507 	.has_dma = true,
2508 	.ale_offs = BIT(21),
2509 	.cle_offs = BIT(22),
2510 	.ebi_csa_regmap_name = "atmel,matrix",
2511 	.ops = &atmel_smc_nc_ops,
2512 };
2513 
2514 static const struct atmel_nand_controller_caps microchip_sam9x60_nc_caps = {
2515 	.has_dma = true,
2516 	.ale_offs = BIT(21),
2517 	.cle_offs = BIT(22),
2518 	.ebi_csa_regmap_name = "microchip,sfr",
2519 	.ops = &atmel_smc_nc_ops,
2520 };
2521 
2522 /* Only used to parse old bindings. */
2523 static const struct atmel_nand_controller_caps atmel_rm9200_nand_caps = {
2524 	.ale_offs = BIT(21),
2525 	.cle_offs = BIT(22),
2526 	.ops = &atmel_smc_nc_ops,
2527 	.legacy_of_bindings = true,
2528 };
2529 
2530 static const struct atmel_nand_controller_caps atmel_sam9261_nand_caps = {
2531 	.ale_offs = BIT(22),
2532 	.cle_offs = BIT(21),
2533 	.ops = &atmel_smc_nc_ops,
2534 	.legacy_of_bindings = true,
2535 };
2536 
2537 static const struct atmel_nand_controller_caps atmel_sam9g45_nand_caps = {
2538 	.has_dma = true,
2539 	.ale_offs = BIT(21),
2540 	.cle_offs = BIT(22),
2541 	.ops = &atmel_smc_nc_ops,
2542 	.legacy_of_bindings = true,
2543 };
2544 
/*
 * Compatible strings (new scheme and old/deprecated bindings) mapped to the
 * matching controller capabilities.
 */
static const struct of_device_id atmel_nand_controller_of_ids[] = {
	{
		.compatible = "atmel,at91rm9200-nand-controller",
		.data = &atmel_rm9200_nc_caps,
	},
	{
		.compatible = "atmel,at91sam9260-nand-controller",
		.data = &atmel_sam9260_nc_caps,
	},
	{
		.compatible = "atmel,at91sam9261-nand-controller",
		.data = &atmel_sam9261_nc_caps,
	},
	{
		.compatible = "atmel,at91sam9g45-nand-controller",
		.data = &atmel_sam9g45_nc_caps,
	},
	{
		.compatible = "atmel,sama5d3-nand-controller",
		.data = &atmel_sama5_nc_caps,
	},
	{
		.compatible = "microchip,sam9x60-nand-controller",
		.data = &microchip_sam9x60_nc_caps,
	},
	/* Support for old/deprecated bindings: */
	{
		.compatible = "atmel,at91rm9200-nand",
		.data = &atmel_rm9200_nand_caps,
	},
	{
		.compatible = "atmel,sama5d4-nand",
		.data = &atmel_rm9200_nand_caps,
	},
	{
		.compatible = "atmel,sama5d2-nand",
		.data = &atmel_rm9200_nand_caps,
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, atmel_nand_controller_of_ids);
2586 
/*
 * Generic probe: resolve the controller caps (from the platform id entry or
 * the OF match data), fix them up when parsing legacy bindings, and forward
 * to the flavor specific ->probe() hook.
 */
static int atmel_nand_controller_probe(struct platform_device *pdev)
{
	const struct atmel_nand_controller_caps *caps;

	if (pdev->id_entry)
		caps = (void *)pdev->id_entry->driver_data;
	else
		caps = of_device_get_match_data(&pdev->dev);

	if (!caps) {
		dev_err(&pdev->dev, "Could not retrieve NFC caps\n");
		return -EINVAL;
	}

	if (caps->legacy_of_bindings) {
		struct device_node *nfc_node;
		u32 ale_offs = 21;

		/*
		 * If we are parsing legacy DT props and the DT contains a
		 * valid NFC node, forward the request to the sama5 logic.
		 */
		nfc_node = of_get_compatible_child(pdev->dev.of_node,
						   "atmel,sama5d3-nfc");
		if (nfc_node) {
			caps = &atmel_sama5_nand_caps;
			of_node_put(nfc_node);
		}

		/*
		 * Even if the compatible says we are dealing with an
		 * at91rm9200 controller, the atmel,nand-has-dma specify that
		 * this controller supports DMA, which means we are in fact
		 * dealing with an at91sam9g45+ controller.
		 *
		 * The !has_dma test also keeps the sama5 caps selected above
		 * (which already have has_dma set) from being overridden, so
		 * the order of these fixups matters.
		 */
		if (!caps->has_dma &&
		    of_property_read_bool(pdev->dev.of_node,
					  "atmel,nand-has-dma"))
			caps = &atmel_sam9g45_nand_caps;

		/*
		 * All SoCs except the at91sam9261 are assigning ALE to A21 and
		 * CLE to A22. If atmel,nand-addr-offset != 21 this means we're
		 * actually dealing with an at91sam9261 controller.
		 */
		of_property_read_u32(pdev->dev.of_node,
				     "atmel,nand-addr-offset", &ale_offs);
		if (ale_offs != 21)
			caps = &atmel_sam9261_nand_caps;
	}

	return caps->ops->probe(pdev, caps);
}
2640 
atmel_nand_controller_remove(struct platform_device * pdev)2641 static void atmel_nand_controller_remove(struct platform_device *pdev)
2642 {
2643 	struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
2644 
2645 	WARN_ON(nc->caps->ops->remove(nc));
2646 }
2647 
atmel_nand_controller_resume(struct device * dev)2648 static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
2649 {
2650 	struct atmel_nand_controller *nc = dev_get_drvdata(dev);
2651 	struct atmel_nand *nand;
2652 
2653 	if (nc->pmecc)
2654 		atmel_pmecc_reset(nc->pmecc);
2655 
2656 	list_for_each_entry(nand, &nc->chips, node) {
2657 		int i;
2658 
2659 		for (i = 0; i < nand->numcs; i++)
2660 			nand_reset(&nand->base, i);
2661 	}
2662 
2663 	return 0;
2664 }
2665 
/* Resume-only PM ops: no suspend callback is registered (NULL). */
static SIMPLE_DEV_PM_OPS(atmel_nand_controller_pm_ops, NULL,
			 atmel_nand_controller_resume);

static struct platform_driver atmel_nand_controller_driver = {
	.driver = {
		.name = "atmel-nand-controller",
		.of_match_table = atmel_nand_controller_of_ids,
		.pm = &atmel_nand_controller_pm_ops,
	},
	.probe = atmel_nand_controller_probe,
	.remove = atmel_nand_controller_remove,
};
module_platform_driver(atmel_nand_controller_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_DESCRIPTION("NAND Flash Controller driver for Atmel SoCs");
MODULE_ALIAS("platform:atmel-nand-controller");
2684