xref: /linux/drivers/mtd/nand/spi/core.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2016-2017 Micron Technology, Inc.
4  *
5  * Authors:
6  *	Peter Pan <peterpandong@micron.com>
7  *	Boris Brezillon <boris.brezillon@bootlin.com>
8  */
9 
10 #define pr_fmt(fmt)	"spi-nand: " fmt
11 
12 #include <linux/device.h>
13 #include <linux/jiffies.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/mtd/spinand.h>
17 #include <linux/of.h>
18 #include <linux/slab.h>
19 #include <linux/string.h>
20 #include <linux/spi/spi.h>
21 #include <linux/spi/spi-mem.h>
22 
/*
 * Read one feature register into *val, bouncing the transfer through the
 * device scratch buffer.
 */
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret = spi_mem_exec_op(spinand->spimem, &op);

	if (!ret)
		*val = *spinand->scratchbuf;

	return ret;
}
36 
/*
 * Write @val to feature register @reg, staging the value in the device
 * scratch buffer for the SPI transfer.
 */
int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;

	return spi_mem_exec_op(spinand->spimem, &op);
}
45 
/* The status register is just the REG_STATUS feature register. */
static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}
50 
/*
 * Return the cached configuration register value of the currently selected
 * target. The cache is kept coherent by spinand_set_cfg().
 */
static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];

	return 0;
}
62 
/*
 * Write the configuration register of the currently selected target,
 * skipping the SPI transfer entirely when the cached value already matches.
 */
static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (!ret)
		spinand->cfg_cache[spinand->cur_target] = cfg;

	return ret;
}
82 
83 /**
84  * spinand_upd_cfg() - Update the configuration register
85  * @spinand: the spinand device
86  * @mask: the mask encoding the bits to update in the config reg
87  * @val: the new value to apply
88  *
89  * Update the configuration register.
90  *
91  * Return: 0 on success, a negative error code otherwise.
92  */
spinand_upd_cfg(struct spinand_device * spinand,u8 mask,u8 val)93 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
94 {
95 	int ret;
96 	u8 cfg;
97 
98 	ret = spinand_get_cfg(spinand, &cfg);
99 	if (ret)
100 		return ret;
101 
102 	cfg &= ~mask;
103 	cfg |= val;
104 
105 	return spinand_set_cfg(spinand, cfg);
106 }
107 
108 /**
109  * spinand_select_target() - Select a specific NAND target/die
110  * @spinand: the spinand device
111  * @target: the target/die to select
112  *
113  * Select a new target/die. If chip only has one die, this function is a NOOP.
114  *
115  * Return: 0 on success, a negative error code otherwise.
116  */
spinand_select_target(struct spinand_device * spinand,unsigned int target)117 int spinand_select_target(struct spinand_device *spinand, unsigned int target)
118 {
119 	struct nand_device *nand = spinand_to_nand(spinand);
120 	int ret;
121 
122 	if (WARN_ON(target >= nand->memorg.ntargets))
123 		return -EINVAL;
124 
125 	if (spinand->cur_target == target)
126 		return 0;
127 
128 	if (nand->memorg.ntargets == 1) {
129 		spinand->cur_target = target;
130 		return 0;
131 	}
132 
133 	ret = spinand->select_target(spinand, target);
134 	if (ret)
135 		return ret;
136 
137 	spinand->cur_target = target;
138 	return 0;
139 }
140 
spinand_read_cfg(struct spinand_device * spinand)141 static int spinand_read_cfg(struct spinand_device *spinand)
142 {
143 	struct nand_device *nand = spinand_to_nand(spinand);
144 	unsigned int target;
145 	int ret;
146 
147 	for (target = 0; target < nand->memorg.ntargets; target++) {
148 		ret = spinand_select_target(spinand, target);
149 		if (ret)
150 			return ret;
151 
152 		/*
153 		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
154 		 * here to bypass the config cache.
155 		 */
156 		ret = spinand_read_reg_op(spinand, REG_CFG,
157 					  &spinand->cfg_cache[target]);
158 		if (ret)
159 			return ret;
160 	}
161 
162 	return 0;
163 }
164 
spinand_init_cfg_cache(struct spinand_device * spinand)165 static int spinand_init_cfg_cache(struct spinand_device *spinand)
166 {
167 	struct nand_device *nand = spinand_to_nand(spinand);
168 	struct device *dev = &spinand->spimem->spi->dev;
169 
170 	spinand->cfg_cache = devm_kcalloc(dev,
171 					  nand->memorg.ntargets,
172 					  sizeof(*spinand->cfg_cache),
173 					  GFP_KERNEL);
174 	if (!spinand->cfg_cache)
175 		return -ENOMEM;
176 
177 	return 0;
178 }
179 
spinand_init_quad_enable(struct spinand_device * spinand)180 static int spinand_init_quad_enable(struct spinand_device *spinand)
181 {
182 	bool enable = false;
183 
184 	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
185 		return 0;
186 
187 	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
188 	    spinand->op_templates.write_cache->data.buswidth == 4 ||
189 	    spinand->op_templates.update_cache->data.buswidth == 4)
190 		enable = true;
191 
192 	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
193 			       enable ? CFG_QUAD_ENABLE : 0);
194 }
195 
/* Set or clear the on-die ECC enable bit in the configuration register. */
static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}
202 
/* Toggle the chip's continuous read mode through the vendor hook. */
static int spinand_cont_read_enable(struct spinand_device *spinand,
				    bool enable)
{
	return spinand->set_cont_read(spinand, enable);
}
208 
/*
 * Decode the ECC bits of a status byte. Chips with a non-standard encoding
 * provide their own ->get_status() hook; otherwise the generic 2-bit field
 * is interpreted here.
 *
 * Returns the number of corrected bitflips (0 if none), -EBADMSG on an
 * uncorrectable error, or -EINVAL on an unknown encoding.
 */
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 ecc;

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	ecc = status & STATUS_ECC_MASK;

	if (ecc == STATUS_ECC_NO_BITFLIPS)
		return 0;

	if (ecc == STATUS_ECC_HAS_BITFLIPS)
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nanddev_get_ecc_conf(nand)->strength;

	if (ecc == STATUS_ECC_UNCOR_ERROR)
		return -EBADMSG;

	return -EINVAL;
}
237 
spinand_noecc_ooblayout_ecc(struct mtd_info * mtd,int section,struct mtd_oob_region * region)238 static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
239 				       struct mtd_oob_region *region)
240 {
241 	return -ERANGE;
242 }
243 
spinand_noecc_ooblayout_free(struct mtd_info * mtd,int section,struct mtd_oob_region * region)244 static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
245 					struct mtd_oob_region *region)
246 {
247 	if (section)
248 		return -ERANGE;
249 
250 	/* Reserve 2 bytes for the BBM. */
251 	region->offset = 2;
252 	region->length = 62;
253 
254 	return 0;
255 }
256 
/* Fallback OOB layout used when the chip declares no ECC-specific layout. */
static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};
261 
spinand_ondie_ecc_init_ctx(struct nand_device * nand)262 static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
263 {
264 	struct spinand_device *spinand = nand_to_spinand(nand);
265 	struct mtd_info *mtd = nanddev_to_mtd(nand);
266 	struct spinand_ondie_ecc_conf *engine_conf;
267 
268 	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
269 	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
270 	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;
271 
272 	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
273 	if (!engine_conf)
274 		return -ENOMEM;
275 
276 	nand->ecc.ctx.priv = engine_conf;
277 
278 	if (spinand->eccinfo.ooblayout)
279 		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
280 	else
281 		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
282 
283 	return 0;
284 }
285 
spinand_ondie_ecc_cleanup_ctx(struct nand_device * nand)286 static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
287 {
288 	kfree(nand->ecc.ctx.priv);
289 }
290 
spinand_ondie_ecc_prepare_io_req(struct nand_device * nand,struct nand_page_io_req * req)291 static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
292 					    struct nand_page_io_req *req)
293 {
294 	struct spinand_device *spinand = nand_to_spinand(nand);
295 	bool enable = (req->mode != MTD_OPS_RAW);
296 
297 	memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
298 
299 	/* Only enable or disable the engine */
300 	return spinand_ecc_enable(spinand, enable);
301 }
302 
/*
 * After a page I/O with on-die ECC: decode the status byte captured by
 * spinand_ondie_ecc_save_status(), update the MTD ECC statistics and return
 * the number of corrected bitflips, or -EBADMSG on an uncorrectable error.
 */
static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	int ret;

	/* Raw accesses bypass the ECC engine: nothing to check. */
	if (req->mode == MTD_OPS_RAW)
		return 0;

	/* Nothing to do when finishing a page write */
	if (req->type == NAND_PAGE_WRITE)
		return 0;

	/* Finish a page read: check the status, report errors/bitflips */
	ret = spinand_check_ecc_status(spinand, engine_conf->status);
	if (ret == -EBADMSG) {
		mtd->ecc_stats.failed++;
	} else if (ret > 0) {
		unsigned int pages;

		/*
		 * Continuous reads don't allow us to get the detail,
		 * so we may exaggerate the actual number of corrected bitflips.
		 */
		if (!req->continuous)
			pages = 1;
		else
			pages = req->datalen / nanddev_page_size(nand);

		mtd->ecc_stats.corrected += ret * pages;
	}

	return ret;
}
339 
/* Glue between the generic NAND ECC framework and the on-die helpers. */
static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
	.init_ctx = spinand_ondie_ecc_init_ctx,
	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
	.finish_io_req = spinand_ondie_ecc_finish_io_req,
};
346 
/* Shared engine descriptor; per-device state lives in nand->ecc.ctx.priv. */
static struct nand_ecc_engine spinand_ondie_ecc_engine = {
	.ops = &spinand_ondie_ecc_engine_ops,
};
350 
/*
 * Stash the status register value so the on-die ECC engine can decode its
 * ECC bits later in ->finish_io_req().
 */
static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;

	if (engine_conf &&
	    nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE)
		engine_conf->status = status;
}
359 
spinand_write_enable_op(struct spinand_device * spinand)360 static int spinand_write_enable_op(struct spinand_device *spinand)
361 {
362 	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
363 
364 	return spi_mem_exec_op(spinand->spimem, &op);
365 }
366 
spinand_load_page_op(struct spinand_device * spinand,const struct nand_page_io_req * req)367 static int spinand_load_page_op(struct spinand_device *spinand,
368 				const struct nand_page_io_req *req)
369 {
370 	struct nand_device *nand = spinand_to_nand(spinand);
371 	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
372 	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
373 
374 	return spi_mem_exec_op(spinand->spimem, &op);
375 }
376 
/*
 * Transfer data/OOB from the chip's internal page cache into the spinand
 * bounce buffers through a dirmap descriptor (raw or ECC-wrapped depending
 * on req->mode), then copy the requested portions to the caller's buffers.
 */
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		/*
		 * Regular reads fetch one full page; continuous reads fetch
		 * everything up to the end of the last requested page.
		 */
		if (!req->continuous)
			nbytes = nanddev_page_size(nand);
		else
			nbytes = round_up(req->dataoffs + req->datalen,
					  nanddev_page_size(nand));
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			/* OOB-only read: start right after the data area. */
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	if (req->mode == MTD_OPS_RAW)
		rdesc = spinand->dirmaps[req->pos.plane].rdesc;
	else
		rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;

	/* Some chips encode the plane in a bit just above the column address. */
	if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
		column |= req->pos.plane << fls(nanddev_page_size(nand));

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;

		/*
		 * Dirmap accesses are allowed to toggle the CS.
		 * Toggling the CS during a continuous read is forbidden.
		 */
		if (nbytes && req->continuous)
			return -EIO;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		/* AUTO_OOB remaps through the free regions of the OOB layout. */
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}
451 
/*
 * Fill the chip's internal page cache (data + OOB) through a dirmap
 * descriptor before a PROGRAM EXECUTE. The whole page is always
 * transferred; regions the caller did not supply are written as 0xff.
 */
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 *
	 * Only reset the data buffer manually, the OOB buffer is prepared by
	 * ECC engines ->prepare_io_req() callback.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nanddev_page_size(nand));

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		/* AUTO_OOB scatters user bytes into the layout's free regions. */
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	if (req->mode == MTD_OPS_RAW)
		wdesc = spinand->dirmaps[req->pos.plane].wdesc;
	else
		wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;

	/* Some chips encode the plane in a bit just above the column address. */
	if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
		column |= req->pos.plane << fls(nanddev_page_size(nand));

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}
513 
spinand_program_op(struct spinand_device * spinand,const struct nand_page_io_req * req)514 static int spinand_program_op(struct spinand_device *spinand,
515 			      const struct nand_page_io_req *req)
516 {
517 	struct nand_device *nand = spinand_to_nand(spinand);
518 	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
519 	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
520 
521 	return spi_mem_exec_op(spinand->spimem, &op);
522 }
523 
spinand_erase_op(struct spinand_device * spinand,const struct nand_pos * pos)524 static int spinand_erase_op(struct spinand_device *spinand,
525 			    const struct nand_pos *pos)
526 {
527 	struct nand_device *nand = spinand_to_nand(spinand);
528 	unsigned int row = nanddev_pos_to_row(nand, pos);
529 	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
530 
531 	return spi_mem_exec_op(spinand->spimem, &op);
532 }
533 
/*
 * Poll the status register until the device leaves the busy state or
 * SPINAND_WAITRDY_TIMEOUT_MS expires. The polling itself is delegated to
 * the spi-mem core. If @s is non-NULL, the last status value read is
 * stored there.
 *
 * Return: 0 when the device is ready, -ETIMEDOUT if it is still busy, or
 * another negative error code on communication failure.
 */
static int spinand_wait(struct spinand_device *spinand,
			unsigned long initial_delay_us,
			unsigned long poll_delay_us,
			u8 *s)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
						      spinand->scratchbuf);
	u8 status;
	int ret;

	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
				  initial_delay_us,
				  poll_delay_us,
				  SPINAND_WAITRDY_TIMEOUT_MS);
	if (ret)
		return ret;

	status = *spinand->scratchbuf;
	if (!(status & STATUS_BUSY))
		goto out;

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}
569 
/* Read the device ID (SPINAND_MAX_ID_LEN bytes) into @buf. */
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
			      u8 ndummy, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(
		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	int ret = spi_mem_exec_op(spinand->spimem, &op);

	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}
583 
spinand_reset_op(struct spinand_device * spinand)584 static int spinand_reset_op(struct spinand_device *spinand)
585 {
586 	struct spi_mem_op op = SPINAND_RESET_OP;
587 	int ret;
588 
589 	ret = spi_mem_exec_op(spinand->spimem, &op);
590 	if (ret)
591 		return ret;
592 
593 	return spinand_wait(spinand,
594 			    SPINAND_RESET_INITIAL_DELAY_US,
595 			    SPINAND_RESET_POLL_DELAY_US,
596 			    NULL);
597 }
598 
/* Program the block lock register (array write-protection bits). */
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
603 
/*
 * Read one page: prepare the ECC engine, load the page into the chip's
 * cache, wait for readiness (saving the status byte for on-die ECC
 * decoding), transfer the cache content, then let the ECC engine report
 * bitflips.
 */
static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_READ_INITIAL_DELAY_US,
			   SPINAND_READ_POLL_DELAY_US,
			   &status);
	if (ret < 0)
		return ret;

	/* The ECC bits of the status byte are decoded in finish_io_req(). */
	spinand_ondie_ecc_save_status(nand, status);

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	/* May return a positive bitflip count or -EBADMSG. */
	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
634 
/*
 * Write one page: prepare the ECC engine, enable writes, fill the chip's
 * cache, trigger PROGRAM EXECUTE and check the resulting status for a
 * program failure before letting the ECC engine finish the request.
 */
static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_WRITE_INITIAL_DELAY_US,
			   SPINAND_WRITE_POLL_DELAY_US,
			   &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		return -EIO;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
667 
/*
 * Read @ops page by page through the regular (one PAGE READ per page) path.
 * ECC is disabled for raw accesses or when no OOB layout is available.
 * Uncorrectable errors do not abort the loop: they are recorded and
 * reported as -EBADMSG once every requested page has been read.
 */
static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
					 struct mtd_oob_ops *ops,
					 unsigned int *max_bitflips)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool disable_ecc = false;
	bool ecc_failed = false;
	int ret;

	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		/* Returns -EBADMSG or a (positive) corrected-bitflip count. */
		ret = spinand_read_page(spinand, &iter.req);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG)
			ecc_failed = true;
		else
			*max_bitflips = max_t(unsigned int, *max_bitflips, ret);

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret;
}
709 
/*
 * Read @ops using the chip's continuous read mode: one PAGE READ per block,
 * then the data streams page after page without toggling CS. The mode is
 * always disabled again on exit, whatever happened in between.
 */
static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
					    struct mtd_oob_ops *ops,
					    unsigned int *max_bitflips)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	u8 status;
	int ret;

	ret = spinand_cont_read_enable(spinand, true);
	if (ret)
		return ret;

	/*
	 * The cache is divided into two halves. While one half of the cache has
	 * the requested data, the other half is loaded with the next chunk of data.
	 * Therefore, the host can read out the data continuously from page to page.
	 * Each data read must be a multiple of 4-bytes and full pages should be read;
	 * otherwise, the data output might get out of sequence from one read command
	 * to another.
	 */
	nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			goto end_cont_read;

		ret = nand_ecc_prepare_io_req(nand, &iter.req);
		if (ret)
			goto end_cont_read;

		ret = spinand_load_page_op(spinand, &iter.req);
		if (ret)
			goto end_cont_read;

		ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
				   SPINAND_READ_POLL_DELAY_US, NULL);
		if (ret < 0)
			goto end_cont_read;

		ret = spinand_read_from_cache_op(spinand, &iter.req);
		if (ret)
			goto end_cont_read;

		ops->retlen += iter.req.datalen;

		/* The whole-block ECC verdict is only available afterwards. */
		ret = spinand_read_status(spinand, &status);
		if (ret)
			goto end_cont_read;

		spinand_ondie_ecc_save_status(nand, status);

		ret = nand_ecc_finish_io_req(nand, &iter.req);
		if (ret < 0)
			goto end_cont_read;

		*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
		ret = 0;
	}

end_cont_read:
	/*
	 * Once all the data has been read out, the host can either pull CS#
	 * high and wait for tRST or manually clear the bit in the configuration
	 * register to terminate the continuous read operation. We have no
	 * guarantee the SPI controller drivers will effectively deassert the CS
	 * when we expect them to, so take the register based approach.
	 */
	spinand_cont_read_enable(spinand, false);

	return ret;
}
782 
spinand_cont_read_init(struct spinand_device * spinand)783 static void spinand_cont_read_init(struct spinand_device *spinand)
784 {
785 	struct nand_device *nand = spinand_to_nand(spinand);
786 	enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type;
787 
788 	/* OOBs cannot be retrieved so external/on-host ECC engine won't work */
789 	if (spinand->set_cont_read &&
790 	    (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE ||
791 	     engine_type == NAND_ECC_ENGINE_TYPE_NONE)) {
792 		spinand->cont_read_possible = true;
793 	}
794 }
795 
spinand_use_cont_read(struct mtd_info * mtd,loff_t from,struct mtd_oob_ops * ops)796 static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
797 				  struct mtd_oob_ops *ops)
798 {
799 	struct nand_device *nand = mtd_to_nanddev(mtd);
800 	struct spinand_device *spinand = nand_to_spinand(nand);
801 	struct nand_pos start_pos, end_pos;
802 
803 	if (!spinand->cont_read_possible)
804 		return false;
805 
806 	/* OOBs won't be retrieved */
807 	if (ops->ooblen || ops->oobbuf)
808 		return false;
809 
810 	nanddev_offs_to_pos(nand, from, &start_pos);
811 	nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);
812 
813 	/*
814 	 * Continuous reads never cross LUN boundaries. Some devices don't
815 	 * support crossing planes boundaries. Some devices don't even support
816 	 * crossing blocks boundaries. The common case being to read through UBI,
817 	 * we will very rarely read two consequent blocks or more, so it is safer
818 	 * and easier (can be improved) to only enable continuous reads when
819 	 * reading within the same erase block.
820 	 */
821 	if (start_pos.target != end_pos.target ||
822 	    start_pos.plane != end_pos.plane ||
823 	    start_pos.eraseblock != end_pos.eraseblock)
824 		return false;
825 
826 	return start_pos.page < end_pos.page;
827 }
828 
/*
 * mtd_read() hook: pick the continuous or regular read path under the
 * device lock and report the ECC statistics delta for this call.
 */
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct mtd_ecc_stats old_stats;
	unsigned int max_bitflips = 0;
	int ret;

	mutex_lock(&spinand->lock);

	/* Snapshot the stats so the per-call delta can be reported below. */
	old_stats = mtd->ecc_stats;

	ret = spinand_use_cont_read(mtd, from, ops) ?
	      spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips) :
	      spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);

	if (ops->stats) {
		ops->stats->uncorrectable_errors +=
			mtd->ecc_stats.failed - old_stats.failed;
		ops->stats->corrected_bitflips +=
			mtd->ecc_stats.corrected - old_stats.corrected;
	}

	mutex_unlock(&spinand->lock);

	return ret ? ret : max_bitflips;
}
857 
/*
 * mtd_write() hook: write @ops page by page under the device lock. ECC is
 * disabled for raw accesses or when no OOB layout is available. Stops at
 * the first error.
 */
static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool disable_ecc = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}
892 
/*
 * Check the bad-block marker (first two OOB bytes, read raw) of the first
 * page of the block. Errors from the target selection or the page read are
 * deliberately ignored: @marker is zero-initialized, so a failed read
 * leaves it at 0x00 and the block is conservatively reported as bad.
 */
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};

	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req);
	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}
912 
/* mtd_block_isbad() hook: translate the offset and query the NAND core. */
static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);

	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}
927 
spinand_markbad(struct nand_device * nand,const struct nand_pos * pos)928 static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
929 {
930 	struct spinand_device *spinand = nand_to_spinand(nand);
931 	u8 marker[2] = { };
932 	struct nand_page_io_req req = {
933 		.pos = *pos,
934 		.ooboffs = 0,
935 		.ooblen = sizeof(marker),
936 		.oobbuf.out = marker,
937 		.mode = MTD_OPS_RAW,
938 	};
939 	int ret;
940 
941 	ret = spinand_select_target(spinand, pos->target);
942 	if (ret)
943 		return ret;
944 
945 	ret = spinand_write_enable_op(spinand);
946 	if (ret)
947 		return ret;
948 
949 	return spinand_write_page(spinand, &req);
950 }
951 
/* mtd_block_markbad() hook: translate the offset and let the core mark it. */
static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);

	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}
966 
/*
 * Erase one block: select the die, enable writes, issue BLOCK ERASE and
 * wait for completion, checking the status byte for an erase failure.
 */
static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_ERASE_INITIAL_DELAY_US,
			   SPINAND_ERASE_POLL_DELAY_US,
			   &status);

	/* The chip reports erase failures through a status bit only. */
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}
995 
spinand_mtd_erase(struct mtd_info * mtd,struct erase_info * einfo)996 static int spinand_mtd_erase(struct mtd_info *mtd,
997 			     struct erase_info *einfo)
998 {
999 	struct spinand_device *spinand = mtd_to_spinand(mtd);
1000 	int ret;
1001 
1002 	mutex_lock(&spinand->lock);
1003 	ret = nanddev_mtd_erase(mtd, einfo);
1004 	mutex_unlock(&spinand->lock);
1005 
1006 	return ret;
1007 }
1008 
/*
 * mtd->_block_isreserved() hook: ask the generic NAND layer whether the
 * block containing @offs is reserved, under the device lock.
 */
static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int err;

	nanddev_offs_to_pos(nand, offs, &pos);

	mutex_lock(&spinand->lock);
	err = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return err;
}
1023 
/*
 * Create the spi-mem direct-mapping descriptors for @plane: a read and a
 * write mapping covering a full page + OOB (or a whole eraseblock when
 * continuous reads are possible), plus ECC-enabled variants when the ECC
 * engine is pipelined on the SPI bus. In the non-pipelined case the ECC
 * descriptors simply alias the raw ones.
 *
 * All descriptors are devm-managed, so there is nothing to undo on the
 * error paths.
 */
static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	/* Continuous reads span multiple pages: map a whole eraseblock */
	if (spinand->cont_read_possible)
		info.length = nanddev_eraseblock_size(nand);

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	/* No on-bus ECC engine: ECC accesses use the same raw descriptors */
	if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
		spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
		spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;

		return 0;
	}

	/* Pipelined engine: dedicated descriptors with the ECC flag set */
	info.op_tmpl = *spinand->op_templates.update_cache;
	info.op_tmpl.data.ecc = true;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc_ecc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	info.op_tmpl.data.ecc = true;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc_ecc = desc;

	return 0;
}
1083 
spinand_create_dirmaps(struct spinand_device * spinand)1084 static int spinand_create_dirmaps(struct spinand_device *spinand)
1085 {
1086 	struct nand_device *nand = spinand_to_nand(spinand);
1087 	int i, ret;
1088 
1089 	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
1090 					sizeof(*spinand->dirmaps) *
1091 					nand->memorg.planes_per_lun,
1092 					GFP_KERNEL);
1093 	if (!spinand->dirmaps)
1094 		return -ENOMEM;
1095 
1096 	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
1097 		ret = spinand_create_dirmap(spinand, i);
1098 		if (ret)
1099 			return ret;
1100 	}
1101 
1102 	return 0;
1103 }
1104 
/* Hooks handed to the generic NAND layer through nanddev_init() */
static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};
1110 
/*
 * All supported manufacturer drivers, matched against the first READ_ID
 * byte in spinand_manufacturer_match(). Kept in alphabetical order.
 */
static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&alliancememory_spinand_manufacturer,
	&ato_spinand_manufacturer,
	&esmt_c8_spinand_manufacturer,
	&foresee_spinand_manufacturer,
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&paragon_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
	&xtx_spinand_manufacturer,
};
1124 
spinand_manufacturer_match(struct spinand_device * spinand,enum spinand_readid_method rdid_method)1125 static int spinand_manufacturer_match(struct spinand_device *spinand,
1126 				      enum spinand_readid_method rdid_method)
1127 {
1128 	u8 *id = spinand->id.data;
1129 	unsigned int i;
1130 	int ret;
1131 
1132 	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
1133 		const struct spinand_manufacturer *manufacturer =
1134 			spinand_manufacturers[i];
1135 
1136 		if (id[0] != manufacturer->id)
1137 			continue;
1138 
1139 		ret = spinand_match_and_init(spinand,
1140 					     manufacturer->chips,
1141 					     manufacturer->nchips,
1142 					     rdid_method);
1143 		if (ret < 0)
1144 			continue;
1145 
1146 		spinand->manufacturer = manufacturer;
1147 		return 0;
1148 	}
1149 	return -EOPNOTSUPP;
1150 }
1151 
spinand_id_detect(struct spinand_device * spinand)1152 static int spinand_id_detect(struct spinand_device *spinand)
1153 {
1154 	u8 *id = spinand->id.data;
1155 	int ret;
1156 
1157 	ret = spinand_read_id_op(spinand, 0, 0, id);
1158 	if (ret)
1159 		return ret;
1160 	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
1161 	if (!ret)
1162 		return 0;
1163 
1164 	ret = spinand_read_id_op(spinand, 1, 0, id);
1165 	if (ret)
1166 		return ret;
1167 	ret = spinand_manufacturer_match(spinand,
1168 					 SPINAND_READID_METHOD_OPCODE_ADDR);
1169 	if (!ret)
1170 		return 0;
1171 
1172 	ret = spinand_read_id_op(spinand, 0, 1, id);
1173 	if (ret)
1174 		return ret;
1175 	ret = spinand_manufacturer_match(spinand,
1176 					 SPINAND_READID_METHOD_OPCODE_DUMMY);
1177 
1178 	return ret;
1179 }
1180 
spinand_manufacturer_init(struct spinand_device * spinand)1181 static int spinand_manufacturer_init(struct spinand_device *spinand)
1182 {
1183 	if (spinand->manufacturer->ops->init)
1184 		return spinand->manufacturer->ops->init(spinand);
1185 
1186 	return 0;
1187 }
1188 
spinand_manufacturer_cleanup(struct spinand_device * spinand)1189 static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
1190 {
1191 	/* Release manufacturer private data */
1192 	if (spinand->manufacturer->ops->cleanup)
1193 		return spinand->manufacturer->ops->cleanup(spinand);
1194 }
1195 
/*
 * Pick the first variant in @variants the SPI controller can execute.
 *
 * A variant is usable when a full page + OOB transfer can be covered by
 * controller-supported chunks: spi_mem_adjust_op_size() is repeatedly
 * allowed to shrink the data phase and the resulting op is checked, until
 * the whole length is consumed. Variants are tried in table order
 * (drivers conventionally list the preferred/fastest ones first — TODO
 * confirm for each table).
 *
 * Returns the selected op template, or NULL if no variant is supported.
 */
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		/* Work on a copy: adjust_op_size() modifies the op in place */
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		/* nbytes == 0 means the whole transfer could be chunked */
		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}
1229 
1230 /**
1231  * spinand_match_and_init() - Try to find a match between a device ID and an
1232  *			      entry in a spinand_info table
1233  * @spinand: SPI NAND object
1234  * @table: SPI NAND device description table
1235  * @table_size: size of the device description table
1236  * @rdid_method: read id method to match
1237  *
1238  * Match between a device ID retrieved through the READ_ID command and an
1239  * entry in the SPI NAND description table. If a match is found, the spinand
1240  * object will be initialized with information provided by the matching
1241  * spinand_info entry.
1242  *
1243  * Return: 0 on success, a negative error code otherwise.
1244  */
spinand_match_and_init(struct spinand_device * spinand,const struct spinand_info * table,unsigned int table_size,enum spinand_readid_method rdid_method)1245 int spinand_match_and_init(struct spinand_device *spinand,
1246 			   const struct spinand_info *table,
1247 			   unsigned int table_size,
1248 			   enum spinand_readid_method rdid_method)
1249 {
1250 	u8 *id = spinand->id.data;
1251 	struct nand_device *nand = spinand_to_nand(spinand);
1252 	unsigned int i;
1253 
1254 	for (i = 0; i < table_size; i++) {
1255 		const struct spinand_info *info = &table[i];
1256 		const struct spi_mem_op *op;
1257 
1258 		if (rdid_method != info->devid.method)
1259 			continue;
1260 
1261 		if (memcmp(id + 1, info->devid.id, info->devid.len))
1262 			continue;
1263 
1264 		nand->memorg = table[i].memorg;
1265 		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
1266 		spinand->eccinfo = table[i].eccinfo;
1267 		spinand->flags = table[i].flags;
1268 		spinand->id.len = 1 + table[i].devid.len;
1269 		spinand->select_target = table[i].select_target;
1270 		spinand->set_cont_read = table[i].set_cont_read;
1271 
1272 		op = spinand_select_op_variant(spinand,
1273 					       info->op_variants.read_cache);
1274 		if (!op)
1275 			return -ENOTSUPP;
1276 
1277 		spinand->op_templates.read_cache = op;
1278 
1279 		op = spinand_select_op_variant(spinand,
1280 					       info->op_variants.write_cache);
1281 		if (!op)
1282 			return -ENOTSUPP;
1283 
1284 		spinand->op_templates.write_cache = op;
1285 
1286 		op = spinand_select_op_variant(spinand,
1287 					       info->op_variants.update_cache);
1288 		spinand->op_templates.update_cache = op;
1289 
1290 		return 0;
1291 	}
1292 
1293 	return -ENOTSUPP;
1294 }
1295 
spinand_detect(struct spinand_device * spinand)1296 static int spinand_detect(struct spinand_device *spinand)
1297 {
1298 	struct device *dev = &spinand->spimem->spi->dev;
1299 	struct nand_device *nand = spinand_to_nand(spinand);
1300 	int ret;
1301 
1302 	ret = spinand_reset_op(spinand);
1303 	if (ret)
1304 		return ret;
1305 
1306 	ret = spinand_id_detect(spinand);
1307 	if (ret) {
1308 		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
1309 			spinand->id.data);
1310 		return ret;
1311 	}
1312 
1313 	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
1314 		dev_err(dev,
1315 			"SPI NANDs with more than one die must implement ->select_target()\n");
1316 		return -EINVAL;
1317 	}
1318 
1319 	dev_info(&spinand->spimem->spi->dev,
1320 		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
1321 	dev_info(&spinand->spimem->spi->dev,
1322 		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
1323 		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
1324 		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
1325 
1326 	return 0;
1327 }
1328 
/*
 * Bring the chip into its runtime configuration: populate the CFG
 * register cache, enable quad mode if required, clear the OTP-enable
 * bit, run the vendor init hook and unlock all blocks on every die.
 *
 * Called both at probe time (from spinand_init()) and on resume.
 *
 * Returns 0 on success, a negative error code otherwise; on a failed
 * unlock loop the vendor init is undone before returning.
 */
static int spinand_init_flash(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret, i;

	ret = spinand_read_cfg(spinand);
	if (ret)
		return ret;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		return ret;

	/* Make sure we are not left in OTP access mode */
	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		return ret;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
		"Failed to initialize the SPI NAND chip (err = %d)\n",
		ret);
		return ret;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			break;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			break;
	}

	/* Roll back the vendor init if unlocking failed mid-way */
	if (ret)
		spinand_manufacturer_cleanup(spinand);

	return ret;
}
1371 
/*
 * mtd->_resume() hook: reset the chip and replay the runtime setup done
 * at probe time, then repeat the spinand_ecc_enable(spinand, false) call
 * that spinand_init() performs before ECC engine initialization.
 *
 * NOTE(review): this hook returns void, so all failures (including the
 * ignored spinand_ecc_enable() result) are silently dropped — confirm
 * that a partially failed resume is acceptable here.
 */
static void spinand_mtd_resume(struct mtd_info *mtd)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return;

	ret = spinand_init_flash(spinand);
	if (ret)
		return;

	spinand_ecc_enable(spinand, false);
}
1387 
/*
 * One-time initialization of a SPI NAND device: allocate the DMA-able
 * bounce buffers, detect and configure the chip, initialize the generic
 * NAND and ECC layers, wire up the MTD hooks and create the dirmaps.
 *
 * Returns 0 on success, a negative error code otherwise; everything
 * acquired so far is released through the goto cleanup chain.
 */
static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	/* The OOB bounce area lives right after the in-band page data */
	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_flash(spinand);
	if (ret)
		goto err_free_bufs;

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/* SPI-NAND default ECC engine is on-die */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;

	spinand_ecc_enable(spinand, false);
	ret = nanddev_ecc_engine_init(nand);
	if (ret)
		goto err_cleanup_nanddev;

	/*
	 * Continuous read can only be enabled with an on-die ECC engine, so the
	 * ECC initialization must have happened previously.
	 */
	spinand_cont_read_init(spinand);

	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
	mtd->_resume = spinand_mtd_resume;

	if (nand->ecc.engine) {
		ret = mtd_ooblayout_count_freebytes(mtd);
		if (ret < 0)
			goto err_cleanup_ecc_engine;
	}

	/*
	 * Without an ECC engine, ret is still 0 from the successful
	 * nanddev_ecc_engine_init() above, so oobavail ends up 0.
	 */
	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
	mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_cleanup_ecc_engine;
	}

	return 0;

err_cleanup_ecc_engine:
	nanddev_ecc_engine_cleanup(nand);

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}
1494 
spinand_cleanup(struct spinand_device * spinand)1495 static void spinand_cleanup(struct spinand_device *spinand)
1496 {
1497 	struct nand_device *nand = spinand_to_nand(spinand);
1498 
1499 	nanddev_cleanup(nand);
1500 	spinand_manufacturer_cleanup(spinand);
1501 	kfree(spinand->databuf);
1502 	kfree(spinand->scratchbuf);
1503 }
1504 
spinand_probe(struct spi_mem * mem)1505 static int spinand_probe(struct spi_mem *mem)
1506 {
1507 	struct spinand_device *spinand;
1508 	struct mtd_info *mtd;
1509 	int ret;
1510 
1511 	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
1512 			       GFP_KERNEL);
1513 	if (!spinand)
1514 		return -ENOMEM;
1515 
1516 	spinand->spimem = mem;
1517 	spi_mem_set_drvdata(mem, spinand);
1518 	spinand_set_of_node(spinand, mem->spi->dev.of_node);
1519 	mutex_init(&spinand->lock);
1520 	mtd = spinand_to_mtd(spinand);
1521 	mtd->dev.parent = &mem->spi->dev;
1522 
1523 	ret = spinand_init(spinand);
1524 	if (ret)
1525 		return ret;
1526 
1527 	ret = mtd_device_register(mtd, NULL, 0);
1528 	if (ret)
1529 		goto err_spinand_cleanup;
1530 
1531 	return 0;
1532 
1533 err_spinand_cleanup:
1534 	spinand_cleanup(spinand);
1535 
1536 	return ret;
1537 }
1538 
/*
 * spi-mem remove hook: unregister the MTD device and release the
 * resources acquired at probe time. Keeps the device alive if the MTD
 * core refuses the unregistration.
 */
static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
	int ret;

	ret = mtd_device_unregister(spinand_to_mtd(spinand));
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}
1556 
/* Match table for non-DT (board file / ACPI-less) instantiation */
static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

#ifdef CONFIG_OF
/* Device-tree match table */
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif

/* Framework entry point: registered as a spi-mem driver */
static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");
1587