xref: /linux/drivers/mtd/nand/spi/core.c (revision f474808acb3c4b30552d9c59b181244e0300d218)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2016-2017 Micron Technology, Inc.
4  *
5  * Authors:
6  *	Peter Pan <peterpandong@micron.com>
7  *	Boris Brezillon <boris.brezillon@bootlin.com>
8  */
9 
10 #define pr_fmt(fmt)	"spi-nand: " fmt
11 
12 #include <linux/device.h>
13 #include <linux/jiffies.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/mtd/spinand.h>
17 #include <linux/of.h>
18 #include <linux/slab.h>
19 #include <linux/spi/spi.h>
20 #include <linux/spi/spi-mem.h>
21 
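/*
 * Read a device register through the GET FEATURE operation. The value is
 * bounced through the DMA-able scratch buffer before being copied to the
 * caller's buffer.
 */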
22 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
23 {
24 	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
25 						      spinand->scratchbuf);
26 	int ret;
27 
28 	ret = spi_mem_exec_op(spinand->spimem, &op);
29 	if (ret)
30 		return ret;
31 
32 	*val = *spinand->scratchbuf;
33 	return 0;
34 }
35 
36 static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
37 {
38 	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
39 						      spinand->scratchbuf);
40 
41 	*spinand->scratchbuf = val;
42 	return spi_mem_exec_op(spinand->spimem, &op);
43 }
44 
45 static int spinand_read_status(struct spinand_device *spinand, u8 *status)
46 {
47 	return spinand_read_reg_op(spinand, REG_STATUS, status);
48 }
49 
50 static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
51 {
52 	struct nand_device *nand = spinand_to_nand(spinand);
53 
54 	if (WARN_ON(spinand->cur_target < 0 ||
55 		    spinand->cur_target >= nand->memorg.ntargets))
56 		return -EINVAL;
57 
58 	*cfg = spinand->cfg_cache[spinand->cur_target];
59 	return 0;
60 }
61 
62 static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
63 {
64 	struct nand_device *nand = spinand_to_nand(spinand);
65 	int ret;
66 
67 	if (WARN_ON(spinand->cur_target < 0 ||
68 		    spinand->cur_target >= nand->memorg.ntargets))
69 		return -EINVAL;
70 
71 	if (spinand->cfg_cache[spinand->cur_target] == cfg)
72 		return 0;
73 
74 	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
75 	if (ret)
76 		return ret;
77 
78 	spinand->cfg_cache[spinand->cur_target] = cfg;
79 	return 0;
80 }
81 
82 /**
83  * spinand_upd_cfg() - Update the configuration register
84  * @spinand: the spinand device
85  * @mask: the mask encoding the bits to update in the config reg
86  * @val: the new value to apply
87  *
88  * Update the bits selected by @mask in the configuration register with @val.
89  *
90  * Return: 0 on success, a negative error code otherwise.
91  */
92 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
93 {
94 	int ret;
95 	u8 cfg;
96 
97 	ret = spinand_get_cfg(spinand, &cfg);
98 	if (ret)
99 		return ret;
100 
101 	cfg &= ~mask;
102 	cfg |= val;
103 
104 	return spinand_set_cfg(spinand, cfg);
105 }
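/*
 * Illustrative usage sketch (not extra driver logic): configuration bits are
 * toggled through this helper by passing the bit as the mask and either the
 * bit or 0 as the value, e.g. to leave OTP mode as spinand_init() does:
 *
 *	err = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
 *	if (err)
 *		return err;
 */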
106 
107 /**
108  * spinand_select_target() - Select a specific NAND target/die
109  * @spinand: the spinand device
110  * @target: the target/die to select
111  *
112  * Select a new target/die. If the chip only has one die, this function is a NOOP.
113  *
114  * Return: 0 on success, a negative error code otherwise.
115  */
116 int spinand_select_target(struct spinand_device *spinand, unsigned int target)
117 {
118 	struct nand_device *nand = spinand_to_nand(spinand);
119 	int ret;
120 
121 	if (WARN_ON(target >= nand->memorg.ntargets))
122 		return -EINVAL;
123 
124 	if (spinand->cur_target == target)
125 		return 0;
126 
127 	if (nand->memorg.ntargets == 1) {
128 		spinand->cur_target = target;
129 		return 0;
130 	}
131 
132 	ret = spinand->select_target(spinand, target);
133 	if (ret)
134 		return ret;
135 
136 	spinand->cur_target = target;
137 	return 0;
138 }
139 
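/*
 * Populate the per-target cache of the configuration register so that later
 * spinand_upd_cfg() calls can skip redundant SET FEATURE operations.
 */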
140 static int spinand_init_cfg_cache(struct spinand_device *spinand)
141 {
142 	struct nand_device *nand = spinand_to_nand(spinand);
143 	struct device *dev = &spinand->spimem->spi->dev;
144 	unsigned int target;
145 	int ret;
146 
147 	spinand->cfg_cache = devm_kcalloc(dev,
148 					  nand->memorg.ntargets,
149 					  sizeof(*spinand->cfg_cache),
150 					  GFP_KERNEL);
151 	if (!spinand->cfg_cache)
152 		return -ENOMEM;
153 
154 	for (target = 0; target < nand->memorg.ntargets; target++) {
155 		ret = spinand_select_target(spinand, target);
156 		if (ret)
157 			return ret;
158 
159 		/*
160 		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
161 		 * here to bypass the config cache.
162 		 */
163 		ret = spinand_read_reg_op(spinand, REG_CFG,
164 					  &spinand->cfg_cache[target]);
165 		if (ret)
166 			return ret;
167 	}
168 
169 	return 0;
170 }
171 
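/*
 * If the chip has a Quad Enable (QE) bit, set it when at least one of the
 * selected cache I/O op templates uses a x4 data bus, and clear it otherwise.
 */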
172 static int spinand_init_quad_enable(struct spinand_device *spinand)
173 {
174 	bool enable = false;
175 
176 	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
177 		return 0;
178 
179 	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
180 	    spinand->op_templates.write_cache->data.buswidth == 4 ||
181 	    spinand->op_templates.update_cache->data.buswidth == 4)
182 		enable = true;
183 
184 	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
185 			       enable ? CFG_QUAD_ENABLE : 0);
186 }
187 
188 static int spinand_ecc_enable(struct spinand_device *spinand,
189 			      bool enable)
190 {
191 	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
192 			       enable ? CFG_ECC_ENABLE : 0);
193 }
194 
195 static int spinand_write_enable_op(struct spinand_device *spinand)
196 {
197 	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
198 
199 	return spi_mem_exec_op(spinand->spimem, &op);
200 }
201 
202 static int spinand_load_page_op(struct spinand_device *spinand,
203 				const struct nand_page_io_req *req)
204 {
205 	struct nand_device *nand = spinand_to_nand(spinand);
206 	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
207 	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
208 
209 	return spi_mem_exec_op(spinand->spimem, &op);
210 }
211 
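/*
 * Transfer the device cache content through the per-plane read dirmap into
 * the driver's page/OOB bounce buffers, then copy the requested data and OOB
 * ranges to the caller's buffers (honoring MTD_OPS_AUTO_OOB for the OOB part).
 */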
212 static int spinand_read_from_cache_op(struct spinand_device *spinand,
213 				      const struct nand_page_io_req *req)
214 {
215 	struct nand_device *nand = spinand_to_nand(spinand);
216 	struct mtd_info *mtd = nanddev_to_mtd(nand);
217 	struct spi_mem_dirmap_desc *rdesc;
218 	unsigned int nbytes = 0;
219 	void *buf = NULL;
220 	u16 column = 0;
221 	ssize_t ret;
222 
223 	if (req->datalen) {
224 		buf = spinand->databuf;
225 		nbytes = nanddev_page_size(nand);
226 		column = 0;
227 	}
228 
229 	if (req->ooblen) {
230 		nbytes += nanddev_per_page_oobsize(nand);
231 		if (!buf) {
232 			buf = spinand->oobbuf;
233 			column = nanddev_page_size(nand);
234 		}
235 	}
236 
237 	rdesc = spinand->dirmaps[req->pos.plane].rdesc;
238 
239 	while (nbytes) {
240 		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
241 		if (ret < 0)
242 			return ret;
243 
244 		if (!ret || ret > nbytes)
245 			return -EIO;
246 
247 		nbytes -= ret;
248 		column += ret;
249 		buf += ret;
250 	}
251 
252 	if (req->datalen)
253 		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
254 		       req->datalen);
255 
256 	if (req->ooblen) {
257 		if (req->mode == MTD_OPS_AUTO_OOB)
258 			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
259 						    spinand->oobbuf,
260 						    req->ooboffs,
261 						    req->ooblen);
262 		else
263 			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
264 			       req->ooblen);
265 	}
266 
267 	return 0;
268 }
269 
270 static int spinand_write_to_cache_op(struct spinand_device *spinand,
271 				     const struct nand_page_io_req *req)
272 {
273 	struct nand_device *nand = spinand_to_nand(spinand);
274 	struct mtd_info *mtd = nanddev_to_mtd(nand);
275 	struct spi_mem_dirmap_desc *wdesc;
276 	unsigned int nbytes, column = 0;
277 	void *buf = spinand->databuf;
278 	ssize_t ret;
279 
280 	/*
281 	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
282 	 * the cache content to 0xFF (depends on vendor implementation), so we
283 	 * must fill the page cache entirely even if we only want to program
284 	 * the data portion of the page, otherwise we might corrupt the BBM or
285 	 * user data previously programmed in the OOB area.
286 	 */
287 	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
288 	memset(spinand->databuf, 0xff, nbytes);
289 
290 	if (req->datalen)
291 		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
292 		       req->datalen);
293 
294 	if (req->ooblen) {
295 		if (req->mode == MTD_OPS_AUTO_OOB)
296 			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
297 						    spinand->oobbuf,
298 						    req->ooboffs,
299 						    req->ooblen);
300 		else
301 			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
302 			       req->ooblen);
303 	}
304 
305 	wdesc = spinand->dirmaps[req->pos.plane].wdesc;
306 
307 	while (nbytes) {
308 		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
309 		if (ret < 0)
310 			return ret;
311 
312 		if (!ret || ret > nbytes)
313 			return -EIO;
314 
315 		nbytes -= ret;
316 		column += ret;
317 		buf += ret;
318 	}
319 
320 	return 0;
321 }
322 
323 static int spinand_program_op(struct spinand_device *spinand,
324 			      const struct nand_page_io_req *req)
325 {
326 	struct nand_device *nand = spinand_to_nand(spinand);
327 	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
328 	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
329 
330 	return spi_mem_exec_op(spinand->spimem, &op);
331 }
332 
333 static int spinand_erase_op(struct spinand_device *spinand,
334 			    const struct nand_pos *pos)
335 {
336 	struct nand_device *nand = spinand_to_nand(spinand);
337 	unsigned int row = nanddev_pos_to_row(nand, pos);
338 	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
339 
340 	return spi_mem_exec_op(spinand->spimem, &op);
341 }
342 
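/*
 * Poll the status register until the device clears STATUS_BUSY or a 400 ms
 * timeout expires. The last status value read is returned through the
 * optional @s pointer.
 */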
343 static int spinand_wait(struct spinand_device *spinand, u8 *s)
344 {
345 	unsigned long timeo = jiffies + msecs_to_jiffies(400);
346 	u8 status;
347 	int ret;
348 
349 	do {
350 		ret = spinand_read_status(spinand, &status);
351 		if (ret)
352 			return ret;
353 
354 		if (!(status & STATUS_BUSY))
355 			goto out;
356 	} while (time_before(jiffies, timeo));
357 
358 	/*
359 	 * Extra read, just in case the STATUS_BUSY bit has changed
360 	 * since our last check.
361 	 */
362 	ret = spinand_read_status(spinand, &status);
363 	if (ret)
364 		return ret;
365 
366 out:
367 	if (s)
368 		*s = status;
369 
370 	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
371 }
372 
373 static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
374 {
375 	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
376 						 SPINAND_MAX_ID_LEN);
377 	int ret;
378 
379 	ret = spi_mem_exec_op(spinand->spimem, &op);
380 	if (!ret)
381 		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
382 
383 	return ret;
384 }
385 
386 static int spinand_reset_op(struct spinand_device *spinand)
387 {
388 	struct spi_mem_op op = SPINAND_RESET_OP;
389 	int ret;
390 
391 	ret = spi_mem_exec_op(spinand->spimem, &op);
392 	if (ret)
393 		return ret;
394 
395 	return spinand_wait(spinand, NULL);
396 }
397 
398 static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
399 {
400 	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
401 }
402 
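/*
 * Translate the ECC bits of the status register into a number of corrected
 * bitflips or -EBADMSG, deferring to the manufacturer ->get_status() hook
 * when one is provided.
 */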
403 static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
404 {
405 	struct nand_device *nand = spinand_to_nand(spinand);
406 
407 	if (spinand->eccinfo.get_status)
408 		return spinand->eccinfo.get_status(spinand, status);
409 
410 	switch (status & STATUS_ECC_MASK) {
411 	case STATUS_ECC_NO_BITFLIPS:
412 		return 0;
413 
414 	case STATUS_ECC_HAS_BITFLIPS:
415 		/*
416 		 * We have no way to know exactly how many bitflips have been
417 		 * fixed, so let's return the maximum possible value so that
418 		 * wear-leveling layers move the data immediately.
419 		 */
420 		return nand->eccreq.strength;
421 
422 	case STATUS_ECC_UNCOR_ERROR:
423 		return -EBADMSG;
424 
425 	default:
426 		break;
427 	}
428 
429 	return -EINVAL;
430 }
431 
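/*
 * Read a single page: load it into the device cache, wait for the device to
 * become ready, transfer the cache content to the host, and report the ECC
 * status when on-die ECC was enabled for this read.
 */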
432 static int spinand_read_page(struct spinand_device *spinand,
433 			     const struct nand_page_io_req *req,
434 			     bool ecc_enabled)
435 {
436 	u8 status;
437 	int ret;
438 
439 	ret = spinand_load_page_op(spinand, req);
440 	if (ret)
441 		return ret;
442 
443 	ret = spinand_wait(spinand, &status);
444 	if (ret < 0)
445 		return ret;
446 
447 	ret = spinand_read_from_cache_op(spinand, req);
448 	if (ret)
449 		return ret;
450 
451 	if (!ecc_enabled)
452 		return 0;
453 
454 	return spinand_check_ecc_status(spinand, status);
455 }
456 
457 static int spinand_write_page(struct spinand_device *spinand,
458 			      const struct nand_page_io_req *req)
459 {
460 	u8 status;
461 	int ret;
462 
463 	ret = spinand_write_enable_op(spinand);
464 	if (ret)
465 		return ret;
466 
467 	ret = spinand_write_to_cache_op(spinand, req);
468 	if (ret)
469 		return ret;
470 
471 	ret = spinand_program_op(spinand, req);
472 	if (ret)
473 		return ret;
474 
475 	ret = spinand_wait(spinand, &status);
476 	if (!ret && (status & STATUS_PROG_FAILED))
477 		ret = -EIO;
478 
479 	return ret;
480 }
481 
482 static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
483 			    struct mtd_oob_ops *ops)
484 {
485 	struct spinand_device *spinand = mtd_to_spinand(mtd);
486 	struct nand_device *nand = mtd_to_nanddev(mtd);
487 	unsigned int max_bitflips = 0;
488 	struct nand_io_iter iter;
489 	bool enable_ecc = false;
490 	bool ecc_failed = false;
491 	int ret = 0;
492 
493 	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
494 		enable_ecc = true;
495 
496 	mutex_lock(&spinand->lock);
497 
498 	nanddev_io_for_each_page(nand, from, ops, &iter) {
499 		ret = spinand_select_target(spinand, iter.req.pos.target);
500 		if (ret)
501 			break;
502 
503 		ret = spinand_ecc_enable(spinand, enable_ecc);
504 		if (ret)
505 			break;
506 
507 		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
508 		if (ret < 0 && ret != -EBADMSG)
509 			break;
510 
511 		if (ret == -EBADMSG) {
512 			ecc_failed = true;
513 			mtd->ecc_stats.failed++;
514 		} else {
515 			mtd->ecc_stats.corrected += ret;
516 			max_bitflips = max_t(unsigned int, max_bitflips, ret);
517 		}
518 
519 		ret = 0;
520 		ops->retlen += iter.req.datalen;
521 		ops->oobretlen += iter.req.ooblen;
522 	}
523 
524 	mutex_unlock(&spinand->lock);
525 
526 	if (ecc_failed && !ret)
527 		ret = -EBADMSG;
528 
529 	return ret ? ret : max_bitflips;
530 }
531 
532 static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
533 			     struct mtd_oob_ops *ops)
534 {
535 	struct spinand_device *spinand = mtd_to_spinand(mtd);
536 	struct nand_device *nand = mtd_to_nanddev(mtd);
537 	struct nand_io_iter iter;
538 	bool enable_ecc = false;
539 	int ret = 0;
540 
541 	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
542 		enable_ecc = true;
543 
544 	mutex_lock(&spinand->lock);
545 
546 	nanddev_io_for_each_page(nand, to, ops, &iter) {
547 		ret = spinand_select_target(spinand, iter.req.pos.target);
548 		if (ret)
549 			break;
550 
551 		ret = spinand_ecc_enable(spinand, enable_ecc);
552 		if (ret)
553 			break;
554 
555 		ret = spinand_write_page(spinand, &iter.req);
556 		if (ret)
557 			break;
558 
559 		ops->retlen += iter.req.datalen;
560 		ops->oobretlen += iter.req.ooblen;
561 	}
562 
563 	mutex_unlock(&spinand->lock);
564 
565 	return ret;
566 }
567 
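/*
 * A block is reported bad when the two bad block marker (BBM) bytes in its
 * OOB area are not both 0xff. The markers are read in raw mode with on-die
 * ECC disabled.
 */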
568 static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
569 {
570 	struct spinand_device *spinand = nand_to_spinand(nand);
571 	struct nand_page_io_req req = {
572 		.pos = *pos,
573 		.ooblen = 2,
574 		.ooboffs = 0,
575 		.oobbuf.in = spinand->oobbuf,
576 		.mode = MTD_OPS_RAW,
577 	};
578 
579 	memset(spinand->oobbuf, 0, 2);
580 	spinand_select_target(spinand, pos->target);
581 	spinand_read_page(spinand, &req, false);
582 	if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
583 		return true;
584 
585 	return false;
586 }
587 
588 static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
589 {
590 	struct nand_device *nand = mtd_to_nanddev(mtd);
591 	struct spinand_device *spinand = nand_to_spinand(nand);
592 	struct nand_pos pos;
593 	int ret;
594 
595 	nanddev_offs_to_pos(nand, offs, &pos);
596 	mutex_lock(&spinand->lock);
597 	ret = nanddev_isbad(nand, &pos);
598 	mutex_unlock(&spinand->lock);
599 
600 	return ret;
601 }
602 
603 static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
604 {
605 	struct spinand_device *spinand = nand_to_spinand(nand);
606 	struct nand_page_io_req req = {
607 		.pos = *pos,
608 		.ooboffs = 0,
609 		.ooblen = 2,
610 		.oobbuf.out = spinand->oobbuf,
611 	};
612 	int ret;
613 
614 	/* Erase the block before marking it bad. */
615 	ret = spinand_select_target(spinand, pos->target);
616 	if (ret)
617 		return ret;
618 
619 	ret = spinand_write_enable_op(spinand);
620 	if (ret)
621 		return ret;
622 
623 	spinand_erase_op(spinand, pos);
624 
625 	memset(spinand->oobbuf, 0, 2);
626 	return spinand_write_page(spinand, &req);
627 }
628 
629 static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
630 {
631 	struct nand_device *nand = mtd_to_nanddev(mtd);
632 	struct spinand_device *spinand = nand_to_spinand(nand);
633 	struct nand_pos pos;
634 	int ret;
635 
636 	nanddev_offs_to_pos(nand, offs, &pos);
637 	mutex_lock(&spinand->lock);
638 	ret = nanddev_markbad(nand, &pos);
639 	mutex_unlock(&spinand->lock);
640 
641 	return ret;
642 }
643 
644 static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
645 {
646 	struct spinand_device *spinand = nand_to_spinand(nand);
647 	u8 status;
648 	int ret;
649 
650 	ret = spinand_select_target(spinand, pos->target);
651 	if (ret)
652 		return ret;
653 
654 	ret = spinand_write_enable_op(spinand);
655 	if (ret)
656 		return ret;
657 
658 	ret = spinand_erase_op(spinand, pos);
659 	if (ret)
660 		return ret;
661 
662 	ret = spinand_wait(spinand, &status);
663 	if (!ret && (status & STATUS_ERASE_FAILED))
664 		ret = -EIO;
665 
666 	return ret;
667 }
668 
669 static int spinand_mtd_erase(struct mtd_info *mtd,
670 			     struct erase_info *einfo)
671 {
672 	struct spinand_device *spinand = mtd_to_spinand(mtd);
673 	int ret;
674 
675 	mutex_lock(&spinand->lock);
676 	ret = nanddev_mtd_erase(mtd, einfo);
677 	mutex_unlock(&spinand->lock);
678 
679 	return ret;
680 }
681 
682 static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
683 {
684 	struct spinand_device *spinand = mtd_to_spinand(mtd);
685 	struct nand_device *nand = mtd_to_nanddev(mtd);
686 	struct nand_pos pos;
687 	int ret;
688 
689 	nanddev_offs_to_pos(nand, offs, &pos);
690 	mutex_lock(&spinand->lock);
691 	ret = nanddev_isreserved(nand, &pos);
692 	mutex_unlock(&spinand->lock);
693 
694 	return ret;
695 }
696 
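/*
 * Create the read and write direct mappings covering a full page + OOB worth
 * of data for one plane, based on the previously selected cache op templates.
 */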
697 static int spinand_create_dirmap(struct spinand_device *spinand,
698 				 unsigned int plane)
699 {
700 	struct nand_device *nand = spinand_to_nand(spinand);
701 	struct spi_mem_dirmap_info info = {
702 		.length = nanddev_page_size(nand) +
703 			  nanddev_per_page_oobsize(nand),
704 	};
705 	struct spi_mem_dirmap_desc *desc;
706 
707 	/* The plane number is passed in the MSB just above the column address */
708 	info.offset = plane << fls(nand->memorg.pagesize);
709 
710 	info.op_tmpl = *spinand->op_templates.update_cache;
711 	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
712 					  spinand->spimem, &info);
713 	if (IS_ERR(desc))
714 		return PTR_ERR(desc);
715 
716 	spinand->dirmaps[plane].wdesc = desc;
717 
718 	info.op_tmpl = *spinand->op_templates.read_cache;
719 	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
720 					  spinand->spimem, &info);
721 	if (IS_ERR(desc))
722 		return PTR_ERR(desc);
723 
724 	spinand->dirmaps[plane].rdesc = desc;
725 
726 	return 0;
727 }
728 
729 static int spinand_create_dirmaps(struct spinand_device *spinand)
730 {
731 	struct nand_device *nand = spinand_to_nand(spinand);
732 	int i, ret;
733 
734 	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
735 					sizeof(*spinand->dirmaps) *
736 					nand->memorg.planes_per_lun,
737 					GFP_KERNEL);
738 	if (!spinand->dirmaps)
739 		return -ENOMEM;
740 
741 	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
742 		ret = spinand_create_dirmap(spinand, i);
743 		if (ret)
744 			return ret;
745 	}
746 
747 	return 0;
748 }
749 
750 static const struct nand_ops spinand_ops = {
751 	.erase = spinand_erase,
752 	.markbad = spinand_markbad,
753 	.isbad = spinand_isbad,
754 };
755 
756 static const struct spinand_manufacturer *spinand_manufacturers[] = {
757 	&gigadevice_spinand_manufacturer,
758 	&macronix_spinand_manufacturer,
759 	&micron_spinand_manufacturer,
760 	&paragon_spinand_manufacturer,
761 	&toshiba_spinand_manufacturer,
762 	&winbond_spinand_manufacturer,
763 };
764 
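/*
 * Let each supported manufacturer driver inspect the raw ID bytes: a positive
 * ->detect() return value means the chip was recognized, a negative one
 * aborts probing, and 0 means the chip belongs to another manufacturer.
 */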
765 static int spinand_manufacturer_detect(struct spinand_device *spinand)
766 {
767 	unsigned int i;
768 	int ret;
769 
770 	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
771 		ret = spinand_manufacturers[i]->ops->detect(spinand);
772 		if (ret > 0) {
773 			spinand->manufacturer = spinand_manufacturers[i];
774 			return 0;
775 		} else if (ret < 0) {
776 			return ret;
777 		}
778 	}
779 
780 	return -ENOTSUPP;
781 }
782 
783 static int spinand_manufacturer_init(struct spinand_device *spinand)
784 {
785 	if (spinand->manufacturer->ops->init)
786 		return spinand->manufacturer->ops->init(spinand);
787 
788 	return 0;
789 }
790 
791 static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
792 {
793 	/* Release manufacturer private data */
794 	if (spinand->manufacturer->ops->cleanup)
795 		return spinand->manufacturer->ops->cleanup(spinand);
796 }
797 
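/*
 * Return the first variant in @variants that the underlying SPI controller
 * can execute for a full page + OOB transfer, possibly split into several
 * chunks by spi_mem_adjust_op_size(), or NULL if none fits.
 */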
798 static const struct spi_mem_op *
799 spinand_select_op_variant(struct spinand_device *spinand,
800 			  const struct spinand_op_variants *variants)
801 {
802 	struct nand_device *nand = spinand_to_nand(spinand);
803 	unsigned int i;
804 
805 	for (i = 0; i < variants->nops; i++) {
806 		struct spi_mem_op op = variants->ops[i];
807 		unsigned int nbytes;
808 		int ret;
809 
810 		nbytes = nanddev_per_page_oobsize(nand) +
811 			 nanddev_page_size(nand);
812 
813 		while (nbytes) {
814 			op.data.nbytes = nbytes;
815 			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
816 			if (ret)
817 				break;
818 
819 			if (!spi_mem_supports_op(spinand->spimem, &op))
820 				break;
821 
822 			nbytes -= op.data.nbytes;
823 		}
824 
825 		if (!nbytes)
826 			return &variants->ops[i];
827 	}
828 
829 	return NULL;
830 }
831 
832 /**
833  * spinand_match_and_init() - Try to find a match between a device ID and an
834  *			      entry in a spinand_info table
835  * @spinand: SPI NAND object
836  * @table: SPI NAND device description table
837  * @table_size: size of the device description table
 * @devid: device ID to match against the table entries
838  *
839  * Should be used by SPI NAND manufacturer drivers when they want to find a
840  * match between a device ID retrieved through the READ_ID command and an
841  * entry in the SPI NAND description table. If a match is found, the spinand
842  * object will be initialized with information provided by the matching
843  * spinand_info entry.
844  *
845  * Return: 0 on success, a negative error code otherwise.
846  */
847 int spinand_match_and_init(struct spinand_device *spinand,
848 			   const struct spinand_info *table,
849 			   unsigned int table_size, u16 devid)
850 {
851 	struct nand_device *nand = spinand_to_nand(spinand);
852 	unsigned int i;
853 
854 	for (i = 0; i < table_size; i++) {
855 		const struct spinand_info *info = &table[i];
856 		const struct spi_mem_op *op;
857 
858 		if (devid != info->devid)
859 			continue;
860 
861 		nand->memorg = table[i].memorg;
862 		nand->eccreq = table[i].eccreq;
863 		spinand->eccinfo = table[i].eccinfo;
864 		spinand->flags = table[i].flags;
865 		spinand->select_target = table[i].select_target;
866 
867 		op = spinand_select_op_variant(spinand,
868 					       info->op_variants.read_cache);
869 		if (!op)
870 			return -ENOTSUPP;
871 
872 		spinand->op_templates.read_cache = op;
873 
874 		op = spinand_select_op_variant(spinand,
875 					       info->op_variants.write_cache);
876 		if (!op)
877 			return -ENOTSUPP;
878 
879 		spinand->op_templates.write_cache = op;
880 
881 		op = spinand_select_op_variant(spinand,
882 					       info->op_variants.update_cache);
883 		spinand->op_templates.update_cache = op;
884 
885 		return 0;
886 	}
887 
888 	return -ENOTSUPP;
889 }
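/*
 * Illustrative usage sketch (SPINAND_MFR_FOO and foo_spinand_table are
 * made-up placeholders, not part of this file): a manufacturer ->detect()
 * hook typically checks its manufacturer ID byte, then matches the device ID
 * against its table:
 *
 *	static int foo_spinand_detect(struct spinand_device *spinand)
 *	{
 *		u8 *id = spinand->id.data;
 *		int ret;
 *
 *		if (id[1] != SPINAND_MFR_FOO)
 *			return 0;
 *
 *		ret = spinand_match_and_init(spinand, foo_spinand_table,
 *					     ARRAY_SIZE(foo_spinand_table),
 *					     id[2]);
 *		if (ret)
 *			return ret;
 *
 *		return 1;
 *	}
 */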
890 
891 static int spinand_detect(struct spinand_device *spinand)
892 {
893 	struct device *dev = &spinand->spimem->spi->dev;
894 	struct nand_device *nand = spinand_to_nand(spinand);
895 	int ret;
896 
897 	ret = spinand_reset_op(spinand);
898 	if (ret)
899 		return ret;
900 
901 	ret = spinand_read_id_op(spinand, spinand->id.data);
902 	if (ret)
903 		return ret;
904 
905 	spinand->id.len = SPINAND_MAX_ID_LEN;
906 
907 	ret = spinand_manufacturer_detect(spinand);
908 	if (ret) {
909 		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
910 			spinand->id.data);
911 		return ret;
912 	}
913 
914 	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
915 		dev_err(dev,
916 			"SPI NANDs with more than one die must implement ->select_target()\n");
917 		return -EINVAL;
918 	}
919 
920 	dev_info(&spinand->spimem->spi->dev,
921 		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
922 	dev_info(&spinand->spimem->spi->dev,
923 		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
924 		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
925 		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
926 
927 	return 0;
928 }
929 
930 static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
931 				       struct mtd_oob_region *region)
932 {
933 	return -ERANGE;
934 }
935 
936 static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
937 					struct mtd_oob_region *region)
938 {
939 	if (section)
940 		return -ERANGE;
941 
942 	/* Reserve 2 bytes for the BBM. */
943 	region->offset = 2;
944 	region->length = 62;
945 
946 	return 0;
947 }
948 
949 static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
950 	.ecc = spinand_noecc_ooblayout_ecc,
951 	.free = spinand_noecc_ooblayout_free,
952 };
953 
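/*
 * Bring the device to a known state: detect it, allocate the DMA-able bounce
 * buffers, initialize the configuration cache, quad-enable/OTP settings and
 * dirmaps, unlock all blocks, then wire up the MTD callbacks.
 */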
954 static int spinand_init(struct spinand_device *spinand)
955 {
956 	struct device *dev = &spinand->spimem->spi->dev;
957 	struct mtd_info *mtd = spinand_to_mtd(spinand);
958 	struct nand_device *nand = mtd_to_nanddev(mtd);
959 	int ret, i;
960 
961 	/*
962 	 * We need a scratch buffer because the spi_mem interface requires that
963 	 * the buffer passed in spi_mem_op->data.buf be DMA-able.
964 	 */
965 	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
966 	if (!spinand->scratchbuf)
967 		return -ENOMEM;
968 
969 	ret = spinand_detect(spinand);
970 	if (ret)
971 		goto err_free_bufs;
972 
973 	/*
974 	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
975 	 * may use this buffer for DMA access: memory allocated by the devm_*
976 	 * helpers is not guaranteed to have DMA-safe alignment.
977 	 */
978 	spinand->databuf = kzalloc(nanddev_page_size(nand) +
979 			       nanddev_per_page_oobsize(nand),
980 			       GFP_KERNEL);
981 	if (!spinand->databuf) {
982 		ret = -ENOMEM;
983 		goto err_free_bufs;
984 	}
985 
986 	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
987 
988 	ret = spinand_init_cfg_cache(spinand);
989 	if (ret)
990 		goto err_free_bufs;
991 
992 	ret = spinand_init_quad_enable(spinand);
993 	if (ret)
994 		goto err_free_bufs;
995 
996 	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
997 	if (ret)
998 		goto err_free_bufs;
999 
1000 	ret = spinand_manufacturer_init(spinand);
1001 	if (ret) {
1002 		dev_err(dev,
1003 			"Failed to initialize the SPI NAND chip (err = %d)\n",
1004 			ret);
1005 		goto err_free_bufs;
1006 	}
1007 
1008 	ret = spinand_create_dirmaps(spinand);
1009 	if (ret) {
1010 		dev_err(dev,
1011 			"Failed to create direct mappings for read/write operations (err = %d)\n",
1012 			ret);
1013 		goto err_manuf_cleanup;
1014 	}
1015 
1016 	/* After power up, all blocks are locked, so unlock them here. */
1017 	for (i = 0; i < nand->memorg.ntargets; i++) {
1018 		ret = spinand_select_target(spinand, i);
1019 		if (ret)
1020 			goto err_manuf_cleanup;
1021 
1022 		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
1023 		if (ret)
1024 			goto err_manuf_cleanup;
1025 	}
1026 
1027 	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
1028 	if (ret)
1029 		goto err_manuf_cleanup;
1030 
1031 	/*
1032 	 * Right now, we don't support ECC, so let the whole OOB
1033 	 * area be available to the user.
1034 	 */
1035 	mtd->_read_oob = spinand_mtd_read;
1036 	mtd->_write_oob = spinand_mtd_write;
1037 	mtd->_block_isbad = spinand_mtd_block_isbad;
1038 	mtd->_block_markbad = spinand_mtd_block_markbad;
1039 	mtd->_block_isreserved = spinand_mtd_block_isreserved;
1040 	mtd->_erase = spinand_mtd_erase;
1041 	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
1042 
1043 	if (spinand->eccinfo.ooblayout)
1044 		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
1045 	else
1046 		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
1047 
1048 	ret = mtd_ooblayout_count_freebytes(mtd);
1049 	if (ret < 0)
1050 		goto err_cleanup_nanddev;
1051 
1052 	mtd->oobavail = ret;
1053 
1054 	return 0;
1055 
1056 err_cleanup_nanddev:
1057 	nanddev_cleanup(nand);
1058 
1059 err_manuf_cleanup:
1060 	spinand_manufacturer_cleanup(spinand);
1061 
1062 err_free_bufs:
1063 	kfree(spinand->databuf);
1064 	kfree(spinand->scratchbuf);
1065 	return ret;
1066 }
1067 
1068 static void spinand_cleanup(struct spinand_device *spinand)
1069 {
1070 	struct nand_device *nand = spinand_to_nand(spinand);
1071 
1072 	nanddev_cleanup(nand);
1073 	spinand_manufacturer_cleanup(spinand);
1074 	kfree(spinand->databuf);
1075 	kfree(spinand->scratchbuf);
1076 }
1077 
1078 static int spinand_probe(struct spi_mem *mem)
1079 {
1080 	struct spinand_device *spinand;
1081 	struct mtd_info *mtd;
1082 	int ret;
1083 
1084 	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
1085 			       GFP_KERNEL);
1086 	if (!spinand)
1087 		return -ENOMEM;
1088 
1089 	spinand->spimem = mem;
1090 	spi_mem_set_drvdata(mem, spinand);
1091 	spinand_set_of_node(spinand, mem->spi->dev.of_node);
1092 	mutex_init(&spinand->lock);
1093 	mtd = spinand_to_mtd(spinand);
1094 	mtd->dev.parent = &mem->spi->dev;
1095 
1096 	ret = spinand_init(spinand);
1097 	if (ret)
1098 		return ret;
1099 
1100 	ret = mtd_device_register(mtd, NULL, 0);
1101 	if (ret)
1102 		goto err_spinand_cleanup;
1103 
1104 	return 0;
1105 
1106 err_spinand_cleanup:
1107 	spinand_cleanup(spinand);
1108 
1109 	return ret;
1110 }
1111 
1112 static int spinand_remove(struct spi_mem *mem)
1113 {
1114 	struct spinand_device *spinand;
1115 	struct mtd_info *mtd;
1116 	int ret;
1117 
1118 	spinand = spi_mem_get_drvdata(mem);
1119 	mtd = spinand_to_mtd(spinand);
1120 
1121 	ret = mtd_device_unregister(mtd);
1122 	if (ret)
1123 		return ret;
1124 
1125 	spinand_cleanup(spinand);
1126 
1127 	return 0;
1128 }
1129 
1130 static const struct spi_device_id spinand_ids[] = {
1131 	{ .name = "spi-nand" },
1132 	{ /* sentinel */ },
1133 };
1134 
1135 #ifdef CONFIG_OF
1136 static const struct of_device_id spinand_of_ids[] = {
1137 	{ .compatible = "spi-nand" },
1138 	{ /* sentinel */ },
1139 };
1140 #endif
1141 
1142 static struct spi_mem_driver spinand_drv = {
1143 	.spidrv = {
1144 		.id_table = spinand_ids,
1145 		.driver = {
1146 			.name = "spi-nand",
1147 			.of_match_table = of_match_ptr(spinand_of_ids),
1148 		},
1149 	},
1150 	.probe = spinand_probe,
1151 	.remove = spinand_remove,
1152 };
1153 module_spi_mem_driver(spinand_drv);
1154 
1155 MODULE_DESCRIPTION("SPI NAND framework");
1156 MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
1157 MODULE_LICENSE("GPL v2");
1158