xref: /linux/drivers/mtd/nand/spi/core.c (revision 06bc7ff0a1e0f2b0102e1314e3527a7ec0997851)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2016-2017 Micron Technology, Inc.
4  *
5  * Authors:
6  *	Peter Pan <peterpandong@micron.com>
7  *	Boris Brezillon <boris.brezillon@bootlin.com>
8  */
9 
10 #define pr_fmt(fmt)	"spi-nand: " fmt
11 
12 #include <linux/device.h>
13 #include <linux/jiffies.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/mtd/spinand.h>
17 #include <linux/of.h>
18 #include <linux/slab.h>
19 #include <linux/string.h>
20 #include <linux/spi/spi.h>
21 #include <linux/spi/spi-mem.h>
22 
23 static struct spi_mem_op
24 spinand_fill_reset_op(struct spinand_device *spinand)
25 {
26 	return spinand->op_templates->reset;
27 }
28 
29 static struct spi_mem_op
30 spinand_fill_readid_op(struct spinand_device *spinand,
31 		       u8 naddr, u8 ndummy, void *buf, unsigned int len)
32 {
33 	struct spi_mem_op op = spinand->op_templates->readid;
34 
35 	op.addr.nbytes = naddr;
36 	op.dummy.nbytes = ndummy;
37 	op.data.buf.in = buf;
38 	op.data.nbytes = len;
39 
40 	return op;
41 }
42 
43 struct spi_mem_op
44 spinand_fill_wr_en_op(struct spinand_device *spinand)
45 {
46 	return spinand->op_templates->wr_en;
47 }
48 
49 static __maybe_unused struct spi_mem_op
50 spinand_fill_wr_dis_op(struct spinand_device *spinand)
51 {
52 	return spinand->op_templates->wr_dis;
53 }
54 
55 struct spi_mem_op
56 spinand_fill_set_feature_op(struct spinand_device *spinand, u64 reg, const void *valptr)
57 {
58 	struct spi_mem_op op = spinand->op_templates->set_feature;
59 
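	/*
	 * Illustrative note: in octal DTR mode the address phase carries two
	 * bytes per clock cycle, so the one-byte feature register address is
	 * replicated on both bytes below (e.g. 0xB0 becomes 0xB0B0).
	 */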
60 	if (op.cmd.dtr && op.cmd.buswidth == 8)
61 		reg |= reg << 8;
62 
63 	op.addr.val = reg;
64 	op.data.buf.out = valptr;
65 
66 	return op;
67 }
68 
69 struct spi_mem_op
70 spinand_fill_get_feature_op(struct spinand_device *spinand, u64 reg, void *valptr)
71 {
72 	struct spi_mem_op op = spinand->op_templates->get_feature;
73 
74 	if (op.cmd.dtr && op.cmd.buswidth == 8)
75 		reg |= reg << 8;
76 
77 	op.addr.val = reg;
78 	op.data.buf.in = valptr;
79 
80 	return op;
81 }
82 
83 static struct spi_mem_op
84 spinand_fill_blk_erase_op(struct spinand_device *spinand, u64 addr)
85 {
86 	struct spi_mem_op op = spinand->op_templates->blk_erase;
87 
88 	op.addr.val = addr;
89 
90 	return op;
91 }
92 
93 static struct spi_mem_op
94 spinand_fill_page_read_op(struct spinand_device *spinand, u64 addr)
95 {
96 	struct spi_mem_op op = spinand->op_templates->page_read;
97 
98 	op.addr.val = addr;
99 
100 	return op;
101 }
102 
103 static struct spi_mem_op
104 spinand_fill_page_read_packed_op(struct spinand_device *spinand, u64 addr)
105 {
106 	struct spi_mem_op op = spinand->op_templates->page_read;
107 
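	/*
	 * Illustrative note: the "packed" variant folds the row address bits
	 * above bit 15 into the command opcode, leaving a 16-bit address
	 * phase below.
	 */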
108 	op.cmd.opcode |= addr >> 16;
109 	op.addr.val = addr & 0xFFFF;
110 
111 	return op;
112 }
113 
114 struct spi_mem_op
115 spinand_fill_prog_exec_op(struct spinand_device *spinand, u64 addr)
116 {
117 	struct spi_mem_op op = spinand->op_templates->prog_exec;
118 
119 	op.addr.val = addr;
120 
121 	return op;
122 }
123 
124 int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
125 {
126 	struct spi_mem_op op = SPINAND_OP(spinand, get_feature,
127 					  reg, spinand->scratchbuf);
128 	int ret;
129 
130 	ret = spi_mem_exec_op(spinand->spimem, &op);
131 	if (ret)
132 		return ret;
133 
134 	*val = *spinand->scratchbuf;
135 	return 0;
136 }
137 
138 int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
139 {
140 	struct spi_mem_op op = SPINAND_OP(spinand, set_feature,
141 					  reg, spinand->scratchbuf);
142 
143 	*spinand->scratchbuf = val;
144 	return spi_mem_exec_op(spinand->spimem, &op);
145 }
146 
147 static int spinand_read_status(struct spinand_device *spinand, u8 *status)
148 {
149 	return spinand_read_reg_op(spinand, REG_STATUS, status);
150 }
151 
152 static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
153 {
154 	struct nand_device *nand = spinand_to_nand(spinand);
155 
156 	if (WARN_ON(spinand->cur_target < 0 ||
157 		    spinand->cur_target >= nand->memorg.ntargets))
158 		return -EINVAL;
159 
160 	*cfg = spinand->cfg_cache[spinand->cur_target];
161 	return 0;
162 }
163 
164 static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
165 {
166 	struct nand_device *nand = spinand_to_nand(spinand);
167 	int ret;
168 
169 	if (WARN_ON(spinand->cur_target < 0 ||
170 		    spinand->cur_target >= nand->memorg.ntargets))
171 		return -EINVAL;
172 
173 	if (spinand->cfg_cache[spinand->cur_target] == cfg)
174 		return 0;
175 
176 	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
177 	if (ret)
178 		return ret;
179 
180 	spinand->cfg_cache[spinand->cur_target] = cfg;
181 	return 0;
182 }
183 
184 /**
185  * spinand_upd_cfg() - Update the configuration register
186  * @spinand: the spinand device
187  * @mask: the mask encoding the bits to update in the config reg
188  * @val: the new value to apply
189  *
190  * Read-modify-write: clear the bits selected by @mask and OR in @val.
191  *
192  * Return: 0 on success, a negative error code otherwise.
193  */
194 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
195 {
196 	int ret;
197 	u8 cfg;
198 
199 	ret = spinand_get_cfg(spinand, &cfg);
200 	if (ret)
201 		return ret;
202 
203 	cfg &= ~mask;
204 	cfg |= val;
205 
206 	return spinand_set_cfg(spinand, cfg);
207 }
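
/*
 * Usage sketch (illustrative):
 *
 *	spinand_upd_cfg(spinand, CFG_ECC_ENABLE, CFG_ECC_ENABLE);
 *	spinand_upd_cfg(spinand, CFG_ECC_ENABLE, 0);
 *
 * Passing the same constant as mask and value sets the bit(s); passing 0 as
 * value clears them, as spinand_init_quad_enable() and spinand_ecc_enable()
 * do below.
 */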
208 
209 /**
210  * spinand_select_target() - Select a specific NAND target/die
211  * @spinand: the spinand device
212  * @target: the target/die to select
213  *
214  * Select a new target/die. If the chip only has one die, this function is a no-op.
215  *
216  * Return: 0 on success, a negative error code otherwise.
217  */
218 int spinand_select_target(struct spinand_device *spinand, unsigned int target)
219 {
220 	struct nand_device *nand = spinand_to_nand(spinand);
221 	int ret;
222 
223 	if (WARN_ON(target >= nand->memorg.ntargets))
224 		return -EINVAL;
225 
226 	if (spinand->cur_target == target)
227 		return 0;
228 
229 	if (nand->memorg.ntargets == 1) {
230 		spinand->cur_target = target;
231 		return 0;
232 	}
233 
234 	ret = spinand->select_target(spinand, target);
235 	if (ret)
236 		return ret;
237 
238 	spinand->cur_target = target;
239 	return 0;
240 }
241 
242 static int spinand_read_cfg(struct spinand_device *spinand)
243 {
244 	struct nand_device *nand = spinand_to_nand(spinand);
245 	unsigned int target;
246 	int ret;
247 
248 	for (target = 0; target < nand->memorg.ntargets; target++) {
249 		ret = spinand_select_target(spinand, target);
250 		if (ret)
251 			return ret;
252 
253 		/*
254 		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
255 		 * here to bypass the config cache.
256 		 */
257 		ret = spinand_read_reg_op(spinand, REG_CFG,
258 					  &spinand->cfg_cache[target]);
259 		if (ret)
260 			return ret;
261 	}
262 
263 	return 0;
264 }
265 
266 static int spinand_init_cfg_cache(struct spinand_device *spinand)
267 {
268 	struct nand_device *nand = spinand_to_nand(spinand);
269 	struct device *dev = &spinand->spimem->spi->dev;
270 
271 	spinand->cfg_cache = devm_kcalloc(dev,
272 					  nand->memorg.ntargets,
273 					  sizeof(*spinand->cfg_cache),
274 					  GFP_KERNEL);
275 	if (!spinand->cfg_cache)
276 		return -ENOMEM;
277 
278 	return 0;
279 }
280 
281 static int spinand_init_quad_enable(struct spinand_device *spinand,
282 				    bool enable)
283 {
284 	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
285 			       enable ? CFG_QUAD_ENABLE : 0);
286 }
287 
288 static int spinand_ecc_enable(struct spinand_device *spinand,
289 			      bool enable)
290 {
291 	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
292 			       enable ? CFG_ECC_ENABLE : 0);
293 }
294 
295 static int spinand_cont_read_enable(struct spinand_device *spinand,
296 				    bool enable)
297 {
298 	return spinand->set_cont_read(spinand, enable);
299 }
300 
301 static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
302 {
303 	struct nand_device *nand = spinand_to_nand(spinand);
304 
305 	if (spinand->eccinfo.get_status)
306 		return spinand->eccinfo.get_status(spinand, status);
307 
308 	switch (status & STATUS_ECC_MASK) {
309 	case STATUS_ECC_NO_BITFLIPS:
310 		return 0;
311 
312 	case STATUS_ECC_HAS_BITFLIPS:
313 		/*
314 		 * We have no way to know exactly how many bitflips have been
315 		 * fixed, so let's return the maximum possible value so that
316 		 * wear-leveling layers move the data immediately.
317 		 */
318 		return nanddev_get_ecc_conf(nand)->strength;
319 
320 	case STATUS_ECC_UNCOR_ERROR:
321 		return -EBADMSG;
322 
323 	default:
324 		break;
325 	}
326 
327 	return -EINVAL;
328 }
329 
330 static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
331 				       struct mtd_oob_region *region)
332 {
333 	return -ERANGE;
334 }
335 
336 static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
337 					struct mtd_oob_region *region)
338 {
339 	if (section)
340 		return -ERANGE;
341 
342 	/* Reserve 2 bytes for the BBM. */
343 	region->offset = 2;
344 	region->length = 62;
345 
346 	return 0;
347 }
348 
349 static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
350 	.ecc = spinand_noecc_ooblayout_ecc,
351 	.free = spinand_noecc_ooblayout_free,
352 };
353 
354 static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
355 {
356 	struct spinand_device *spinand = nand_to_spinand(nand);
357 	struct mtd_info *mtd = nanddev_to_mtd(nand);
358 	struct spinand_ondie_ecc_conf *engine_conf;
359 
360 	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
361 	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
362 	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;
363 
364 	engine_conf = kzalloc_obj(*engine_conf);
365 	if (!engine_conf)
366 		return -ENOMEM;
367 
368 	nand->ecc.ctx.priv = engine_conf;
369 
370 	if (spinand->eccinfo.ooblayout)
371 		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
372 	else
373 		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
374 
375 	return 0;
376 }
377 
378 static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
379 {
380 	kfree(nand->ecc.ctx.priv);
381 }
382 
383 static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
384 					    struct nand_page_io_req *req)
385 {
386 	struct spinand_device *spinand = nand_to_spinand(nand);
387 	bool enable = (req->mode != MTD_OPS_RAW);
388 
389 	if (!enable && spinand->flags & SPINAND_NO_RAW_ACCESS)
390 		return -EOPNOTSUPP;
391 
392 	memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
393 
394 	/* Only enable or disable the engine */
395 	return spinand_ecc_enable(spinand, enable);
396 }
397 
398 static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
399 					   struct nand_page_io_req *req)
400 {
401 	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
402 	struct spinand_device *spinand = nand_to_spinand(nand);
403 	struct mtd_info *mtd = spinand_to_mtd(spinand);
404 	int ret;
405 
406 	if (req->mode == MTD_OPS_RAW)
407 		return 0;
408 
409 	/* Nothing to do when finishing a page write */
410 	if (req->type == NAND_PAGE_WRITE)
411 		return 0;
412 
413 	/* Finish a page read: check the status, report errors/bitflips */
414 	ret = spinand_check_ecc_status(spinand, engine_conf->status);
415 	if (ret == -EBADMSG) {
416 		mtd->ecc_stats.failed++;
417 	} else if (ret > 0) {
418 		unsigned int pages;
419 
420 		/*
421 		 * Continuous reads don't allow us to get the per-page detail,
422 		 * so we may exaggerate the actual number of corrected bitflips.
423 		 */
424 		if (!req->continuous)
425 			pages = 1;
426 		else
427 			pages = req->datalen / nanddev_page_size(nand);
428 
429 		mtd->ecc_stats.corrected += ret * pages;
430 	}
431 
432 	return ret;
433 }
434 
435 static const struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
436 	.init_ctx = spinand_ondie_ecc_init_ctx,
437 	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
438 	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
439 	.finish_io_req = spinand_ondie_ecc_finish_io_req,
440 };
441 
442 static struct nand_ecc_engine spinand_ondie_ecc_engine = {
443 	.ops = &spinand_ondie_ecc_engine_ops,
444 };
445 
446 static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
447 {
448 	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
449 
450 	if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
451 	    engine_conf)
452 		engine_conf->status = status;
453 }
454 
455 int spinand_write_enable_op(struct spinand_device *spinand)
456 {
457 	struct spi_mem_op op = SPINAND_OP(spinand, wr_en);
458 
459 	return spi_mem_exec_op(spinand->spimem, &op);
460 }
461 
462 static int spinand_load_page_op(struct spinand_device *spinand,
463 				const struct nand_page_io_req *req)
464 {
465 	struct nand_device *nand = spinand_to_nand(spinand);
466 	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
467 	bool packed = spinand->flags & SPINAND_ODTR_PACKED_PAGE_READ;
468 	struct spi_mem_op op = packed ?
469 		SPINAND_OP(spinand, page_read_packed, row) :
470 		SPINAND_OP(spinand, page_read, row);
471 
472 	return spi_mem_exec_op(spinand->spimem, &op);
473 }
474 
475 static int spinand_read_from_cache_op(struct spinand_device *spinand,
476 				      const struct nand_page_io_req *req)
477 {
478 	struct nand_device *nand = spinand_to_nand(spinand);
479 	struct mtd_info *mtd = spinand_to_mtd(spinand);
480 	struct spi_mem_dirmap_desc *rdesc;
481 	unsigned int nbytes = 0;
482 	void *buf = NULL;
483 	u16 column = 0;
484 	ssize_t ret;
485 
486 	if (req->datalen) {
487 		buf = spinand->databuf;
488 		if (!req->continuous)
489 			nbytes = nanddev_page_size(nand);
490 		else
491 			nbytes = round_up(req->dataoffs + req->datalen,
492 					  nanddev_page_size(nand));
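		/*
		 * e.g. (illustrative) a 3000-byte continuous request starting
		 * at a page boundary on 2048-byte pages is rounded up to read
		 * 4096 bytes.
		 */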
493 		column = 0;
494 	}
495 
496 	if (req->ooblen) {
497 		nbytes += nanddev_per_page_oobsize(nand);
498 		if (!buf) {
499 			buf = spinand->oobbuf;
500 			column = nanddev_page_size(nand);
501 		}
502 	}
503 
504 	if (req->mode == MTD_OPS_RAW)
505 		rdesc = spinand->dirmaps[req->pos.plane].rdesc;
506 	else
507 		rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;
508 
509 	if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
510 		column |= req->pos.plane << fls(nanddev_page_size(nand));
511 
512 	while (nbytes) {
513 		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
514 		if (ret < 0)
515 			return ret;
516 
517 		if (!ret || ret > nbytes)
518 			return -EIO;
519 
520 		nbytes -= ret;
521 		column += ret;
522 		buf += ret;
523 
524 		/*
525 		 * Dirmap accesses are allowed to toggle the CS.
526 		 * Toggling the CS during a continuous read is forbidden.
527 		 */
528 		if (nbytes && req->continuous) {
529 			/*
530 			 * A SPI controller with broken continuous read
531 			 * support was detected. Disable any future use of
532 			 * continuous reads and return -EAGAIN to retry the
533 			 * read in regular mode.
534 			 */
535 			spinand->cont_read_possible = false;
536 			return -EAGAIN;
537 		}
538 	}
539 
540 	if (req->datalen)
541 		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
542 		       req->datalen);
543 
544 	if (req->ooblen) {
545 		if (req->mode == MTD_OPS_AUTO_OOB)
546 			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
547 						    spinand->oobbuf,
548 						    req->ooboffs,
549 						    req->ooblen);
550 		else
551 			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
552 			       req->ooblen);
553 	}
554 
555 	return 0;
556 }
557 
558 static int spinand_write_to_cache_op(struct spinand_device *spinand,
559 				     const struct nand_page_io_req *req)
560 {
561 	struct nand_device *nand = spinand_to_nand(spinand);
562 	struct mtd_info *mtd = spinand_to_mtd(spinand);
563 	struct spi_mem_dirmap_desc *wdesc;
564 	unsigned int nbytes, column = 0;
565 	void *buf = spinand->databuf;
566 	ssize_t ret;
567 
568 	/*
569 	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
570 	 * the cache content to 0xFF (depends on vendor implementation), so we
571 	 * must fill the page cache entirely even if we only want to program
572 	 * the data portion of the page, otherwise we might corrupt the BBM or
573 	 * user data previously programmed in OOB area.
574 	 *
575 	 * Only reset the data buffer manually, the OOB buffer is prepared by
576 	 * ECC engines ->prepare_io_req() callback.
577 	 */
578 	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
579 	memset(spinand->databuf, 0xff, nanddev_page_size(nand));
580 
581 	if (req->datalen)
582 		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
583 		       req->datalen);
584 
585 	if (req->ooblen) {
586 		if (req->mode == MTD_OPS_AUTO_OOB)
587 			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
588 						    spinand->oobbuf,
589 						    req->ooboffs,
590 						    req->ooblen);
591 		else
592 			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
593 			       req->ooblen);
594 	}
595 
596 	if (req->mode == MTD_OPS_RAW)
597 		wdesc = spinand->dirmaps[req->pos.plane].wdesc;
598 	else
599 		wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;
600 
601 	if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
602 		column |= req->pos.plane << fls(nanddev_page_size(nand));
603 
604 	while (nbytes) {
605 		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
606 		if (ret < 0)
607 			return ret;
608 
609 		if (!ret || ret > nbytes)
610 			return -EIO;
611 
612 		nbytes -= ret;
613 		column += ret;
614 		buf += ret;
615 	}
616 
617 	return 0;
618 }
619 
620 static int spinand_program_op(struct spinand_device *spinand,
621 			      const struct nand_page_io_req *req)
622 {
623 	struct nand_device *nand = spinand_to_nand(spinand);
624 	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
625 	struct spi_mem_op op = SPINAND_OP(spinand, prog_exec, row);
626 
627 	return spi_mem_exec_op(spinand->spimem, &op);
628 }
629 
630 static int spinand_erase_op(struct spinand_device *spinand,
631 			    const struct nand_pos *pos)
632 {
633 	struct nand_device *nand = spinand_to_nand(spinand);
634 	unsigned int row = nanddev_pos_to_row(nand, pos);
635 	struct spi_mem_op op = SPINAND_OP(spinand, blk_erase, row);
636 
637 	return spi_mem_exec_op(spinand->spimem, &op);
638 }
639 
640 /**
641  * spinand_wait() - Poll memory device status
642  * @spinand: the spinand device
643  * @initial_delay_us: delay in us before starting to poll
644  * @poll_delay_us: time to sleep between reads in us
645  * @s: pointer to the variable in which to store the value of REG_STATUS
646  *
647  * This function polls a status register (REG_STATUS) and returns when
648  * the STATUS_BUSY bit is cleared or when the timeout has expired.
649  *
650  * Return: 0 on success, a negative error code otherwise.
651  */
652 int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
653 		 unsigned long poll_delay_us, u8 *s)
654 {
655 	struct spi_mem_op op = SPINAND_OP(spinand, get_feature,
656 					  REG_STATUS, spinand->scratchbuf);
657 	u8 status;
658 	int ret;
659 
660 	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
661 				  initial_delay_us,
662 				  poll_delay_us,
663 				  SPINAND_WAITRDY_TIMEOUT_MS);
664 	if (ret)
665 		return ret;
666 
667 	status = *spinand->scratchbuf;
668 	if (!(status & STATUS_BUSY))
669 		goto out;
670 
671 	/*
672 	 * Extra read, just in case the STATUS_READY bit has changed
673 	 * since our last check
674 	 */
675 	ret = spinand_read_status(spinand, &status);
676 	if (ret)
677 		return ret;
678 
679 out:
680 	if (s)
681 		*s = status;
682 
683 	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
684 }
685 
686 static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
687 			      u8 ndummy, u8 *buf)
688 {
689 	struct spi_mem_op op = SPINAND_OP(spinand, readid,
690 					  naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
691 	int ret;
692 
693 	ret = spi_mem_exec_op(spinand->spimem, &op);
694 	if (!ret)
695 		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
696 
697 	return ret;
698 }
699 
700 static int spinand_reset_op(struct spinand_device *spinand)
701 {
702 	struct spi_mem_op op = SPINAND_OP(spinand, reset);
703 	int ret;
704 
705 	ret = spi_mem_exec_op(spinand->spimem, &op);
706 	if (ret)
707 		return ret;
708 
709 	return spinand_wait(spinand,
710 			    SPINAND_RESET_INITIAL_DELAY_US,
711 			    SPINAND_RESET_POLL_DELAY_US,
712 			    NULL);
713 }
714 
715 static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
716 {
717 	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
718 }
719 
720 /**
721  * spinand_read_page() - Read a page
722  * @spinand: the spinand device
723  * @req: the I/O request
724  *
725  * Return: 0 or a positive number of bitflips corrected on success.
726  * A negative error code otherwise.
727  */
728 int spinand_read_page(struct spinand_device *spinand,
729 		      const struct nand_page_io_req *req)
730 {
731 	struct nand_device *nand = spinand_to_nand(spinand);
732 	u8 status;
733 	int ret;
734 
735 	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
736 	if (ret)
737 		return ret;
738 
739 	ret = spinand_load_page_op(spinand, req);
740 	if (ret)
741 		return ret;
742 
743 	ret = spinand_wait(spinand,
744 			   SPINAND_READ_INITIAL_DELAY_US,
745 			   SPINAND_READ_POLL_DELAY_US,
746 			   &status);
747 	if (ret < 0)
748 		return ret;
749 
750 	spinand_ondie_ecc_save_status(nand, status);
751 
752 	ret = spinand_read_from_cache_op(spinand, req);
753 	if (ret)
754 		return ret;
755 
756 	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
757 }
758 
759 /**
760  * spinand_write_page() - Write a page
761  * @spinand: the spinand device
762  * @req: the I/O request
763  *
764  * Return: 0 on success.
765  * A negative error code otherwise.
766  */
767 int spinand_write_page(struct spinand_device *spinand,
768 		       const struct nand_page_io_req *req)
769 {
770 	struct nand_device *nand = spinand_to_nand(spinand);
771 	u8 status;
772 	int ret;
773 
774 	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
775 	if (ret)
776 		return ret;
777 
778 	ret = spinand_write_enable_op(spinand);
779 	if (ret)
780 		return ret;
781 
782 	ret = spinand_write_to_cache_op(spinand, req);
783 	if (ret)
784 		return ret;
785 
786 	ret = spinand_program_op(spinand, req);
787 	if (ret)
788 		return ret;
789 
790 	ret = spinand_wait(spinand,
791 			   SPINAND_WRITE_INITIAL_DELAY_US,
792 			   SPINAND_WRITE_POLL_DELAY_US,
793 			   &status);
794 	if (ret)
795 		return ret;
796 
797 	if (status & STATUS_PROG_FAILED)
798 		return -EIO;
799 
800 	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
801 }
802 
803 static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
804 					 struct mtd_oob_ops *ops,
805 					 unsigned int *max_bitflips)
806 {
807 	struct spinand_device *spinand = mtd_to_spinand(mtd);
808 	struct nand_device *nand = mtd_to_nanddev(mtd);
809 	struct mtd_ecc_stats old_stats;
810 	struct nand_io_iter iter;
811 	bool disable_ecc = false;
812 	bool ecc_failed = false;
813 	unsigned int retry_mode = 0;
814 	int ret;
815 
816 	old_stats = mtd->ecc_stats;
817 
818 	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
819 		disable_ecc = true;
820 
821 	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
822 		if (disable_ecc)
823 			iter.req.mode = MTD_OPS_RAW;
824 
825 		ret = spinand_select_target(spinand, iter.req.pos.target);
826 		if (ret)
827 			break;
828 
829 read_retry:
830 		ret = spinand_read_page(spinand, &iter.req);
831 		if (ret < 0 && ret != -EBADMSG)
832 			break;
833 
834 		if (ret == -EBADMSG && spinand->set_read_retry) {
835 			if (spinand->read_retries && (++retry_mode <= spinand->read_retries)) {
836 				ret = spinand->set_read_retry(spinand, retry_mode);
837 				if (ret < 0) {
838 					spinand->set_read_retry(spinand, 0);
839 					return ret;
840 				}
841 
842 				/* Reset ecc_stats; retry */
843 				mtd->ecc_stats = old_stats;
844 				goto read_retry;
845 			} else {
846 				/* No more retry modes; real failure */
847 				ecc_failed = true;
848 			}
849 		} else if (ret == -EBADMSG) {
850 			ecc_failed = true;
851 		} else {
852 			*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
853 		}
854 
855 		ret = 0;
856 		ops->retlen += iter.req.datalen;
857 		ops->oobretlen += iter.req.ooblen;
858 
859 		/* Reset to retry mode 0 */
860 		if (retry_mode) {
861 			retry_mode = 0;
862 			ret = spinand->set_read_retry(spinand, retry_mode);
863 			if (ret < 0)
864 				return ret;
865 		}
866 	}
867 
868 	if (ecc_failed && !ret)
869 		ret = -EBADMSG;
870 
871 	return ret;
872 }
873 
874 static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
875 					    struct mtd_oob_ops *ops,
876 					    unsigned int *max_bitflips)
877 {
878 	struct spinand_device *spinand = mtd_to_spinand(mtd);
879 	struct nand_device *nand = mtd_to_nanddev(mtd);
880 	struct nand_io_iter iter;
881 	u8 status;
882 	int ret;
883 
884 	ret = spinand_cont_read_enable(spinand, true);
885 	if (ret)
886 		return ret;
887 
888 	/*
889 	 * The cache is divided into two halves. While one half of the cache has
890 	 * the requested data, the other half is loaded with the next chunk of data.
891 	 * Therefore, the host can read out the data continuously from page to page.
892 	 * Each data read must be a multiple of 4 bytes and full pages should be read;
893 	 * otherwise, the data output might get out of sequence from one read command
894 	 * to another.
895 	 */
896 	nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
897 		ret = spinand_select_target(spinand, iter.req.pos.target);
898 		if (ret)
899 			goto end_cont_read;
900 
901 		ret = nand_ecc_prepare_io_req(nand, &iter.req);
902 		if (ret)
903 			goto end_cont_read;
904 
905 		ret = spinand_load_page_op(spinand, &iter.req);
906 		if (ret)
907 			goto end_cont_read;
908 
909 		ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
910 				   SPINAND_READ_POLL_DELAY_US, NULL);
911 		if (ret < 0)
912 			goto end_cont_read;
913 
914 		ret = spinand_read_from_cache_op(spinand, &iter.req);
915 		if (ret)
916 			goto end_cont_read;
917 
918 		ops->retlen += iter.req.datalen;
919 
920 		ret = spinand_read_status(spinand, &status);
921 		if (ret)
922 			goto end_cont_read;
923 
924 		spinand_ondie_ecc_save_status(nand, status);
925 
926 		ret = nand_ecc_finish_io_req(nand, &iter.req);
927 		if (ret < 0)
928 			goto end_cont_read;
929 
930 		*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
931 		ret = 0;
932 	}
933 
934 end_cont_read:
935 	/*
936 	 * Once all the data has been read out, the host can either pull CS#
937 	 * high and wait for tRST or manually clear the bit in the configuration
938 	 * register to terminate the continuous read operation. We have no
939 	 * guarantee the SPI controller drivers will effectively deassert the CS
940 	 * when we expect them to, so take the register-based approach.
941 	 */
942 	spinand_cont_read_enable(spinand, false);
943 
944 	return ret;
945 }
946 
947 static void spinand_cont_read_init(struct spinand_device *spinand)
948 {
949 	struct nand_device *nand = spinand_to_nand(spinand);
950 	enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type;
951 
952 	/* OOBs cannot be retrieved, so external/on-host ECC engines won't work */
953 	if (spinand->set_cont_read &&
954 	    (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE ||
955 	     engine_type == NAND_ECC_ENGINE_TYPE_NONE)) {
956 		spinand->cont_read_possible = true;
957 
958 		/*
959 		 * Ensure continuous read is disabled on probe.
960 		 * Some devices retain this state across soft reset,
961 		 * which leaves the OOB area inaccessible and results
962 		 * in false positive returns from spinand_isbad().
963 		 */
964 		spinand_cont_read_enable(spinand, false);
965 	}
966 }
967 
968 static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
969 				  struct mtd_oob_ops *ops)
970 {
971 	struct nand_device *nand = mtd_to_nanddev(mtd);
972 	struct spinand_device *spinand = nand_to_spinand(nand);
973 	struct nand_pos start_pos, end_pos;
974 
975 	if (!spinand->cont_read_possible)
976 		return false;
977 
978 	/* OOBs won't be retrieved */
979 	if (ops->ooblen || ops->oobbuf)
980 		return false;
981 
982 	nanddev_offs_to_pos(nand, from, &start_pos);
983 	nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);
984 
985 	/*
986 	 * Continuous reads never cross LUN boundaries. Some devices don't
987 	 * support crossing plane boundaries. Some devices don't even support
988 	 * crossing block boundaries. The common case being to read through UBI,
989 	 * we will very rarely read two consecutive blocks or more, so it is safer
990 	 * and easier (can be improved) to only enable continuous reads when
991 	 * reading within the same erase block.
992 	 */
993 	if (start_pos.target != end_pos.target ||
994 	    start_pos.plane != end_pos.plane ||
995 	    start_pos.eraseblock != end_pos.eraseblock)
996 		return false;
997 
998 	return start_pos.page < end_pos.page;
999 }
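
/*
 * Illustrative example: a read spanning pages 2-9 of a single eraseblock
 * passes the checks above and is served by one continuous read, while a
 * single-page read or one crossing an eraseblock boundary takes the regular
 * page-by-page path.
 */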
1000 
1001 static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
1002 			    struct mtd_oob_ops *ops)
1003 {
1004 	struct spinand_device *spinand = mtd_to_spinand(mtd);
1005 	struct mtd_ecc_stats old_stats;
1006 	unsigned int max_bitflips = 0;
1007 	int ret;
1008 
1009 	mutex_lock(&spinand->lock);
1010 
1011 	old_stats = mtd->ecc_stats;
1012 
1013 	if (spinand_use_cont_read(mtd, from, ops)) {
1014 		ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
1015 		if (ret == -EAGAIN && !spinand->cont_read_possible) {
1016 			/*
1017 			 * A SPI controller with broken continuous read support
1018 			 * was detected (see spinand_read_from_cache_op()),
1019 			 * so repeat the read in regular mode.
1020 			 */
1021 			ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
1022 		}
1023 	} else {
1024 		ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
1025 	}
1026 
1027 	if (ops->stats) {
1028 		ops->stats->uncorrectable_errors +=
1029 			mtd->ecc_stats.failed - old_stats.failed;
1030 		ops->stats->corrected_bitflips +=
1031 			mtd->ecc_stats.corrected - old_stats.corrected;
1032 	}
1033 
1034 	mutex_unlock(&spinand->lock);
1035 
1036 	return ret ? ret : max_bitflips;
1037 }
1038 
1039 static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
1040 			     struct mtd_oob_ops *ops)
1041 {
1042 	struct spinand_device *spinand = mtd_to_spinand(mtd);
1043 	struct nand_device *nand = mtd_to_nanddev(mtd);
1044 	struct nand_io_iter iter;
1045 	bool disable_ecc = false;
1046 	int ret = 0;
1047 
1048 	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
1049 		disable_ecc = true;
1050 
1051 	mutex_lock(&spinand->lock);
1052 
1053 	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
1054 		if (disable_ecc)
1055 			iter.req.mode = MTD_OPS_RAW;
1056 
1057 		ret = spinand_select_target(spinand, iter.req.pos.target);
1058 		if (ret)
1059 			break;
1060 
1061 		ret = spinand_write_page(spinand, &iter.req);
1062 		if (ret)
1063 			break;
1064 
1065 		ops->retlen += iter.req.datalen;
1066 		ops->oobretlen += iter.req.ooblen;
1067 	}
1068 
1069 	mutex_unlock(&spinand->lock);
1070 
1071 	return ret;
1072 }
1073 
1074 static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
1075 {
1076 	struct spinand_device *spinand = nand_to_spinand(nand);
1077 	u8 marker[2] = { };
1078 	struct nand_page_io_req req = {
1079 		.pos = *pos,
1080 		.ooblen = sizeof(marker),
1081 		.ooboffs = 0,
1082 		.oobbuf.in = marker,
1083 		.mode = MTD_OPS_RAW,
1084 	};
1085 	int ret;
1086 
1087 	spinand_select_target(spinand, pos->target);
1088 
1089 	ret = spinand_read_page(spinand, &req);
1090 	if (ret == -EOPNOTSUPP) {
1091 		/* Retry with ECC in case raw access is not supported */
1092 		req.mode = MTD_OPS_PLACE_OOB;
1093 		spinand_read_page(spinand, &req);
1094 	}
1095 
1096 	if (marker[0] != 0xff || marker[1] != 0xff)
1097 		return true;
1098 
1099 	return false;
1100 }
1101 
1102 static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
1103 {
1104 	struct nand_device *nand = mtd_to_nanddev(mtd);
1105 	struct spinand_device *spinand = nand_to_spinand(nand);
1106 	struct nand_pos pos;
1107 	int ret;
1108 
1109 	nanddev_offs_to_pos(nand, offs, &pos);
1110 	mutex_lock(&spinand->lock);
1111 	ret = nanddev_isbad(nand, &pos);
1112 	mutex_unlock(&spinand->lock);
1113 
1114 	return ret;
1115 }
1116 
1117 static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
1118 {
1119 	struct spinand_device *spinand = nand_to_spinand(nand);
1120 	u8 marker[2] = { };
1121 	struct nand_page_io_req req = {
1122 		.pos = *pos,
1123 		.ooboffs = 0,
1124 		.ooblen = sizeof(marker),
1125 		.oobbuf.out = marker,
1126 		.mode = MTD_OPS_RAW,
1127 	};
1128 	int ret;
1129 
1130 	ret = spinand_select_target(spinand, pos->target);
1131 	if (ret)
1132 		return ret;
1133 
1134 	ret = spinand_write_page(spinand, &req);
1135 	if (ret == -EOPNOTSUPP) {
1136 		/* Retry with ECC in case raw access is not supported */
1137 		req.mode = MTD_OPS_PLACE_OOB;
1138 		ret = spinand_write_page(spinand, &req);
1139 	}
1140 
1141 	return ret;
1142 }
1143 
1144 static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
1145 {
1146 	struct nand_device *nand = mtd_to_nanddev(mtd);
1147 	struct spinand_device *spinand = nand_to_spinand(nand);
1148 	struct nand_pos pos;
1149 	int ret;
1150 
1151 	nanddev_offs_to_pos(nand, offs, &pos);
1152 	mutex_lock(&spinand->lock);
1153 	ret = nanddev_markbad(nand, &pos);
1154 	mutex_unlock(&spinand->lock);
1155 
1156 	return ret;
1157 }
1158 
1159 static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
1160 {
1161 	struct spinand_device *spinand = nand_to_spinand(nand);
1162 	u8 status;
1163 	int ret;
1164 
1165 	ret = spinand_select_target(spinand, pos->target);
1166 	if (ret)
1167 		return ret;
1168 
1169 	ret = spinand_write_enable_op(spinand);
1170 	if (ret)
1171 		return ret;
1172 
1173 	ret = spinand_erase_op(spinand, pos);
1174 	if (ret)
1175 		return ret;
1176 
1177 	ret = spinand_wait(spinand,
1178 			   SPINAND_ERASE_INITIAL_DELAY_US,
1179 			   SPINAND_ERASE_POLL_DELAY_US,
1180 			   &status);
1181 
1182 	if (!ret && (status & STATUS_ERASE_FAILED))
1183 		ret = -EIO;
1184 
1185 	return ret;
1186 }
1187 
1188 static int spinand_mtd_erase(struct mtd_info *mtd,
1189 			     struct erase_info *einfo)
1190 {
1191 	struct spinand_device *spinand = mtd_to_spinand(mtd);
1192 	int ret;
1193 
1194 	mutex_lock(&spinand->lock);
1195 	ret = nanddev_mtd_erase(mtd, einfo);
1196 	mutex_unlock(&spinand->lock);
1197 
1198 	return ret;
1199 }
1200 
1201 static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
1202 {
1203 	struct spinand_device *spinand = mtd_to_spinand(mtd);
1204 	struct nand_device *nand = mtd_to_nanddev(mtd);
1205 	struct nand_pos pos;
1206 	int ret;
1207 
1208 	nanddev_offs_to_pos(nand, offs, &pos);
1209 	mutex_lock(&spinand->lock);
1210 	ret = nanddev_isreserved(nand, &pos);
1211 	mutex_unlock(&spinand->lock);
1212 
1213 	return ret;
1214 }
1215 
1216 static struct spi_mem_dirmap_desc *spinand_create_rdesc(
1217 					struct spinand_device *spinand,
1218 					struct spi_mem_dirmap_info *info)
1219 {
1220 	struct nand_device *nand = spinand_to_nand(spinand);
1221 	struct spi_mem_dirmap_desc *desc = NULL;
1222 
1223 	if (spinand->cont_read_possible) {
1224 		/*
1225 		 * The SPI controller may return an error if info->length is
1226 		 * too large.
1227 		 */
1228 		info->length = nanddev_eraseblock_size(nand);
1229 		desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
1230 						  spinand->spimem, info);
1231 	}
1232 
1233 	if (IS_ERR_OR_NULL(desc)) {
1234 		/*
1235 		 * Continuous reads are not supported by the flash or
1236 		 * its SPI controller, fall back to regular reads.
1237 		 */
1238 		spinand->cont_read_possible = false;
1239 
1240 		info->length = nanddev_page_size(nand) +
1241 			       nanddev_per_page_oobsize(nand);
1242 		desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
1243 						  spinand->spimem, info);
1244 	}
1245 
1246 	return desc;
1247 }
1248 
1249 static int spinand_create_dirmap(struct spinand_device *spinand,
1250 				 unsigned int plane)
1251 {
1252 	struct nand_device *nand = spinand_to_nand(spinand);
1253 	struct spi_mem_dirmap_info info = { 0 };
1254 	struct spi_mem_dirmap_desc *desc;
1255 
1256 	/* The plane number is passed in MSB just above the column address */
1257 	info.offset = plane << fls(nand->memorg.pagesize);
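	/*
	 * For example (illustrative): with 2048-byte pages, fls(2048) = 12,
	 * so plane 1 maps to a dirmap offset of 0x1000.
	 */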
1258 
1259 	info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
1260 	info.op_tmpl = *spinand->op_templates->update_cache;
1261 	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
1262 					  spinand->spimem, &info);
1263 	if (IS_ERR(desc))
1264 		return PTR_ERR(desc);
1265 
1266 	spinand->dirmaps[plane].wdesc = desc;
1267 
1268 	info.op_tmpl = *spinand->op_templates->read_cache;
1269 	desc = spinand_create_rdesc(spinand, &info);
1270 	if (IS_ERR(desc))
1271 		return PTR_ERR(desc);
1272 
1273 	spinand->dirmaps[plane].rdesc = desc;
1274 
1275 	if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
1276 		spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
1277 		spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;
1278 
1279 		return 0;
1280 	}
1281 
1282 	info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
1283 	info.op_tmpl = *spinand->op_templates->update_cache;
1284 	info.op_tmpl.data.ecc = true;
1285 	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
1286 					  spinand->spimem, &info);
1287 	if (IS_ERR(desc))
1288 		return PTR_ERR(desc);
1289 
1290 	spinand->dirmaps[plane].wdesc_ecc = desc;
1291 
1292 	info.op_tmpl = *spinand->op_templates->read_cache;
1293 	info.op_tmpl.data.ecc = true;
1294 	desc = spinand_create_rdesc(spinand, &info);
1295 	if (IS_ERR(desc))
1296 		return PTR_ERR(desc);
1297 
1298 	spinand->dirmaps[plane].rdesc_ecc = desc;
1299 
1300 	return 0;
1301 }
1302 
1303 static int spinand_create_dirmaps(struct spinand_device *spinand)
1304 {
1305 	struct nand_device *nand = spinand_to_nand(spinand);
1306 	int i, ret;
1307 
1308 	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
1309 					sizeof(*spinand->dirmaps) *
1310 					nand->memorg.planes_per_lun,
1311 					GFP_KERNEL);
1312 	if (!spinand->dirmaps)
1313 		return -ENOMEM;
1314 
1315 	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
1316 		ret = spinand_create_dirmap(spinand, i);
1317 		if (ret)
1318 			return ret;
1319 	}
1320 
1321 	return 0;
1322 }
1323 
1324 static const struct nand_ops spinand_ops = {
1325 	.erase = spinand_erase,
1326 	.markbad = spinand_markbad,
1327 	.isbad = spinand_isbad,
1328 };
1329 
1330 static const struct spinand_manufacturer *spinand_manufacturers[] = {
1331 	&alliancememory_spinand_manufacturer,
1332 	&ato_spinand_manufacturer,
1333 	&dosilicon_spinand_manufacturer,
1334 	&esmt_8c_spinand_manufacturer,
1335 	&esmt_c8_spinand_manufacturer,
1336 	&fmsh_spinand_manufacturer,
1337 	&foresee_spinand_manufacturer,
1338 	&gigadevice_spinand_manufacturer,
1339 	&macronix_spinand_manufacturer,
1340 	&micron_spinand_manufacturer,
1341 	&paragon_spinand_manufacturer,
1342 	&skyhigh_spinand_manufacturer,
1343 	&toshiba_spinand_manufacturer,
1344 	&winbond_spinand_manufacturer,
1345 	&xtx_spinand_manufacturer,
1346 };
1347 
1348 static int spinand_manufacturer_match(struct spinand_device *spinand,
1349 				      enum spinand_readid_method rdid_method)
1350 {
1351 	u8 *id = spinand->id.data;
1352 	unsigned int i;
1353 	int ret;
1354 
1355 	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
1356 		const struct spinand_manufacturer *manufacturer =
1357 			spinand_manufacturers[i];
1358 
1359 		if (id[0] != manufacturer->id)
1360 			continue;
1361 
1362 		ret = spinand_match_and_init(spinand,
1363 					     manufacturer->chips,
1364 					     manufacturer->nchips,
1365 					     rdid_method);
1366 		if (ret < 0)
1367 			continue;
1368 
1369 		spinand->manufacturer = manufacturer;
1370 		return 0;
1371 	}
1372 	return -EOPNOTSUPP;
1373 }
1374 
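/*
 * Identify the chip by trying the three READ ID variants found in the wild:
 * bare opcode, opcode plus one address byte, and opcode plus one dummy byte.
 */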
1375 static int spinand_id_detect(struct spinand_device *spinand)
1376 {
1377 	u8 *id = spinand->id.data;
1378 	int ret;
1379 
1380 	ret = spinand_read_id_op(spinand, 0, 0, id);
1381 	if (ret)
1382 		return ret;
1383 	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
1384 	if (!ret)
1385 		return 0;
1386 
1387 	ret = spinand_read_id_op(spinand, 1, 0, id);
1388 	if (ret)
1389 		return ret;
1390 	ret = spinand_manufacturer_match(spinand,
1391 					 SPINAND_READID_METHOD_OPCODE_ADDR);
1392 	if (!ret)
1393 		return 0;
1394 
1395 	ret = spinand_read_id_op(spinand, 0, 1, id);
1396 	if (ret)
1397 		return ret;
1398 	ret = spinand_manufacturer_match(spinand,
1399 					 SPINAND_READID_METHOD_OPCODE_DUMMY);
1400 
1401 	return ret;
1402 }
1403 
1404 static int spinand_manufacturer_init(struct spinand_device *spinand)
1405 {
1406 	int ret;
1407 
1408 	if (spinand->manufacturer->ops->init) {
1409 		ret = spinand->manufacturer->ops->init(spinand);
1410 		if (ret)
1411 			return ret;
1412 	}
1413 
1414 	return 0;
1415 }
1416 
1417 static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
1418 {
1419 	/* Release manufacturer private data */
1420 	if (spinand->manufacturer->ops->cleanup)
1421 		return spinand->manufacturer->ops->cleanup(spinand);
1422 }
1423 
1424 static bool spinand_op_is_odtr(const struct spi_mem_op *op)
1425 {
1426 	return op->cmd.dtr && op->cmd.buswidth == 8;
1427 }
1428 
1429 static void spinand_init_ssdr_templates(struct spinand_device *spinand)
1430 {
1431 	struct spinand_mem_ops *tmpl = &spinand->ssdr_op_templates;
1432 
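	/*
	 * The 0/NULL arguments below are placeholders: the real address,
	 * dummy and data fields are patched in per operation by the
	 * spinand_fill_*_op() helpers above.
	 */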
1433 	tmpl->reset = (struct spi_mem_op)SPINAND_RESET_1S_0_0_OP;
1434 	tmpl->readid = (struct spi_mem_op)SPINAND_READID_1S_1S_1S_OP(0, 0, NULL, 0);
1435 	tmpl->wr_en = (struct spi_mem_op)SPINAND_WR_EN_1S_0_0_OP;
1436 	tmpl->wr_dis = (struct spi_mem_op)SPINAND_WR_DIS_1S_0_0_OP;
1437 	tmpl->set_feature = (struct spi_mem_op)SPINAND_SET_FEATURE_1S_1S_1S_OP(0, NULL);
1438 	tmpl->get_feature = (struct spi_mem_op)SPINAND_GET_FEATURE_1S_1S_1S_OP(0, NULL);
1439 	tmpl->blk_erase = (struct spi_mem_op)SPINAND_BLK_ERASE_1S_1S_0_OP(0);
1440 	tmpl->page_read = (struct spi_mem_op)SPINAND_PAGE_READ_1S_1S_0_OP(0);
1441 	tmpl->prog_exec = (struct spi_mem_op)SPINAND_PROG_EXEC_1S_1S_0_OP(0);
1442 	spinand->op_templates = &spinand->ssdr_op_templates;
1443 	spinand->bus_iface = SSDR;
1444 }
1445 
1446 static int spinand_support_vendor_ops(struct spinand_device *spinand,
1447 				      const struct spinand_info *info,
1448 				      enum spinand_bus_interface iface)
1449 {
1450 	int i;
1451 
1452 	if (!info->vendor_ops)
1453 		return 0;
1454 	/*
1455 	 * The vendor ops array is only used to verify that this chip and all its memory
1456 	 * operations are supported. If we see patterns emerging, we could ideally name these
1457 	 * operations and define them at the SPI NAND core level instead.
1458 	 * For now, this only serves as a sanity check.
1459 	 */
1460 	for (i = 0; i < info->vendor_ops->nops; i++) {
1461 		const struct spi_mem_op *op = &info->vendor_ops->ops[i];
1462 
1463 		if ((iface == SSDR && spinand_op_is_odtr(op)) ||
1464 		    (iface == ODTR && !spinand_op_is_odtr(op)))
1465 			continue;
1466 
1467 		if (!spi_mem_supports_op(spinand->spimem, op))
1468 			return -EOPNOTSUPP;
1469 	}
1470 
1471 	return 0;
1472 }
1473 
1474 static int spinand_init_odtr_instruction_set(struct spinand_device *spinand)
1475 {
1476 	struct spinand_mem_ops *tmpl = &spinand->odtr_op_templates;
1477 
1478 	tmpl->reset = (struct spi_mem_op)SPINAND_RESET_8D_0_0_OP;
1479 	if (!spi_mem_supports_op(spinand->spimem, &tmpl->reset))
1480 		return -EOPNOTSUPP;
1481 
1482 	tmpl->readid = (struct spi_mem_op)SPINAND_READID_8D_8D_8D_OP(0, 0, NULL, 0);
1483 	if (!spi_mem_supports_op(spinand->spimem, &tmpl->readid))
1484 		return -EOPNOTSUPP;
1485 
1486 	tmpl->wr_en = (struct spi_mem_op)SPINAND_WR_EN_8D_0_0_OP;
1487 	if (!spi_mem_supports_op(spinand->spimem, &tmpl->wr_en))
1488 		return -EOPNOTSUPP;
1489 
1490 	tmpl->wr_dis = (struct spi_mem_op)SPINAND_WR_DIS_8D_0_0_OP;
1491 	if (!spi_mem_supports_op(spinand->spimem, &tmpl->wr_dis))
1492 		return -EOPNOTSUPP;
1493 
1494 	tmpl->set_feature = (struct spi_mem_op)SPINAND_SET_FEATURE_8D_8D_8D_OP(0, NULL);
1495 	if (!spi_mem_supports_op(spinand->spimem, &tmpl->set_feature))
1496 		return -EOPNOTSUPP;
1497 
1498 	tmpl->get_feature = (struct spi_mem_op)SPINAND_GET_FEATURE_8D_8D_8D_OP(0, NULL);
1499 	if (!spi_mem_supports_op(spinand->spimem, &tmpl->get_feature))
1500 		return -EOPNOTSUPP;
1501 
1502 	tmpl->blk_erase = (struct spi_mem_op)SPINAND_BLK_ERASE_8D_8D_0_OP(0);
1503 	if (!spi_mem_supports_op(spinand->spimem, &tmpl->blk_erase))
1504 		return -EOPNOTSUPP;
1505 
1506 	if (spinand->flags & SPINAND_ODTR_PACKED_PAGE_READ)
1507 		tmpl->page_read = (struct spi_mem_op)SPINAND_PAGE_READ_PACKED_8D_8D_0_OP(0);
1508 	else
1509 		tmpl->page_read = (struct spi_mem_op)SPINAND_PAGE_READ_8D_8D_0_OP(0);
1510 	if (!spi_mem_supports_op(spinand->spimem, &tmpl->page_read))
1511 		return -EOPNOTSUPP;
1513 
1514 	tmpl->prog_exec = (struct spi_mem_op)SPINAND_PROG_EXEC_8D_8D_0_OP(0);
1515 	if (!spi_mem_supports_op(spinand->spimem, &tmpl->prog_exec))
1516 		return -EOPNOTSUPP;
1517 
1518 	return 0;
1519 }
1520 
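/*
 * Pick, among the variants compatible with the requested bus interface, the
 * supported operation with the shortest total duration for transferring a
 * full page plus OOB. Returns NULL when no variant is supported.
 */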
1521 static const struct spi_mem_op *
1522 spinand_select_op_variant(struct spinand_device *spinand, enum spinand_bus_interface iface,
1523 			  const struct spinand_op_variants *variants)
1524 {
1525 	struct nand_device *nand = spinand_to_nand(spinand);
1526 	const struct spi_mem_op *best_variant = NULL;
1527 	u64 best_op_duration_ns = ULLONG_MAX;
1528 	unsigned int i;
1529 
1530 	for (i = 0; i < variants->nops; i++) {
1531 		struct spi_mem_op op = variants->ops[i];
1532 		u64 op_duration_ns = 0;
1533 		unsigned int nbytes;
1534 		int ret;
1535 
1536 		if ((iface == SSDR && spinand_op_is_odtr(&op)) ||
1537 		    (iface == ODTR && !spinand_op_is_odtr(&op)))
1538 			continue;
1539 
1540 		nbytes = nanddev_per_page_oobsize(nand) +
1541 			 nanddev_page_size(nand);
1542 
1543 		while (nbytes) {
1544 			op.data.nbytes = nbytes;
1545 			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
1546 			if (ret)
1547 				break;
1548 
1549 			spi_mem_adjust_op_freq(spinand->spimem, &op);
1550 
1551 			if (!spi_mem_supports_op(spinand->spimem, &op))
1552 				break;
1553 
1554 			nbytes -= op.data.nbytes;
1555 
1556 			op_duration_ns += spi_mem_calc_op_duration(spinand->spimem, &op);
1557 		}
1558 
1559 		if (!nbytes && op_duration_ns < best_op_duration_ns) {
1560 			best_op_duration_ns = op_duration_ns;
1561 			best_variant = &variants->ops[i];
1562 		}
1563 	}
1564 
1565 	return best_variant;
1566 }
1567 
1568 /**
1569  * spinand_match_and_init() - Try to find a match between a device ID and an
1570  *			      entry in a spinand_info table
1571  * @spinand: SPI NAND object
1572  * @table: SPI NAND device description table
1573  * @table_size: size of the device description table
1574  * @rdid_method: read id method to match
1575  *
1576  * Match between a device ID retrieved through the READ_ID command and an
1577  * entry in the SPI NAND description table. If a match is found, the spinand
1578  * object will be initialized with information provided by the matching
1579  * spinand_info entry.
1580  *
1581  * Return: 0 on success, a negative error code otherwise.
1582  */
1583 int spinand_match_and_init(struct spinand_device *spinand,
1584 			   const struct spinand_info *table,
1585 			   unsigned int table_size,
1586 			   enum spinand_readid_method rdid_method)
1587 {
1588 	u8 *id = spinand->id.data;
1589 	struct nand_device *nand = spinand_to_nand(spinand);
1590 	unsigned int i;
1591 	int ret;
1592 
1593 	for (i = 0; i < table_size; i++) {
1594 		const struct spinand_info *info = &table[i];
1595 		const struct spi_mem_op *op;
1596 
1597 		if (rdid_method != info->devid.method)
1598 			continue;
1599 
1600 		if (memcmp(id + 1, info->devid.id, info->devid.len))
1601 			continue;
1602 
1603 		nand->memorg = table[i].memorg;
1604 		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
1605 		spinand->eccinfo = table[i].eccinfo;
1606 		spinand->flags = table[i].flags;
1607 		spinand->id.len = 1 + table[i].devid.len;
1608 		spinand->select_target = table[i].select_target;
1609 		spinand->configure_chip = table[i].configure_chip;
1610 		spinand->set_cont_read = table[i].set_cont_read;
1611 		spinand->fact_otp = &table[i].fact_otp;
1612 		spinand->user_otp = &table[i].user_otp;
1613 		spinand->read_retries = table[i].read_retries;
1614 		spinand->set_read_retry = table[i].set_read_retry;
1615 
1616 		/* I/O variant selection with single SPI SDR commands */
1617 
1618 		op = spinand_select_op_variant(spinand, SSDR,
1619 					       info->op_variants.read_cache);
1620 		if (!op)
1621 			return -EOPNOTSUPP;
1622 
1623 		spinand->ssdr_op_templates.read_cache = op;
1624 
1625 		op = spinand_select_op_variant(spinand, SSDR,
1626 					       info->op_variants.write_cache);
1627 		if (!op)
1628 			return -EOPNOTSUPP;
1629 
1630 		spinand->ssdr_op_templates.write_cache = op;
1631 
1632 		op = spinand_select_op_variant(spinand, SSDR,
1633 					       info->op_variants.update_cache);
1634 		if (!op)
1635 			return -EOPNOTSUPP;
1636 
1637 		spinand->ssdr_op_templates.update_cache = op;
1638 
1639 		ret = spinand_support_vendor_ops(spinand, info, SSDR);
1640 		if (ret)
1641 			return ret;
1642 
1643 		/* I/O variant selection with octal SPI DTR commands (optional) */
1644 
1645 		ret = spinand_init_odtr_instruction_set(spinand);
1646 		if (ret)
1647 			return 0;
1648 
1649 		ret = spinand_support_vendor_ops(spinand, info, ODTR);
1650 		if (ret)
1651 			return 0;
1652 
1653 		op = spinand_select_op_variant(spinand, ODTR,
1654 					       info->op_variants.read_cache);
1655 		spinand->odtr_op_templates.read_cache = op;
1656 
1657 		op = spinand_select_op_variant(spinand, ODTR,
1658 					       info->op_variants.write_cache);
1659 		spinand->odtr_op_templates.write_cache = op;
1660 
1661 		op = spinand_select_op_variant(spinand, ODTR,
1662 					       info->op_variants.update_cache);
1663 		spinand->odtr_op_templates.update_cache = op;
1664 
1665 		return 0;
1666 	}
1667 
1668 	return -EOPNOTSUPP;
1669 }
1670 
1671 static int spinand_detect(struct spinand_device *spinand)
1672 {
1673 	struct device *dev = &spinand->spimem->spi->dev;
1674 	struct nand_device *nand = spinand_to_nand(spinand);
1675 	int ret;
1676 
1677 	ret = spinand_reset_op(spinand);
1678 	if (ret)
1679 		return ret;
1680 
1681 	ret = spinand_id_detect(spinand);
1682 	if (ret) {
1683 		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
1684 			spinand->id.data);
1685 		return ret;
1686 	}
1687 
1688 	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
1689 		dev_err(dev,
1690 			"SPI NANDs with more than one die must implement ->select_target()\n");
1691 		return -EINVAL;
1692 	}
1693 
1694 	dev_info(&spinand->spimem->spi->dev,
1695 		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
1696 	dev_info(&spinand->spimem->spi->dev,
1697 		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
1698 		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
1699 		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
1700 
1701 	return 0;
1702 }
1703 
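/*
 * Switch the chip to the fastest interface that was successfully negotiated:
 * octal DTR when a complete set of ODTR templates exists and the chip accepts
 * the switch, otherwise single SPI SDR, setting the QE bit first when any
 * selected I/O variant uses a quad data phase.
 */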
1704 static int spinand_configure_chip(struct spinand_device *spinand)
1705 {
1706 	bool odtr = false, quad_enable = false;
1707 	int ret;
1708 
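	/*
	 * Prefer the octal DTR instruction set when a complete set of ODTR
	 * cache op templates has been selected, and fall back to single SPI
	 * SDR otherwise.
	 */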
	if (spinand->odtr_op_templates.read_cache &&
	    spinand->odtr_op_templates.write_cache &&
	    spinand->odtr_op_templates.update_cache)
		odtr = true;

	if (odtr) {
		if (!spinand->configure_chip)
			goto try_ssdr;

		/* ODTR bus interface configuration happens here */
		ret = spinand->configure_chip(spinand, ODTR);
		if (ret) {
			spinand->odtr_op_templates.read_cache = NULL;
			spinand->odtr_op_templates.write_cache = NULL;
			spinand->odtr_op_templates.update_cache = NULL;
			goto try_ssdr;
		}

		spinand->op_templates = &spinand->odtr_op_templates;
		spinand->bus_iface = ODTR;

		return 0;
	}

try_ssdr:
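	/*
	 * On chips exposing a QE bit, quad (x4) data phases only work once
	 * that bit is set, so request it whenever one of the selected cache
	 * ops uses a x4 data phase.
	 */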
	if (spinand->flags & SPINAND_HAS_QE_BIT) {
		if (spinand->ssdr_op_templates.read_cache->data.buswidth == 4 ||
		    spinand->ssdr_op_templates.write_cache->data.buswidth == 4 ||
		    spinand->ssdr_op_templates.update_cache->data.buswidth == 4)
			quad_enable = true;
	}

	ret = spinand_init_quad_enable(spinand, quad_enable);
	if (ret)
		return ret;

	if (spinand->configure_chip) {
		ret = spinand->configure_chip(spinand, SSDR);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_flash(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret, i;

	ret = spinand_read_cfg(spinand);
	if (ret)
		return ret;

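	/* Leave OTP access mode so that regular array accesses work. */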
	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		return ret;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		return ret;
	}

	ret = spinand_configure_chip(spinand);
	if (ret)
		goto manuf_cleanup;

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto manuf_cleanup;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto manuf_cleanup;
	}

	return 0;

manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

	return ret;
}

static void spinand_mtd_resume(struct mtd_info *mtd)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return;

	ret = spinand_init_flash(spinand);
	if (ret)
		return;

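	/*
	 * Keep the on-die ECC engine disabled by default; the ECC engine
	 * code re-enables it around page accesses when required.
	 */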
	spinand_ecc_enable(spinand, false);
}

static int spinand_mtd_suspend(struct mtd_info *mtd)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	/*
	 * Return to the SSDR interface in the suspend path to make sure the
	 * reset operation is correctly processed upon resume.
	 *
	 * Note: once back in SSDR mode, every operation but the page helpers
	 * (the dirmap-based I/O accessors) will work. Making page accesses
	 * work would require destroying and recreating the dirmaps twice,
	 * which is not worth the cost for a purely transitional state.
	 */
	if (spinand->bus_iface == ODTR) {
		ret = spinand->configure_chip(spinand, SSDR);
		if (ret)
			return ret;

		spinand->op_templates = &spinand->ssdr_op_templates;
		spinand->bus_iface = SSDR;
	}

	return 0;
}

static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * the buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	spinand_init_ssdr_templates(spinand);

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
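	/*
	 * Note: the buffer covers a whole eraseblock rather than a single
	 * page, so that continuous reads, which may return several pages at
	 * once, fit in one contiguous buffer.
	 */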
	spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_flash(spinand);
	if (ret)
		goto err_free_bufs;

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/* SPI-NAND default ECC engine is on-die */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;

	spinand_ecc_enable(spinand, false);
	ret = nanddev_ecc_engine_init(nand);
	if (ret)
		goto err_cleanup_nanddev;

	/*
	 * Continuous read can only be enabled with an on-die ECC engine, so the
	 * ECC initialization must have happened previously.
	 */
	spinand_cont_read_init(spinand);

	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
	mtd->_suspend = spinand_mtd_suspend;
	mtd->_resume = spinand_mtd_resume;

	if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) {
		ret = spinand_set_mtd_otp_ops(spinand);
		if (ret)
			goto err_cleanup_ecc_engine;
	}

	if (nand->ecc.engine) {
		ret = mtd_ooblayout_count_freebytes(mtd);
		if (ret < 0)
			goto err_cleanup_ecc_engine;
	}

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
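	/* Report -EUCLEAN once bitflips reach 3/4 of the ECC strength. */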
	mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_cleanup_ecc_engine;
	}

	return 0;

err_cleanup_ecc_engine:
	nanddev_ecc_engine_cleanup(nand);

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_ecc_engine_cleanup(nand);
	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

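	/*
	 * Register the MTD device; the MTD core also runs the partition
	 * parsers (e.g. device tree partitions) at this point.
	 */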
	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");