17529df46SPeter Pan // SPDX-License-Identifier: GPL-2.0
27529df46SPeter Pan /*
37529df46SPeter Pan * Copyright (C) 2016-2017 Micron Technology, Inc.
47529df46SPeter Pan *
57529df46SPeter Pan * Authors:
67529df46SPeter Pan * Peter Pan <peterpandong@micron.com>
77529df46SPeter Pan * Boris Brezillon <boris.brezillon@bootlin.com>
87529df46SPeter Pan */
97529df46SPeter Pan
107529df46SPeter Pan #define pr_fmt(fmt) "spi-nand: " fmt
117529df46SPeter Pan
127529df46SPeter Pan #include <linux/device.h>
137529df46SPeter Pan #include <linux/jiffies.h>
147529df46SPeter Pan #include <linux/kernel.h>
157529df46SPeter Pan #include <linux/module.h>
167529df46SPeter Pan #include <linux/mtd/spinand.h>
177529df46SPeter Pan #include <linux/of.h>
187529df46SPeter Pan #include <linux/slab.h>
19f1541773SChuanhong Guo #include <linux/string.h>
207529df46SPeter Pan #include <linux/spi/spi.h>
217529df46SPeter Pan #include <linux/spi/spi-mem.h>
227529df46SPeter Pan
/*
 * Read a feature register (GET FEATURE) into *val.
 * The transfer bounces through spinand->scratchbuf rather than a caller
 * buffer — presumably because spi-mem transfers need a DMA-safe buffer;
 * confirm against the scratchbuf allocation.
 */
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}
367529df46SPeter Pan
/*
 * Write a feature register (SET FEATURE). The value goes through
 * spinand->scratchbuf, which the op template points at.
 */
int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}
457529df46SPeter Pan
/* Shortcut for reading the STATUS feature register. */
static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}
507529df46SPeter Pan
/*
 * Return the cached CFG register value of the currently selected target.
 * The cache is filled by spinand_read_cfg() and kept coherent by
 * spinand_set_cfg(), avoiding a SPI transfer on every read.
 */
static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	/* An out-of-range target here is a driver bug, hence the WARN_ON. */
	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}
627529df46SPeter Pan
/*
 * Write the CFG register of the currently selected target. The SPI
 * transfer is skipped entirely when the cached value already matches,
 * and the cache is only updated after the write succeeded so it never
 * disagrees with the chip.
 */
static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	/* Nothing to do if the register already holds the requested value. */
	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}
827529df46SPeter Pan
837529df46SPeter Pan /**
847529df46SPeter Pan * spinand_upd_cfg() - Update the configuration register
857529df46SPeter Pan * @spinand: the spinand device
867529df46SPeter Pan * @mask: the mask encoding the bits to update in the config reg
877529df46SPeter Pan * @val: the new value to apply
887529df46SPeter Pan *
897529df46SPeter Pan * Update the configuration register.
907529df46SPeter Pan *
917529df46SPeter Pan * Return: 0 on success, a negative error code otherwise.
927529df46SPeter Pan */
spinand_upd_cfg(struct spinand_device * spinand,u8 mask,u8 val)937529df46SPeter Pan int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
947529df46SPeter Pan {
957529df46SPeter Pan int ret;
967529df46SPeter Pan u8 cfg;
977529df46SPeter Pan
987529df46SPeter Pan ret = spinand_get_cfg(spinand, &cfg);
997529df46SPeter Pan if (ret)
1007529df46SPeter Pan return ret;
1017529df46SPeter Pan
1027529df46SPeter Pan cfg &= ~mask;
1037529df46SPeter Pan cfg |= val;
1047529df46SPeter Pan
1057529df46SPeter Pan return spinand_set_cfg(spinand, cfg);
1067529df46SPeter Pan }
1077529df46SPeter Pan
1087529df46SPeter Pan /**
1097529df46SPeter Pan * spinand_select_target() - Select a specific NAND target/die
1107529df46SPeter Pan * @spinand: the spinand device
1117529df46SPeter Pan * @target: the target/die to select
1127529df46SPeter Pan *
1137529df46SPeter Pan * Select a new target/die. If chip only has one die, this function is a NOOP.
1147529df46SPeter Pan *
1157529df46SPeter Pan * Return: 0 on success, a negative error code otherwise.
1167529df46SPeter Pan */
spinand_select_target(struct spinand_device * spinand,unsigned int target)1177529df46SPeter Pan int spinand_select_target(struct spinand_device *spinand, unsigned int target)
1187529df46SPeter Pan {
1197529df46SPeter Pan struct nand_device *nand = spinand_to_nand(spinand);
1207529df46SPeter Pan int ret;
1217529df46SPeter Pan
1227529df46SPeter Pan if (WARN_ON(target >= nand->memorg.ntargets))
1237529df46SPeter Pan return -EINVAL;
1247529df46SPeter Pan
1257529df46SPeter Pan if (spinand->cur_target == target)
1267529df46SPeter Pan return 0;
1277529df46SPeter Pan
1287529df46SPeter Pan if (nand->memorg.ntargets == 1) {
1297529df46SPeter Pan spinand->cur_target = target;
1307529df46SPeter Pan return 0;
1317529df46SPeter Pan }
1327529df46SPeter Pan
1337529df46SPeter Pan ret = spinand->select_target(spinand, target);
1347529df46SPeter Pan if (ret)
1357529df46SPeter Pan return ret;
1367529df46SPeter Pan
1377529df46SPeter Pan spinand->cur_target = target;
1387529df46SPeter Pan return 0;
1397529df46SPeter Pan }
1407529df46SPeter Pan
/*
 * Populate the per-target CFG register cache by reading the register from
 * every die. Used before the cache can be trusted (e.g. at init time).
 */
static int spinand_read_cfg(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int target;
	int ret;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}
1647529df46SPeter Pan
spinand_init_cfg_cache(struct spinand_device * spinand)165ff0cd841SPatrice Chotard static int spinand_init_cfg_cache(struct spinand_device *spinand)
166ff0cd841SPatrice Chotard {
167ff0cd841SPatrice Chotard struct nand_device *nand = spinand_to_nand(spinand);
168ff0cd841SPatrice Chotard struct device *dev = &spinand->spimem->spi->dev;
169ff0cd841SPatrice Chotard
170ff0cd841SPatrice Chotard spinand->cfg_cache = devm_kcalloc(dev,
171ff0cd841SPatrice Chotard nand->memorg.ntargets,
172ff0cd841SPatrice Chotard sizeof(*spinand->cfg_cache),
173ff0cd841SPatrice Chotard GFP_KERNEL);
174ff0cd841SPatrice Chotard if (!spinand->cfg_cache)
175ff0cd841SPatrice Chotard return -ENOMEM;
176ff0cd841SPatrice Chotard
177ff0cd841SPatrice Chotard return 0;
178ff0cd841SPatrice Chotard }
179ff0cd841SPatrice Chotard
spinand_init_quad_enable(struct spinand_device * spinand)1807529df46SPeter Pan static int spinand_init_quad_enable(struct spinand_device *spinand)
1817529df46SPeter Pan {
1827529df46SPeter Pan bool enable = false;
1837529df46SPeter Pan
1847529df46SPeter Pan if (!(spinand->flags & SPINAND_HAS_QE_BIT))
1857529df46SPeter Pan return 0;
1867529df46SPeter Pan
1877529df46SPeter Pan if (spinand->op_templates.read_cache->data.buswidth == 4 ||
1887529df46SPeter Pan spinand->op_templates.write_cache->data.buswidth == 4 ||
1897529df46SPeter Pan spinand->op_templates.update_cache->data.buswidth == 4)
1907529df46SPeter Pan enable = true;
1917529df46SPeter Pan
1927529df46SPeter Pan return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
1937529df46SPeter Pan enable ? CFG_QUAD_ENABLE : 0);
1947529df46SPeter Pan }
1957529df46SPeter Pan
/* Turn the on-die ECC engine on or off via the CFG register. */
static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	u8 val = enable ? CFG_ECC_ENABLE : 0;

	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE, val);
}
2027529df46SPeter Pan
/*
 * Toggle continuous (sequential cross-page) read mode through the
 * chip-specific hook.
 * NOTE(review): assumes ->set_cont_read is non-NULL — callers presumably
 * only invoke this when the chip advertises continuous-read support;
 * confirm at the call sites.
 */
static int spinand_cont_read_enable(struct spinand_device *spinand,
				    bool enable)
{
	return spinand->set_cont_read(spinand, enable);
}
208631cfdd0SMiquel Raynal
/*
 * Decode the ECC bits of the STATUS register after a page read.
 *
 * Returns 0 when no bitflips occurred, a positive number of corrected
 * bitflips, -EBADMSG on an uncorrectable error, or -EINVAL on an
 * unrecognized status encoding. Chips providing their own decoder take
 * precedence over the generic two-bit scheme.
 */
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 ecc_bits = status & STATUS_ECC_MASK;

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	if (ecc_bits == STATUS_ECC_NO_BITFLIPS)
		return 0;

	if (ecc_bits == STATUS_ECC_HAS_BITFLIPS)
		/*
		 * The generic encoding cannot tell how many bitflips were
		 * fixed, so report the maximum possible value to make
		 * wear-leveling layers move the data immediately.
		 */
		return nanddev_get_ecc_conf(nand)->strength;

	if (ecc_bits == STATUS_ECC_UNCOR_ERROR)
		return -EBADMSG;

	return -EINVAL;
}
23755a1a71aSMiquel Raynal
/*
 * Fallback layout exposes no ECC bytes: every ECC section request is out
 * of range.
 */
static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}
24355a1a71aSMiquel Raynal
/*
 * Fallback layout: a single free region spanning the OOB area minus the
 * bad-block marker.
 */
static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	/* NOTE(review): the hardcoded 62 assumes a 64-byte OOB — confirm. */
	region->length = 62;

	return 0;
}
25655a1a71aSMiquel Raynal
25755a1a71aSMiquel Raynal static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
25855a1a71aSMiquel Raynal .ecc = spinand_noecc_ooblayout_ecc,
25955a1a71aSMiquel Raynal .free = spinand_noecc_ooblayout_free,
26055a1a71aSMiquel Raynal };
26155a1a71aSMiquel Raynal
/*
 * Set up the on-die ECC engine context: mirror the chip's ECC requirements
 * into the effective configuration, allocate the private status holder and
 * install the chip's OOB layout (or the no-ECC fallback).
 */
static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spinand_ondie_ecc_conf *engine_conf;

	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;

	/* Holds the STATUS byte saved between page load and finish_io_req. */
	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
	if (!engine_conf)
		return -ENOMEM;

	nand->ecc.ctx.priv = engine_conf;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	return 0;
}
285945845b5SMiquel Raynal
/* Free the context allocated by spinand_ondie_ecc_init_ctx(). */
static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
{
	kfree(nand->ecc.ctx.priv);
}
290945845b5SMiquel Raynal
/*
 * Called before a page I/O: reset the OOB buffer to an erased-looking state
 * and enable/disable the on-die engine (raw accesses bypass ECC).
 */
static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	bool enable = (req->mode != MTD_OPS_RAW);

	/* 0xff OOB so a partial write can't corrupt untouched OOB bytes. */
	memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));

	/* Only enable or disable the engine */
	return spinand_ecc_enable(spinand, enable);
}
302945845b5SMiquel Raynal
/*
 * Called after a page I/O: decode the STATUS byte saved at read time and
 * update the MTD ECC statistics. Returns the number of corrected bitflips,
 * 0, or a negative error (e.g. -EBADMSG on uncorrectable data).
 */
static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	int ret;

	/* Raw accesses bypass ECC entirely: nothing to report. */
	if (req->mode == MTD_OPS_RAW)
		return 0;

	/* Nothing to do when finishing a page write */
	if (req->type == NAND_PAGE_WRITE)
		return 0;

	/* Finish a page read: check the status, report errors/bitflips */
	ret = spinand_check_ecc_status(spinand, engine_conf->status);
	if (ret == -EBADMSG) {
		mtd->ecc_stats.failed++;
	} else if (ret > 0) {
		unsigned int pages;

		/*
		 * Continuous reads don't allow us to get the detail,
		 * so we may exagerate the actual number of corrected bitflips.
		 */
		if (!req->continuous)
			pages = 1;
		else
			pages = req->datalen / nanddev_page_size(nand);

		mtd->ecc_stats.corrected += ret * pages;
	}

	return ret;
}
339945845b5SMiquel Raynal
340945845b5SMiquel Raynal static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
341945845b5SMiquel Raynal .init_ctx = spinand_ondie_ecc_init_ctx,
342945845b5SMiquel Raynal .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
343945845b5SMiquel Raynal .prepare_io_req = spinand_ondie_ecc_prepare_io_req,
344945845b5SMiquel Raynal .finish_io_req = spinand_ondie_ecc_finish_io_req,
345945845b5SMiquel Raynal };
346945845b5SMiquel Raynal
347c8efe010SMiquel Raynal static struct nand_ecc_engine spinand_ondie_ecc_engine = {
348945845b5SMiquel Raynal .ops = &spinand_ondie_ecc_engine_ops,
349945845b5SMiquel Raynal };
350945845b5SMiquel Raynal
/*
 * Stash the STATUS byte read after a page load so that
 * spinand_ondie_ecc_finish_io_req() can decode the ECC bits later.
 */
static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;

	/* Only meaningful when the on-die engine owns the private context. */
	if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
	    engine_conf)
		engine_conf->status = status;
}
3593d1f08b0SMiquel Raynal
/* Issue WRITE ENABLE, required before program/erase operations. */
static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}
3667529df46SPeter Pan
spinand_load_page_op(struct spinand_device * spinand,const struct nand_page_io_req * req)3677529df46SPeter Pan static int spinand_load_page_op(struct spinand_device *spinand,
3687529df46SPeter Pan const struct nand_page_io_req *req)
3697529df46SPeter Pan {
3707529df46SPeter Pan struct nand_device *nand = spinand_to_nand(spinand);
3717529df46SPeter Pan unsigned int row = nanddev_pos_to_row(nand, &req->pos);
3727529df46SPeter Pan struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
3737529df46SPeter Pan
3747529df46SPeter Pan return spi_mem_exec_op(spinand->spimem, &op);
3757529df46SPeter Pan }
3767529df46SPeter Pan
/*
 * Transfer the chip cache content into the driver buffers through the
 * dirmap descriptors, then copy the requested data/OOB portions into the
 * caller's buffers. Must run after spinand_load_page_op() populated the
 * cache.
 */
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		/*
		 * Regular reads fetch one full page; continuous reads must
		 * cover every page spanned by the request in a single go.
		 */
		if (!req->continuous)
			nbytes = nanddev_page_size(nand);
		else
			nbytes = round_up(req->dataoffs + req->datalen,
					  nanddev_page_size(nand));
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		/* OOB-only request: start right after the data area. */
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	/* Raw accesses go through the non-ECC dirmap descriptor. */
	if (req->mode == MTD_OPS_RAW)
		rdesc = spinand->dirmaps[req->pos.plane].rdesc;
	else
		rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;

	/* Some chips encode the plane number in a column-address bit. */
	if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
		column |= req->pos.plane << fls(nanddev_page_size(nand));

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		/* Zero-length or over-long transfers indicate a bug. */
		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;

		/*
		 * Dirmap accesses are allowed to toggle the CS.
		 * Toggling the CS during a continuous read is forbidden.
		 */
		if (nbytes && req->continuous)
			return -EIO;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		/* AUTO mode only hands back the free (non-ECC) OOB bytes. */
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}
4517529df46SPeter Pan
/*
 * Fill the chip's page cache with the data/OOB of @req through the dirmap
 * descriptors. The whole cache is always written so that stale content
 * cannot corrupt the parts of the page not covered by the request.
 */
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 *
	 * Only reset the data buffer manually, the OOB buffer is prepared by
	 * ECC engines ->prepare_io_req() callback.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nanddev_page_size(nand));

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		/* AUTO mode scatters the bytes into the free OOB regions. */
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	/* Raw accesses go through the non-ECC dirmap descriptor. */
	if (req->mode == MTD_OPS_RAW)
		wdesc = spinand->dirmaps[req->pos.plane].wdesc;
	else
		wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;

	/* Some chips encode the plane number in a column-address bit. */
	if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
		column |= req->pos.plane << fls(nanddev_page_size(nand));

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		/* Zero-length or over-long transfers indicate a bug. */
		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}
5137529df46SPeter Pan
/*
 * Issue PROGRAM EXECUTE: commit the previously-loaded cache content to the
 * page at @req->pos. The caller must poll for completion afterwards.
 */
static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}
5237529df46SPeter Pan
/*
 * Issue BLOCK ERASE for the block containing @pos. The caller must poll
 * for completion afterwards.
 */
static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}
5337529df46SPeter Pan
/*
 * Wait until the chip leaves the busy state by polling the STATUS register
 * (possibly hardware-accelerated via spi_mem_poll_status()).
 * @initial_delay_us/@poll_delay_us tune the polling cadence; when @s is
 * non-NULL the final STATUS value is stored there. Returns -ETIMEDOUT if
 * the chip is still busy after SPINAND_WAITRDY_TIMEOUT_MS.
 */
static int spinand_wait(struct spinand_device *spinand,
			unsigned long initial_delay_us,
			unsigned long poll_delay_us,
			u8 *s)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
						      spinand->scratchbuf);
	u8 status;
	int ret;

	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
				  initial_delay_us,
				  poll_delay_us,
				  SPINAND_WAITRDY_TIMEOUT_MS);
	if (ret)
		return ret;

	status = *spinand->scratchbuf;
	if (!(status & STATUS_BUSY))
		goto out;

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}
5697529df46SPeter Pan
/*
 * Issue READ ID with @naddr address and @ndummy dummy bytes, copying
 * SPINAND_MAX_ID_LEN bytes of identification data into @buf on success.
 */
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
			      u8 ndummy, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(
		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return 0;
}
5837529df46SPeter Pan
/* Send RESET and wait for the chip to become ready again. */
static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand,
			    SPINAND_RESET_INITIAL_DELAY_US,
			    SPINAND_RESET_POLL_DELAY_US,
			    NULL);
}
5987529df46SPeter Pan
/* Program the BLOCK LOCK register (block-protection bits). */
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
6037529df46SPeter Pan
/*
 * Read one page: prepare the ECC engine, load the page into the chip
 * cache, wait for readiness, save the ECC status, fetch the cache content
 * and let the ECC engine decode/report bitflips.
 */
static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_READ_INITIAL_DELAY_US,
			   SPINAND_READ_POLL_DELAY_US,
			   &status);
	if (ret < 0)
		return ret;

	/* Remember the ECC bits for the engine's ->finish_io_req(). */
	spinand_ondie_ecc_save_status(nand, status);

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
6347529df46SPeter Pan
/*
 * Write one page: prepare the ECC engine, enable writes, fill the chip
 * cache, start the program operation and wait for completion, reporting
 * program failures as -EIO.
 */
static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	/* WRITE ENABLE must precede any program operation. */
	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_WRITE_INITIAL_DELAY_US,
			   SPINAND_WRITE_POLL_DELAY_US,
			   &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		return -EIO;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
6677529df46SPeter Pan
/*
 * Page-by-page read path, used when continuous reads are not possible
 * or not worth it.
 *
 * @max_bitflips is updated with the maximum number of bitflips observed
 * on any page of the request.
 *
 * Returns 0 on success, -EBADMSG if at least one page had uncorrectable
 * errors, or another negative error code.
 */
static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
					 struct mtd_oob_ops *ops,
					 unsigned int *max_bitflips)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool disable_ecc = false;
	bool ecc_failed = false;
	/*
	 * Initialize ret: if the iterator performs no iteration (empty
	 * request), ret would otherwise be read uninitialized below.
	 */
	int ret = 0;

	/* Without an OOB layout the ECC engine cannot be used. */
	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req);
		if (ret < 0 && ret != -EBADMSG)
			break;

		/*
		 * Keep reading on an ECC failure so the whole range is
		 * transferred; -EBADMSG is reported once at the end.
		 */
		if (ret == -EBADMSG)
			ecc_failed = true;
		else
			*max_bitflips = max_t(unsigned int, *max_bitflips, ret);

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret;
}
70979da1707SMiquel Raynal
/*
 * Block-at-a-time read path using the device's continuous read feature.
 * Only entered when spinand_use_cont_read() has validated the request
 * (single eraseblock, no OOB, more than one page).
 *
 * Returns 0 on success, a negative error code otherwise. @max_bitflips
 * is updated with the worst bitflip count reported by the ECC engine.
 */
static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
					    struct mtd_oob_ops *ops,
					    unsigned int *max_bitflips)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	u8 status;
	int ret;

	ret = spinand_cont_read_enable(spinand, true);
	if (ret)
		return ret;

	/*
	 * The cache is divided into two halves. While one half of the cache has
	 * the requested data, the other half is loaded with the next chunk of data.
	 * Therefore, the host can read out the data continuously from page to page.
	 * Each data read must be a multiple of 4-bytes and full pages should be read;
	 * otherwise, the data output might get out of sequence from one read command
	 * to another.
	 */
	nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			goto end_cont_read;

		ret = nand_ecc_prepare_io_req(nand, &iter.req);
		if (ret)
			goto end_cont_read;

		ret = spinand_load_page_op(spinand, &iter.req);
		if (ret)
			goto end_cont_read;

		ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
				   SPINAND_READ_POLL_DELAY_US, NULL);
		if (ret < 0)
			goto end_cont_read;

		ret = spinand_read_from_cache_op(spinand, &iter.req);
		if (ret)
			goto end_cont_read;

		ops->retlen += iter.req.datalen;

		/* The status byte carries the ECC outcome of the transfer. */
		ret = spinand_read_status(spinand, &status);
		if (ret)
			goto end_cont_read;

		spinand_ondie_ecc_save_status(nand, status);

		ret = nand_ecc_finish_io_req(nand, &iter.req);
		if (ret < 0)
			goto end_cont_read;

		*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
		ret = 0;
	}

end_cont_read:
	/*
	 * Once all the data has been read out, the host can either pull CS#
	 * high and wait for tRST or manually clear the bit in the configuration
	 * register to terminate the continuous read operation. We have no
	 * guarantee the SPI controller drivers will effectively deassert the CS
	 * when we expect them to, so take the register based approach.
	 */
	spinand_cont_read_enable(spinand, false);

	return ret;
}
782631cfdd0SMiquel Raynal
spinand_cont_read_init(struct spinand_device * spinand)783631cfdd0SMiquel Raynal static void spinand_cont_read_init(struct spinand_device *spinand)
784631cfdd0SMiquel Raynal {
785631cfdd0SMiquel Raynal struct nand_device *nand = spinand_to_nand(spinand);
786631cfdd0SMiquel Raynal enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type;
787631cfdd0SMiquel Raynal
788631cfdd0SMiquel Raynal /* OOBs cannot be retrieved so external/on-host ECC engine won't work */
789631cfdd0SMiquel Raynal if (spinand->set_cont_read &&
790631cfdd0SMiquel Raynal (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE ||
791631cfdd0SMiquel Raynal engine_type == NAND_ECC_ENGINE_TYPE_NONE)) {
792631cfdd0SMiquel Raynal spinand->cont_read_possible = true;
793631cfdd0SMiquel Raynal }
794631cfdd0SMiquel Raynal }
795631cfdd0SMiquel Raynal
spinand_use_cont_read(struct mtd_info * mtd,loff_t from,struct mtd_oob_ops * ops)796631cfdd0SMiquel Raynal static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
797631cfdd0SMiquel Raynal struct mtd_oob_ops *ops)
798631cfdd0SMiquel Raynal {
799631cfdd0SMiquel Raynal struct nand_device *nand = mtd_to_nanddev(mtd);
800631cfdd0SMiquel Raynal struct spinand_device *spinand = nand_to_spinand(nand);
801631cfdd0SMiquel Raynal struct nand_pos start_pos, end_pos;
802631cfdd0SMiquel Raynal
803631cfdd0SMiquel Raynal if (!spinand->cont_read_possible)
804631cfdd0SMiquel Raynal return false;
805631cfdd0SMiquel Raynal
806631cfdd0SMiquel Raynal /* OOBs won't be retrieved */
807631cfdd0SMiquel Raynal if (ops->ooblen || ops->oobbuf)
808631cfdd0SMiquel Raynal return false;
809631cfdd0SMiquel Raynal
810631cfdd0SMiquel Raynal nanddev_offs_to_pos(nand, from, &start_pos);
811631cfdd0SMiquel Raynal nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);
812631cfdd0SMiquel Raynal
813631cfdd0SMiquel Raynal /*
814631cfdd0SMiquel Raynal * Continuous reads never cross LUN boundaries. Some devices don't
815631cfdd0SMiquel Raynal * support crossing planes boundaries. Some devices don't even support
816631cfdd0SMiquel Raynal * crossing blocks boundaries. The common case being to read through UBI,
817631cfdd0SMiquel Raynal * we will very rarely read two consequent blocks or more, so it is safer
818631cfdd0SMiquel Raynal * and easier (can be improved) to only enable continuous reads when
819631cfdd0SMiquel Raynal * reading within the same erase block.
820631cfdd0SMiquel Raynal */
821631cfdd0SMiquel Raynal if (start_pos.target != end_pos.target ||
822631cfdd0SMiquel Raynal start_pos.plane != end_pos.plane ||
823631cfdd0SMiquel Raynal start_pos.eraseblock != end_pos.eraseblock)
824631cfdd0SMiquel Raynal return false;
825631cfdd0SMiquel Raynal
826631cfdd0SMiquel Raynal return start_pos.page < end_pos.page;
827631cfdd0SMiquel Raynal }
828631cfdd0SMiquel Raynal
/*
 * mtd->_read_oob() handler.
 *
 * Dispatches to the continuous or the regular page read path under the
 * device lock, and folds the delta of the global ECC statistics into
 * @ops->stats when the caller provided them.
 *
 * Returns the maximum number of bitflips observed on success, a
 * negative error code otherwise.
 */
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct mtd_ecc_stats old_stats;
	unsigned int max_bitflips = 0;
	int ret;

	mutex_lock(&spinand->lock);

	/* Snapshot the stats so only this request's delta is reported. */
	old_stats = mtd->ecc_stats;

	if (spinand_use_cont_read(mtd, from, ops))
		ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
	else
		ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);

	if (ops->stats) {
		ops->stats->uncorrectable_errors +=
			mtd->ecc_stats.failed - old_stats.failed;
		ops->stats->corrected_bitflips +=
			mtd->ecc_stats.corrected - old_stats.corrected;
	}

	mutex_unlock(&spinand->lock);

	return ret ? ret : max_bitflips;
}
8577529df46SPeter Pan
/*
 * mtd->_write_oob() handler.
 *
 * Iterates over the request page by page under the device lock,
 * selecting the right target and programming each page in turn.
 * Stops at the first error.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool disable_ecc = false;
	int ret = 0;

	/* Without an OOB layout the ECC engine cannot be used. */
	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}
8927529df46SPeter Pan
spinand_isbad(struct nand_device * nand,const struct nand_pos * pos)8937529df46SPeter Pan static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
8947529df46SPeter Pan {
8957529df46SPeter Pan struct spinand_device *spinand = nand_to_spinand(nand);
89621489375SFrieder Schrempf u8 marker[2] = { };
8977529df46SPeter Pan struct nand_page_io_req req = {
8987529df46SPeter Pan .pos = *pos,
89921489375SFrieder Schrempf .ooblen = sizeof(marker),
9007529df46SPeter Pan .ooboffs = 0,
90121489375SFrieder Schrempf .oobbuf.in = marker,
9027529df46SPeter Pan .mode = MTD_OPS_RAW,
9037529df46SPeter Pan };
9047529df46SPeter Pan
9057529df46SPeter Pan spinand_select_target(spinand, pos->target);
9063d1f08b0SMiquel Raynal spinand_read_page(spinand, &req);
90721489375SFrieder Schrempf if (marker[0] != 0xff || marker[1] != 0xff)
9087529df46SPeter Pan return true;
9097529df46SPeter Pan
9107529df46SPeter Pan return false;
9117529df46SPeter Pan }
9127529df46SPeter Pan
/*
 * mtd->_block_isbad() handler: translate the offset into a NAND
 * position and query the generic NAND layer under the device lock.
 */
static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos bpos;
	int bad;

	nanddev_offs_to_pos(nand, offs, &bpos);

	mutex_lock(&spinand->lock);
	bad = nanddev_isbad(nand, &bpos);
	mutex_unlock(&spinand->lock);

	return bad;
}
9277529df46SPeter Pan
/*
 * Mark the block pointed to by @pos as bad by programming a two-byte
 * zero marker in the OOB area of its first page. MTD_OPS_RAW ensures
 * the marker lands at the physical location, bypassing the ECC engine.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	return spinand_write_page(spinand, &req);
}
9517529df46SPeter Pan
/*
 * mtd->_block_markbad() handler: translate the offset into a NAND
 * position and let the generic NAND layer mark it, under the device
 * lock.
 */
static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos bpos;
	int err;

	nanddev_offs_to_pos(nand, offs, &bpos);

	mutex_lock(&spinand->lock);
	err = nanddev_markbad(nand, &bpos);
	mutex_unlock(&spinand->lock);

	return err;
}
9667529df46SPeter Pan
/*
 * Erase the block pointed to by @pos: select the target, issue WRITE
 * ENABLE, send the erase command and poll the status register until the
 * device becomes ready.
 *
 * Returns 0 on success, -EIO if the device reports an erase failure, or
 * another negative error code.
 */
static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_ERASE_INITIAL_DELAY_US,
			   SPINAND_ERASE_POLL_DELAY_US,
			   &status);

	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}
9957529df46SPeter Pan
spinand_mtd_erase(struct mtd_info * mtd,struct erase_info * einfo)9967529df46SPeter Pan static int spinand_mtd_erase(struct mtd_info *mtd,
9977529df46SPeter Pan struct erase_info *einfo)
9987529df46SPeter Pan {
9997529df46SPeter Pan struct spinand_device *spinand = mtd_to_spinand(mtd);
10007529df46SPeter Pan int ret;
10017529df46SPeter Pan
10027529df46SPeter Pan mutex_lock(&spinand->lock);
10037529df46SPeter Pan ret = nanddev_mtd_erase(mtd, einfo);
10047529df46SPeter Pan mutex_unlock(&spinand->lock);
10057529df46SPeter Pan
10067529df46SPeter Pan return ret;
10077529df46SPeter Pan }
10087529df46SPeter Pan
/*
 * mtd->_block_isreserved() handler: translate the offset into a NAND
 * position and query the generic NAND layer under the device lock.
 */
static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos bpos;
	int reserved;

	nanddev_offs_to_pos(nand, offs, &bpos);

	mutex_lock(&spinand->lock);
	reserved = nanddev_isreserved(nand, &bpos);
	mutex_unlock(&spinand->lock);

	return reserved;
}
10237529df46SPeter Pan
/*
 * Create the write and read direct-mapping descriptors covering one
 * plane. The mapping spans page + OOB, or a whole eraseblock when
 * continuous reads are possible. For pipelined ECC engines a second
 * pair of descriptors with ECC enabled is created; otherwise the plain
 * descriptors are reused for the ECC accessors.
 *
 * All descriptors are device-managed; no explicit teardown is needed.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	/* Continuous reads span a whole eraseblock: map it entirely. */
	if (spinand->cont_read_possible)
		info.length = nanddev_eraseblock_size(nand);

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	/* Non-pipelined engines reuse the plain descriptors for ECC I/O. */
	if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
		spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
		spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;

		return 0;
	}

	info.op_tmpl = *spinand->op_templates.update_cache;
	info.op_tmpl.data.ecc = true;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc_ecc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	info.op_tmpl.data.ecc = true;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc_ecc = desc;

	return 0;
}
1083981d1aa0SBoris Brezillon
spinand_create_dirmaps(struct spinand_device * spinand)1084981d1aa0SBoris Brezillon static int spinand_create_dirmaps(struct spinand_device *spinand)
1085981d1aa0SBoris Brezillon {
1086981d1aa0SBoris Brezillon struct nand_device *nand = spinand_to_nand(spinand);
1087981d1aa0SBoris Brezillon int i, ret;
1088981d1aa0SBoris Brezillon
1089981d1aa0SBoris Brezillon spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
1090981d1aa0SBoris Brezillon sizeof(*spinand->dirmaps) *
1091981d1aa0SBoris Brezillon nand->memorg.planes_per_lun,
1092981d1aa0SBoris Brezillon GFP_KERNEL);
1093981d1aa0SBoris Brezillon if (!spinand->dirmaps)
1094981d1aa0SBoris Brezillon return -ENOMEM;
1095981d1aa0SBoris Brezillon
1096981d1aa0SBoris Brezillon for (i = 0; i < nand->memorg.planes_per_lun; i++) {
1097981d1aa0SBoris Brezillon ret = spinand_create_dirmap(spinand, i);
1098981d1aa0SBoris Brezillon if (ret)
1099981d1aa0SBoris Brezillon return ret;
1100981d1aa0SBoris Brezillon }
1101981d1aa0SBoris Brezillon
1102981d1aa0SBoris Brezillon return 0;
1103981d1aa0SBoris Brezillon }
1104981d1aa0SBoris Brezillon
/* Generic NAND framework hooks for SPI NAND devices. */
static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};
11107529df46SPeter Pan
1111a508e887SPeter Pan static const struct spinand_manufacturer *spinand_manufacturers[] = {
1112724ef015SMario Kicherer &alliancememory_spinand_manufacturer,
1113fc602b4fSAidan MacDonald &ato_spinand_manufacturer,
1114d74c3648SChuanhong Guo &esmt_c8_spinand_manufacturer,
1115f447318fSMartin Kurbanov &foresee_spinand_manufacturer,
1116c93c6132SChuanhong Guo &gigadevice_spinand_manufacturer,
1117b02308afSBoris Brezillon ¯onix_spinand_manufacturer,
1118a508e887SPeter Pan µn_spinand_manufacturer,
111935526916SJeff Kletsky ¶gon_spinand_manufacturer,
112010949af1SSchrempf Frieder &toshiba_spinand_manufacturer,
11211075492bSFrieder Schrempf &winbond_spinand_manufacturer,
1122f4c5c7f9SFelix Matouschek &xtx_spinand_manufacturer,
1123a508e887SPeter Pan };
1124a508e887SPeter Pan
spinand_manufacturer_match(struct spinand_device * spinand,enum spinand_readid_method rdid_method)1125f1541773SChuanhong Guo static int spinand_manufacturer_match(struct spinand_device *spinand,
1126f1541773SChuanhong Guo enum spinand_readid_method rdid_method)
11277529df46SPeter Pan {
1128f1541773SChuanhong Guo u8 *id = spinand->id.data;
1129a508e887SPeter Pan unsigned int i;
1130a508e887SPeter Pan int ret;
1131a508e887SPeter Pan
1132a508e887SPeter Pan for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
1133f1541773SChuanhong Guo const struct spinand_manufacturer *manufacturer =
1134f1541773SChuanhong Guo spinand_manufacturers[i];
1135f1541773SChuanhong Guo
1136f1541773SChuanhong Guo if (id[0] != manufacturer->id)
1137f1541773SChuanhong Guo continue;
1138f1541773SChuanhong Guo
1139f1541773SChuanhong Guo ret = spinand_match_and_init(spinand,
1140f1541773SChuanhong Guo manufacturer->chips,
1141f1541773SChuanhong Guo manufacturer->nchips,
1142f1541773SChuanhong Guo rdid_method);
1143f1541773SChuanhong Guo if (ret < 0)
1144f1541773SChuanhong Guo continue;
1145f1541773SChuanhong Guo
1146f1541773SChuanhong Guo spinand->manufacturer = manufacturer;
1147a508e887SPeter Pan return 0;
1148a508e887SPeter Pan }
1149cff49d58SChia-Lin Kao (AceLan) return -EOPNOTSUPP;
1150a508e887SPeter Pan }
1151a508e887SPeter Pan
/*
 * Identify the chip by trying the three READ_ID variants in sequence:
 * plain opcode, opcode followed by an address byte, and opcode followed
 * by a dummy byte. The first variant whose ID matches a known
 * manufacturer/chip wins.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int spinand_id_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	ret = spinand_read_id_op(spinand, 0, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 1, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_ADDR);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 0, 1, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_DUMMY);

	return ret;
}
11807529df46SPeter Pan
spinand_manufacturer_init(struct spinand_device * spinand)11817529df46SPeter Pan static int spinand_manufacturer_init(struct spinand_device *spinand)
11827529df46SPeter Pan {
11837529df46SPeter Pan if (spinand->manufacturer->ops->init)
11847529df46SPeter Pan return spinand->manufacturer->ops->init(spinand);
11857529df46SPeter Pan
11867529df46SPeter Pan return 0;
11877529df46SPeter Pan }
11887529df46SPeter Pan
spinand_manufacturer_cleanup(struct spinand_device * spinand)11897529df46SPeter Pan static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
11907529df46SPeter Pan {
11917529df46SPeter Pan /* Release manufacturer private data */
11927529df46SPeter Pan if (spinand->manufacturer->ops->cleanup)
11937529df46SPeter Pan return spinand->manufacturer->ops->cleanup(spinand);
11947529df46SPeter Pan }
11957529df46SPeter Pan
/*
 * Pick the first operation variant the SPI controller can execute.
 *
 * Variants are tried in table order (callers list them from most to
 * least desirable). For each one, the full page + OOB transfer is
 * virtually split into chunks via spi_mem_adjust_op_size(); a variant
 * is acceptable only if every chunk is supported by the controller.
 *
 * Returns the selected template, or NULL if none is usable.
 */
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		/* nbytes == 0 means every chunk was accepted. */
		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}
12297529df46SPeter Pan
/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @rdid_method: read id method to match
 *
 * Match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size,
			   enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		/* Only consider entries declared for this READ_ID method. */
		if (rdid_method != info->devid.method)
			continue;

		/* Skip the manufacturer byte (id[0]): compare device ID only. */
		if (memcmp(id + 1, info->devid.id, info->devid.len))
			continue;

		nand->memorg = table[i].memorg;
		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->id.len = 1 + table[i].devid.len;
		spinand->select_target = table[i].select_target;
		spinand->set_cont_read = table[i].set_cont_read;

		/* Pick a controller-supported variant for each cache op. */
		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		/* The update-cache op is optional; NULL is tolerated. */
		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}
12957529df46SPeter Pan
spinand_detect(struct spinand_device * spinand)12967529df46SPeter Pan static int spinand_detect(struct spinand_device *spinand)
12977529df46SPeter Pan {
12987529df46SPeter Pan struct device *dev = &spinand->spimem->spi->dev;
12997529df46SPeter Pan struct nand_device *nand = spinand_to_nand(spinand);
13007529df46SPeter Pan int ret;
13017529df46SPeter Pan
13027529df46SPeter Pan ret = spinand_reset_op(spinand);
13037529df46SPeter Pan if (ret)
13047529df46SPeter Pan return ret;
13057529df46SPeter Pan
1306f1541773SChuanhong Guo ret = spinand_id_detect(spinand);
13077529df46SPeter Pan if (ret) {
13087529df46SPeter Pan dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
13097529df46SPeter Pan spinand->id.data);
13107529df46SPeter Pan return ret;
13117529df46SPeter Pan }
13127529df46SPeter Pan
13137529df46SPeter Pan if (nand->memorg.ntargets > 1 && !spinand->select_target) {
13147529df46SPeter Pan dev_err(dev,
13157529df46SPeter Pan "SPI NANDs with more than one die must implement ->select_target()\n");
13167529df46SPeter Pan return -EINVAL;
13177529df46SPeter Pan }
13187529df46SPeter Pan
13197529df46SPeter Pan dev_info(&spinand->spimem->spi->dev,
13207529df46SPeter Pan "%s SPI NAND was found.\n", spinand->manufacturer->name);
13217529df46SPeter Pan dev_info(&spinand->spimem->spi->dev,
13227529df46SPeter Pan "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
13237529df46SPeter Pan nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
13247529df46SPeter Pan nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
13257529df46SPeter Pan
13267529df46SPeter Pan return 0;
13277529df46SPeter Pan }
13287529df46SPeter Pan
spinand_init_flash(struct spinand_device * spinand)132941e005c2SPatrice Chotard static int spinand_init_flash(struct spinand_device *spinand)
133041e005c2SPatrice Chotard {
133141e005c2SPatrice Chotard struct device *dev = &spinand->spimem->spi->dev;
133241e005c2SPatrice Chotard struct nand_device *nand = spinand_to_nand(spinand);
133341e005c2SPatrice Chotard int ret, i;
133441e005c2SPatrice Chotard
133541e005c2SPatrice Chotard ret = spinand_read_cfg(spinand);
133641e005c2SPatrice Chotard if (ret)
133741e005c2SPatrice Chotard return ret;
133841e005c2SPatrice Chotard
133941e005c2SPatrice Chotard ret = spinand_init_quad_enable(spinand);
134041e005c2SPatrice Chotard if (ret)
134141e005c2SPatrice Chotard return ret;
134241e005c2SPatrice Chotard
134341e005c2SPatrice Chotard ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
134441e005c2SPatrice Chotard if (ret)
134541e005c2SPatrice Chotard return ret;
134641e005c2SPatrice Chotard
134741e005c2SPatrice Chotard ret = spinand_manufacturer_init(spinand);
134841e005c2SPatrice Chotard if (ret) {
134941e005c2SPatrice Chotard dev_err(dev,
135041e005c2SPatrice Chotard "Failed to initialize the SPI NAND chip (err = %d)\n",
135141e005c2SPatrice Chotard ret);
135241e005c2SPatrice Chotard return ret;
135341e005c2SPatrice Chotard }
135441e005c2SPatrice Chotard
135541e005c2SPatrice Chotard /* After power up, all blocks are locked, so unlock them here. */
135641e005c2SPatrice Chotard for (i = 0; i < nand->memorg.ntargets; i++) {
135741e005c2SPatrice Chotard ret = spinand_select_target(spinand, i);
135841e005c2SPatrice Chotard if (ret)
135941e005c2SPatrice Chotard break;
136041e005c2SPatrice Chotard
136141e005c2SPatrice Chotard ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
136241e005c2SPatrice Chotard if (ret)
136341e005c2SPatrice Chotard break;
136441e005c2SPatrice Chotard }
136541e005c2SPatrice Chotard
136641e005c2SPatrice Chotard if (ret)
136741e005c2SPatrice Chotard spinand_manufacturer_cleanup(spinand);
136841e005c2SPatrice Chotard
136941e005c2SPatrice Chotard return ret;
137041e005c2SPatrice Chotard }
137141e005c2SPatrice Chotard
spinand_mtd_resume(struct mtd_info * mtd)1372f145b9dcSPatrice Chotard static void spinand_mtd_resume(struct mtd_info *mtd)
1373f145b9dcSPatrice Chotard {
1374f145b9dcSPatrice Chotard struct spinand_device *spinand = mtd_to_spinand(mtd);
1375f145b9dcSPatrice Chotard int ret;
1376f145b9dcSPatrice Chotard
1377f145b9dcSPatrice Chotard ret = spinand_reset_op(spinand);
1378f145b9dcSPatrice Chotard if (ret)
1379f145b9dcSPatrice Chotard return;
1380f145b9dcSPatrice Chotard
1381f145b9dcSPatrice Chotard ret = spinand_init_flash(spinand);
1382f145b9dcSPatrice Chotard if (ret)
1383f145b9dcSPatrice Chotard return;
1384f145b9dcSPatrice Chotard
1385f145b9dcSPatrice Chotard spinand_ecc_enable(spinand, false);
1386f145b9dcSPatrice Chotard }
1387f145b9dcSPatrice Chotard
spinand_init(struct spinand_device * spinand)13887529df46SPeter Pan static int spinand_init(struct spinand_device *spinand)
13897529df46SPeter Pan {
13907529df46SPeter Pan struct device *dev = &spinand->spimem->spi->dev;
13917529df46SPeter Pan struct mtd_info *mtd = spinand_to_mtd(spinand);
13927529df46SPeter Pan struct nand_device *nand = mtd_to_nanddev(mtd);
139341e005c2SPatrice Chotard int ret;
13947529df46SPeter Pan
13957529df46SPeter Pan /*
13967529df46SPeter Pan * We need a scratch buffer because the spi_mem interface requires that
13977529df46SPeter Pan * buf passed in spi_mem_op->data.buf be DMA-able.
13987529df46SPeter Pan */
13997529df46SPeter Pan spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
14007529df46SPeter Pan if (!spinand->scratchbuf)
14017529df46SPeter Pan return -ENOMEM;
14027529df46SPeter Pan
14037529df46SPeter Pan ret = spinand_detect(spinand);
14047529df46SPeter Pan if (ret)
14057529df46SPeter Pan goto err_free_bufs;
14067529df46SPeter Pan
14077529df46SPeter Pan /*
14087529df46SPeter Pan * Use kzalloc() instead of devm_kzalloc() here, because some drivers
14097529df46SPeter Pan * may use this buffer for DMA access.
14107529df46SPeter Pan * Memory allocated by devm_ does not guarantee DMA-safe alignment.
14117529df46SPeter Pan */
1412631cfdd0SMiquel Raynal spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
14137529df46SPeter Pan GFP_KERNEL);
14147529df46SPeter Pan if (!spinand->databuf) {
14157529df46SPeter Pan ret = -ENOMEM;
14167529df46SPeter Pan goto err_free_bufs;
14177529df46SPeter Pan }
14187529df46SPeter Pan
14197529df46SPeter Pan spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
14207529df46SPeter Pan
14217529df46SPeter Pan ret = spinand_init_cfg_cache(spinand);
14227529df46SPeter Pan if (ret)
14237529df46SPeter Pan goto err_free_bufs;
14247529df46SPeter Pan
142541e005c2SPatrice Chotard ret = spinand_init_flash(spinand);
14267529df46SPeter Pan if (ret)
14277529df46SPeter Pan goto err_free_bufs;
14287529df46SPeter Pan
14297529df46SPeter Pan ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
14307529df46SPeter Pan if (ret)
14317529df46SPeter Pan goto err_manuf_cleanup;
14327529df46SPeter Pan
1433c8efe010SMiquel Raynal /* SPI-NAND default ECC engine is on-die */
1434c8efe010SMiquel Raynal nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
1435c8efe010SMiquel Raynal nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;
1436c8efe010SMiquel Raynal
14373d1f08b0SMiquel Raynal spinand_ecc_enable(spinand, false);
14383d1f08b0SMiquel Raynal ret = nanddev_ecc_engine_init(nand);
14393d1f08b0SMiquel Raynal if (ret)
14403d1f08b0SMiquel Raynal goto err_cleanup_nanddev;
14413d1f08b0SMiquel Raynal
1442631cfdd0SMiquel Raynal /*
1443631cfdd0SMiquel Raynal * Continuous read can only be enabled with an on-die ECC engine, so the
1444631cfdd0SMiquel Raynal * ECC initialization must have happened previously.
1445631cfdd0SMiquel Raynal */
1446631cfdd0SMiquel Raynal spinand_cont_read_init(spinand);
1447631cfdd0SMiquel Raynal
14487529df46SPeter Pan mtd->_read_oob = spinand_mtd_read;
14497529df46SPeter Pan mtd->_write_oob = spinand_mtd_write;
14507529df46SPeter Pan mtd->_block_isbad = spinand_mtd_block_isbad;
14517529df46SPeter Pan mtd->_block_markbad = spinand_mtd_block_markbad;
14527529df46SPeter Pan mtd->_block_isreserved = spinand_mtd_block_isreserved;
14537529df46SPeter Pan mtd->_erase = spinand_mtd_erase;
145450919848SBoris Brezillon mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
1455f145b9dcSPatrice Chotard mtd->_resume = spinand_mtd_resume;
14567529df46SPeter Pan
145700c15b78SMiquel Raynal if (nand->ecc.engine) {
14587529df46SPeter Pan ret = mtd_ooblayout_count_freebytes(mtd);
14597529df46SPeter Pan if (ret < 0)
14603d1f08b0SMiquel Raynal goto err_cleanup_ecc_engine;
146100c15b78SMiquel Raynal }
14627529df46SPeter Pan
14637529df46SPeter Pan mtd->oobavail = ret;
14647529df46SPeter Pan
14653507273dSMiquel Raynal /* Propagate ECC information to mtd_info */
14669a333a72SMiquel Raynal mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
14679a333a72SMiquel Raynal mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
14681824520eSDaniel Golle mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
14693507273dSMiquel Raynal
1470dc4c2cbfSMiquel Raynal ret = spinand_create_dirmaps(spinand);
1471dc4c2cbfSMiquel Raynal if (ret) {
1472dc4c2cbfSMiquel Raynal dev_err(dev,
1473dc4c2cbfSMiquel Raynal "Failed to create direct mappings for read/write operations (err = %d)\n",
1474dc4c2cbfSMiquel Raynal ret);
1475dc4c2cbfSMiquel Raynal goto err_cleanup_ecc_engine;
1476dc4c2cbfSMiquel Raynal }
1477dc4c2cbfSMiquel Raynal
14787529df46SPeter Pan return 0;
14797529df46SPeter Pan
14803d1f08b0SMiquel Raynal err_cleanup_ecc_engine:
14813d1f08b0SMiquel Raynal nanddev_ecc_engine_cleanup(nand);
14823d1f08b0SMiquel Raynal
14837529df46SPeter Pan err_cleanup_nanddev:
14847529df46SPeter Pan nanddev_cleanup(nand);
14857529df46SPeter Pan
14867529df46SPeter Pan err_manuf_cleanup:
14877529df46SPeter Pan spinand_manufacturer_cleanup(spinand);
14887529df46SPeter Pan
14897529df46SPeter Pan err_free_bufs:
14907529df46SPeter Pan kfree(spinand->databuf);
14917529df46SPeter Pan kfree(spinand->scratchbuf);
14927529df46SPeter Pan return ret;
14937529df46SPeter Pan }
14947529df46SPeter Pan
spinand_cleanup(struct spinand_device * spinand)14957529df46SPeter Pan static void spinand_cleanup(struct spinand_device *spinand)
14967529df46SPeter Pan {
14977529df46SPeter Pan struct nand_device *nand = spinand_to_nand(spinand);
14987529df46SPeter Pan
14997529df46SPeter Pan nanddev_cleanup(nand);
15007529df46SPeter Pan spinand_manufacturer_cleanup(spinand);
15017529df46SPeter Pan kfree(spinand->databuf);
15027529df46SPeter Pan kfree(spinand->scratchbuf);
15037529df46SPeter Pan }
15047529df46SPeter Pan
spinand_probe(struct spi_mem * mem)15057529df46SPeter Pan static int spinand_probe(struct spi_mem *mem)
15067529df46SPeter Pan {
15077529df46SPeter Pan struct spinand_device *spinand;
15087529df46SPeter Pan struct mtd_info *mtd;
15097529df46SPeter Pan int ret;
15107529df46SPeter Pan
15117529df46SPeter Pan spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
15127529df46SPeter Pan GFP_KERNEL);
15137529df46SPeter Pan if (!spinand)
15147529df46SPeter Pan return -ENOMEM;
15157529df46SPeter Pan
15167529df46SPeter Pan spinand->spimem = mem;
15177529df46SPeter Pan spi_mem_set_drvdata(mem, spinand);
15187529df46SPeter Pan spinand_set_of_node(spinand, mem->spi->dev.of_node);
15197529df46SPeter Pan mutex_init(&spinand->lock);
15207529df46SPeter Pan mtd = spinand_to_mtd(spinand);
15217529df46SPeter Pan mtd->dev.parent = &mem->spi->dev;
15227529df46SPeter Pan
15237529df46SPeter Pan ret = spinand_init(spinand);
15247529df46SPeter Pan if (ret)
15257529df46SPeter Pan return ret;
15267529df46SPeter Pan
15277529df46SPeter Pan ret = mtd_device_register(mtd, NULL, 0);
15287529df46SPeter Pan if (ret)
15297529df46SPeter Pan goto err_spinand_cleanup;
15307529df46SPeter Pan
15317529df46SPeter Pan return 0;
15327529df46SPeter Pan
15337529df46SPeter Pan err_spinand_cleanup:
15347529df46SPeter Pan spinand_cleanup(spinand);
15357529df46SPeter Pan
15367529df46SPeter Pan return ret;
15377529df46SPeter Pan }
15387529df46SPeter Pan
spinand_remove(struct spi_mem * mem)15397529df46SPeter Pan static int spinand_remove(struct spi_mem *mem)
15407529df46SPeter Pan {
15417529df46SPeter Pan struct spinand_device *spinand;
15427529df46SPeter Pan struct mtd_info *mtd;
15437529df46SPeter Pan int ret;
15447529df46SPeter Pan
15457529df46SPeter Pan spinand = spi_mem_get_drvdata(mem);
15467529df46SPeter Pan mtd = spinand_to_mtd(spinand);
15477529df46SPeter Pan
15487529df46SPeter Pan ret = mtd_device_unregister(mtd);
15497529df46SPeter Pan if (ret)
15507529df46SPeter Pan return ret;
15517529df46SPeter Pan
15527529df46SPeter Pan spinand_cleanup(spinand);
15537529df46SPeter Pan
15547529df46SPeter Pan return 0;
15557529df46SPeter Pan }
15567529df46SPeter Pan
/* Legacy SPI device-id match table. */
static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

#ifdef CONFIG_OF
/* Device-tree match table (generic compatible string). */
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif

/* spi-mem driver glue: binds probe/remove to the "spi-nand" compatible. */
static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");
1587