Lines Matching full:nand

10 #define pr_fmt(fmt)	"nand: " fmt
13 #include <linux/mtd/nand.h>
17 * @nand: NAND device
22 bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos) in nanddev_isbad() argument
27 if (nanddev_bbt_is_initialized(nand)) { in nanddev_isbad()
31 entry = nanddev_bbt_pos_to_entry(nand, pos); in nanddev_isbad()
32 status = nanddev_bbt_get_block_status(nand, entry); in nanddev_isbad()
35 if (nand->ops->isbad(nand, pos)) in nanddev_isbad()
40 nanddev_bbt_set_block_status(nand, entry, status); in nanddev_isbad()
50 return nand->ops->isbad(nand, pos); in nanddev_isbad()
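nanddev_isbad() checks the in-memory bad block table first and only falls back to the low-level nand->ops->isbad() hook when the entry status is still unknown, caching the result in the BBT afterwards. A minimal sketch of an mtd->_block_isbad() wrapper built on it (the foo_ name is hypothetical, not from this file):

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/nand.h>

    /* Translate the byte offset into a struct nand_pos and let the generic
     * layer (BBT lookup + ops->isbad() fallback) decide. */
    static int foo_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
    {
    	struct nand_device *nand = mtd_to_nanddev(mtd);
    	struct nand_pos pos;

    	nanddev_offs_to_pos(nand, offs, &pos);

    	return nanddev_isbad(nand, &pos);
    }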
56 * @nand: NAND device
60 * calls the low-level markbad hook (nand->ops->markbad()).
64 int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos) in nanddev_markbad() argument
66 struct mtd_info *mtd = nanddev_to_mtd(nand); in nanddev_markbad()
70 if (nanddev_isbad(nand, pos)) in nanddev_markbad()
73 ret = nand->ops->markbad(nand, pos); in nanddev_markbad()
76 nanddev_pos_to_offs(nand, pos), ret); in nanddev_markbad()
78 if (!nanddev_bbt_is_initialized(nand)) in nanddev_markbad()
81 entry = nanddev_bbt_pos_to_entry(nand, pos); in nanddev_markbad()
82 ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN); in nanddev_markbad()
86 ret = nanddev_bbt_update(nand); in nanddev_markbad()
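nanddev_markbad() returns early if the block is already flagged bad; otherwise it calls the low-level markbad hook, records NAND_BBT_BLOCK_WORN in the BBT and persists the table with nanddev_bbt_update(). A matching mtd->_block_markbad() wrapper follows the same offset-to-position pattern (hypothetical foo_ name, sketch only):

    static int foo_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
    {
    	struct nand_device *nand = mtd_to_nanddev(mtd);
    	struct nand_pos pos;

    	nanddev_offs_to_pos(nand, offs, &pos);

    	return nanddev_markbad(nand, &pos);
    }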
98 * @nand: NAND device
99 * @pos: NAND position to test
105 bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos) in nanddev_isreserved() argument
110 if (!nanddev_bbt_is_initialized(nand)) in nanddev_isreserved()
114 entry = nanddev_bbt_pos_to_entry(nand, pos); in nanddev_isreserved()
115 status = nanddev_bbt_get_block_status(nand, entry); in nanddev_isreserved()
121 * nanddev_erase() - Erase a NAND portion
122 * @nand: NAND device
129 static int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos) in nanddev_erase() argument
131 if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) { in nanddev_erase()
133 nanddev_pos_to_offs(nand, pos)); in nanddev_erase()
137 return nand->ops->erase(nand, pos); in nanddev_erase()
141 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
146 * concerned by @einfo and calling nand->ops->erase() on each of them.
149 * because there's no locking here. NAND specialized layers should instead
157 struct nand_device *nand = mtd_to_nanddev(mtd); in nanddev_mtd_erase() local
161 nanddev_offs_to_pos(nand, einfo->addr, &pos); in nanddev_mtd_erase()
162 nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last); in nanddev_mtd_erase()
164 ret = nanddev_erase(nand, &pos); in nanddev_mtd_erase()
166 einfo->fail_addr = nanddev_pos_to_offs(nand, &pos); in nanddev_mtd_erase()
171 nanddev_pos_next_eraseblock(nand, &pos); in nanddev_mtd_erase()
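As the comment above notes, nanddev_mtd_erase() does no locking, so it should not be assigned to mtd->_erase directly; specialized layers are expected to wrap it and take their own lock. A sketch of such a wrapper, assuming a hypothetical foo_device with its own mutex:

    #include <linux/mutex.h>
    #include <linux/mtd/mtd.h>
    #include <linux/mtd/nand.h>

    struct foo_device {
    	struct nand_device base;
    	struct mutex lock;	/* serializes accesses to the chip */
    };

    static struct foo_device *mtd_to_foo(struct mtd_info *mtd)
    {
    	return container_of(mtd_to_nanddev(mtd), struct foo_device, base);
    }

    /* Locked mtd->_erase() implementation delegating to the generic helper. */
    static int foo_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
    {
    	struct foo_device *foo = mtd_to_foo(mtd);
    	int ret;

    	mutex_lock(&foo->lock);
    	ret = nanddev_mtd_erase(mtd, einfo);
    	mutex_unlock(&foo->lock);

    	return ret;
    }

The driver would then set mtd->_erase = foo_mtd_erase; on failure, einfo->fail_addr already points at the offending eraseblock.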
180 * a specific region of the NAND device
182 * @offs: offset of the NAND region
183 * @len: length of the NAND region
186 * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
193 struct nand_device *nand = mtd_to_nanddev(mtd); in nanddev_mtd_max_bad_blocks() local
197 if (!nand->memorg.max_bad_eraseblocks_per_lun) in nanddev_mtd_max_bad_blocks()
200 nanddev_offs_to_pos(nand, offs, &pos); in nanddev_mtd_max_bad_blocks()
201 nanddev_offs_to_pos(nand, offs + len, &end); in nanddev_mtd_max_bad_blocks()
203 for (nanddev_offs_to_pos(nand, offs, &pos); in nanddev_mtd_max_bad_blocks()
205 nanddev_pos_next_lun(nand, &pos)) in nanddev_mtd_max_bad_blocks()
206 max_bb += nand->memorg.max_bad_eraseblocks_per_lun; in nanddev_mtd_max_bad_blocks()
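The loop adds the per-LUN budget once for every LUN the [offs, offs + len) range touches, not once per eraseblock. For a hypothetical device with max_bad_eraseblocks_per_lun = 20, a region contained in a single LUN therefore reports 20, while a region straddling two LUNs reports 40, regardless of how many blocks of each LUN actually fall inside the region.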
214 * @nand: NAND device
216 static int nanddev_get_ecc_engine(struct nand_device *nand) in nanddev_get_ecc_engine() argument
221 of_get_nand_ecc_user_config(nand); in nanddev_get_ecc_engine()
223 engine_type = nand->ecc.user_conf.engine_type; in nanddev_get_ecc_engine()
225 engine_type = nand->ecc.defaults.engine_type; in nanddev_get_ecc_engine()
231 nand->ecc.engine = nand_ecc_get_sw_engine(nand); in nanddev_get_ecc_engine()
234 nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand); in nanddev_get_ecc_engine()
237 nand->ecc.engine = nand_ecc_get_on_host_hw_engine(nand); in nanddev_get_ecc_engine()
238 if (PTR_ERR(nand->ecc.engine) == -EPROBE_DEFER) in nanddev_get_ecc_engine()
245 if (!nand->ecc.engine) in nanddev_get_ecc_engine()
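The engine is selected from the user configuration parsed by of_get_nand_ecc_user_config() when one is provided, and from nand->ecc.defaults otherwise; depending on the resulting engine_type the soft, on-die or on-host engine is looked up, with -EPROBE_DEFER propagated while an on-host engine is not available yet. The function is static, so drivers reach it through nanddev_ecc_engine_init(); a hedged sketch of a driver nominating the chip's on-die ECC as its default (hypothetical foo_ name):

    static int foo_ecc_init(struct nand_device *nand)
    {
    	/* Used only when the device tree does not request another engine. */
    	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;

    	return nanddev_ecc_engine_init(nand);
    }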
253 * @nand: NAND device
255 static int nanddev_put_ecc_engine(struct nand_device *nand) in nanddev_put_ecc_engine() argument
257 switch (nand->ecc.ctx.conf.engine_type) { in nanddev_put_ecc_engine()
259 nand_ecc_put_on_host_hw_engine(nand); in nanddev_put_ecc_engine()
273 * @nand: NAND device
275 static int nanddev_find_ecc_configuration(struct nand_device *nand) in nanddev_find_ecc_configuration() argument
279 if (!nand->ecc.engine) in nanddev_find_ecc_configuration()
282 ret = nand_ecc_init_ctx(nand); in nanddev_find_ecc_configuration()
286 if (!nand_ecc_is_strong_enough(nand)) in nanddev_find_ecc_configuration()
287 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n", in nanddev_find_ecc_configuration()
288 nand->mtd.name); in nanddev_find_ecc_configuration()
295 * @nand: NAND device
297 int nanddev_ecc_engine_init(struct nand_device *nand) in nanddev_ecc_engine_init() argument
302 ret = nanddev_get_ecc_engine(nand); in nanddev_ecc_engine_init()
311 if (!nand->ecc.engine) in nanddev_ecc_engine_init()
315 ret = nanddev_find_ecc_configuration(nand); in nanddev_ecc_engine_init()
318 nanddev_put_ecc_engine(nand); in nanddev_ecc_engine_init()
329 * @nand: NAND device
331 void nanddev_ecc_engine_cleanup(struct nand_device *nand) in nanddev_ecc_engine_cleanup() argument
333 if (nand->ecc.engine) in nanddev_ecc_engine_cleanup()
334 nand_ecc_cleanup_ctx(nand); in nanddev_ecc_engine_cleanup()
336 nanddev_put_ecc_engine(nand); in nanddev_ecc_engine_cleanup()
341 * nanddev_init() - Initialize a NAND device
342 * @nand: NAND device
343 * @ops: NAND device operations
344 * @owner: NAND device owner
346 * Initializes a NAND device object. Consistency checks are done on @ops and
347 * @nand->memorg. Also takes care of initializing the BBT.
351 int nanddev_init(struct nand_device *nand, const struct nand_ops *ops, in nanddev_init() argument
354 struct mtd_info *mtd = nanddev_to_mtd(nand); in nanddev_init()
355 struct nand_memory_organization *memorg = nanddev_get_memorg(nand); in nanddev_init()
357 if (!nand || !ops) in nanddev_init()
369 nand->rowconv.eraseblock_addr_shift = in nanddev_init()
371 nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) + in nanddev_init()
372 nand->rowconv.eraseblock_addr_shift; in nanddev_init()
374 nand->ops = ops; in nanddev_init()
383 mtd->size = nanddev_size(nand); in nanddev_init()
386 return nanddev_bbt_init(nand); in nanddev_init()
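nanddev_init() checks @ops and the memory organization, derives the row-address conversion shifts, fills in the mtd_info fields (size, erasesize, writesize, ...) and finally initializes the bad block table. A minimal bring-up sketch under assumed geometry, with hypothetical foo_ hooks standing in for a real controller driver:

    #include <linux/module.h>
    #include <linux/mtd/nand.h>

    static int foo_erase(struct nand_device *nand, const struct nand_pos *pos)
    {
    	return 0;	/* issue the erase on the real controller here */
    }

    static int foo_markbad(struct nand_device *nand, const struct nand_pos *pos)
    {
    	return 0;	/* write a bad block marker in the OOB area */
    }

    static bool foo_isbad(struct nand_device *nand, const struct nand_pos *pos)
    {
    	return false;	/* read the bad block marker back */
    }

    static const struct nand_ops foo_nand_ops = {
    	.erase = foo_erase,
    	.markbad = foo_markbad,
    	.isbad = foo_isbad,
    };

    static int foo_setup(struct nand_device *nand)
    {
    	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);

    	/* Illustrative SLC geometry: 2 KiB pages + 64 B OOB, 64 pages per
    	 * eraseblock, 1024 eraseblocks per LUN, one LUN, one target. */
    	memorg->bits_per_cell = 1;
    	memorg->pagesize = 2048;
    	memorg->oobsize = 64;
    	memorg->pages_per_eraseblock = 64;
    	memorg->eraseblocks_per_lun = 1024;
    	memorg->max_bad_eraseblocks_per_lun = 20;
    	memorg->planes_per_lun = 1;
    	memorg->luns_per_target = 1;
    	memorg->ntargets = 1;

    	return nanddev_init(nand, &foo_nand_ops, THIS_MODULE);
    }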
392 * @nand: NAND device
396 void nanddev_cleanup(struct nand_device *nand) in nanddev_cleanup() argument
398 if (nanddev_bbt_is_initialized(nand)) in nanddev_cleanup()
399 nanddev_bbt_cleanup(nand); in nanddev_cleanup()
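Teardown mirrors this: the ECC engine context is released with nanddev_ecc_engine_cleanup() and the BBT with nanddev_cleanup(). A hedged remove-path sketch for the hypothetical driver above:

    static void foo_remove(struct nand_device *nand)
    {
    	nanddev_ecc_engine_cleanup(nand);
    	nanddev_cleanup(nand);
    }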
403 MODULE_DESCRIPTION("Generic NAND framework");