// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt)	"nand: " fmt

#include <linux/module.h>
#include <linux/mtd/nand.h>

/**
 * nanddev_isbad() - Check if a block is bad
 * @nand: NAND device
 * @pos: position pointing to the block we want to check
 *
 * Return: true if the block is bad, false otherwise.
 */
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	if (nanddev_bbt_is_initialized(nand)) {
		unsigned int entry;
		int status;

		entry = nanddev_bbt_pos_to_entry(nand, pos);
		status = nanddev_bbt_get_block_status(nand, entry);
		/* Lazy block status retrieval */
		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
			if (nand->ops->isbad(nand, pos))
				status = NAND_BBT_BLOCK_FACTORY_BAD;
			else
				status = NAND_BBT_BLOCK_GOOD;

			nanddev_bbt_set_block_status(nand, entry, status);
		}

		if (status == NAND_BBT_BLOCK_WORN ||
		    status == NAND_BBT_BLOCK_FACTORY_BAD)
			return true;

		return false;
	}

	return nand->ops->isbad(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_isbad);

/**
 * nanddev_markbad() - Mark a block as bad
 * @nand: NAND device
 * @pos: position of the block to mark bad
 *
 * Mark a block bad. This function updates the BBT if available and calls the
 * low-level markbad hook (nand->ops->markbad()).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	unsigned int entry;
	int ret = 0;

	if (nanddev_isbad(nand, pos))
		return 0;

	ret = nand->ops->markbad(nand, pos);
	if (ret)
		pr_warn("failed to write BBM to block @%llx (err = %d)\n",
			nanddev_pos_to_offs(nand, pos), ret);

	if (!nanddev_bbt_is_initialized(nand))
		goto out;

	entry = nanddev_bbt_pos_to_entry(nand, pos);
	ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
	if (ret)
		goto out;

	ret = nanddev_bbt_update(nand);

out:
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);

/**
 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
 * @nand: NAND device
 * @pos: NAND position to test
 *
 * Checks whether the eraseblock pointed to by @pos is reserved or not.
 *
 * Return: true if the eraseblock is reserved, false otherwise.
 */
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
{
	unsigned int entry;
	int status;

	if (!nanddev_bbt_is_initialized(nand))
		return false;

	/* Return info from the table */
	entry = nanddev_bbt_pos_to_entry(nand, pos);
	status = nanddev_bbt_get_block_status(nand, entry);

	return status == NAND_BBT_BLOCK_RESERVED;
}
EXPORT_SYMBOL_GPL(nanddev_isreserved);
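/*
 * Illustrative sketch, not part of the original file: a specialized NAND
 * layer can back its mtd->_block_isbad() and mtd->_block_markbad() hooks
 * with the helpers above by converting the byte offset into a nand_pos
 * first. The example_* names are hypothetical; any locking a specific
 * layer needs around these calls is omitted here.
 */
static int example_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;

	nanddev_offs_to_pos(nand, offs, &pos);

	return nanddev_isbad(nand, &pos);
}

static int example_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;

	nanddev_offs_to_pos(nand, offs, &pos);

	return nanddev_markbad(nand, &pos);
}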
/**
 * nanddev_erase() - Erase a NAND portion
 * @nand: NAND device
 * @pos: position of the block to erase
 *
 * Erases the block if it's not bad.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
		pr_warn("attempt to erase a bad/reserved block @%llx\n",
			nanddev_pos_to_offs(nand, pos));
		return -EIO;
	}

	return nand->ops->erase(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_erase);

/**
 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
 * @mtd: MTD device
 * @einfo: erase request
 *
 * This is a simple mtd->_erase() implementation iterating over all blocks
 * covered by @einfo and calling nand->ops->erase() on each of them.
 *
 * Note that mtd->_erase should not be directly assigned to this helper,
 * because there's no locking here. NAND specialized layers should instead
 * implement their own wrapper around nanddev_mtd_erase() taking the
 * appropriate lock before calling nanddev_mtd_erase().
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos, last;
	int ret;

	nanddev_offs_to_pos(nand, einfo->addr, &pos);
	nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
	while (nanddev_pos_cmp(&pos, &last) <= 0) {
		ret = nanddev_erase(nand, &pos);
		if (ret) {
			einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);

			return ret;
		}

		nanddev_pos_next_eraseblock(nand, &pos);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
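/*
 * Illustrative sketch, not part of the original file: as the
 * nanddev_mtd_erase() kernel-doc above notes, mtd->_erase should not point
 * directly at that helper. A specialized layer would typically wrap it and
 * take its own lock first. The example_nand structure and its mutex are
 * hypothetical (the mutex would be initialized at probe time, not shown);
 * struct mutex is assumed to be available through the existing includes.
 */
struct example_nand {
	struct nand_device base;
	struct mutex lock;
};

static int example_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
	struct example_nand *enand = container_of(mtd_to_nanddev(mtd),
						  struct example_nand, base);
	int ret;

	/* Serialize against other accesses before delegating the erase. */
	mutex_lock(&enand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&enand->lock);

	return ret;
}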
/**
 * nanddev_init() - Initialize a NAND device
 * @nand: NAND device
 * @ops: NAND device operations
 * @owner: NAND device owner
 *
 * Initializes a NAND device object. Consistency checks are done on @ops and
 * @nand->memorg. Also takes care of initializing the BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
		 struct module *owner)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);

	if (!nand || !ops)
		return -EINVAL;

	if (!ops->erase || !ops->markbad || !ops->isbad)
		return -EINVAL;

	if (!memorg->bits_per_cell || !memorg->pagesize ||
	    !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
	    !memorg->planes_per_lun || !memorg->luns_per_target ||
	    !memorg->ntargets)
		return -EINVAL;

	nand->rowconv.eraseblock_addr_shift =
					fls(memorg->pages_per_eraseblock - 1);
	nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
				       nand->rowconv.eraseblock_addr_shift;

	nand->ops = ops;

	mtd->type = memorg->bits_per_cell == 1 ?
		    MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
	mtd->writesize = memorg->pagesize;
	mtd->writebufsize = memorg->pagesize;
	mtd->oobsize = memorg->oobsize;
	mtd->size = nanddev_size(nand);
	mtd->owner = owner;

	return nanddev_bbt_init(nand);
}
EXPORT_SYMBOL_GPL(nanddev_init);

/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init().
 */
void nanddev_cleanup(struct nand_device *nand)
{
	if (nanddev_bbt_is_initialized(nand))
		nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);

MODULE_DESCRIPTION("Generic NAND framework");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_LICENSE("GPL v2");
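/*
 * Illustrative sketch, not part of the original file: filling the memory
 * organization that nanddev_init() checks before calling it at probe time.
 * The example_nand_init() name, the passed-in ops, and the chip geometry
 * are hypothetical; nanddev_cleanup() would be called on the removal path.
 */
static int example_nand_init(struct nand_device *nand,
			     const struct nand_ops *ops)
{
	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);

	/* Hypothetical single-die SLC chip: 2048+64 pages, 64 pages/block. */
	memorg->bits_per_cell = 1;
	memorg->pagesize = 2048;
	memorg->oobsize = 64;
	memorg->pages_per_eraseblock = 64;
	memorg->eraseblocks_per_lun = 1024;
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;
	memorg->ntargets = 1;

	/* Consistency checks and BBT initialization happen in nanddev_init(). */
	return nanddev_init(nand, ops, THIS_MODULE);
}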