// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt)	"nand: " fmt

#include <linux/module.h>
#include <linux/mtd/nand.h>

/**
 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
 * @buf: buffer to test
 * @len: buffer length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a buffer contains only 0xff, which means the underlying region
 * has been erased and is ready to be programmed.
 * The bitflips_threshold specifies the maximum number of bitflips before
 * considering the region as not erased.
 * Note: The logic of this function has been extracted from the memweight
 * implementation, except that the nand_check_erased_buf() function exits
 * before testing the whole buffer if the number of bitflips exceeds the
 * bitflips_threshold value.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold.
 */
static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
{
	const unsigned char *bitmap = buf;
	int bitflips = 0;
	int weight;

	/* Count cleared bits in the unaligned prefix, byte by byte */
	for (; len && ((uintptr_t)bitmap) % sizeof(long);
	     len--, bitmap++) {
		weight = hweight8(*bitmap);
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	/* Process the aligned middle of the buffer one word at a time */
	for (; len >= sizeof(long);
	     len -= sizeof(long), bitmap += sizeof(long)) {
		unsigned long d = *((unsigned long *)bitmap);

		if (d == ~0UL)
			continue;
		weight = hweight_long(d);
		bitflips += BITS_PER_LONG - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	/* Handle the remaining tail bytes */
	for (; len > 0; len--, bitmap++) {
		weight = hweight8(*bitmap);
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	return bitflips;
}
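
/*
 * Illustrative example (not part of the original file): for a 4-byte
 * buffer { 0xff, 0xfe, 0xff, 0x7f }, two bits are cleared in total, so
 * with bitflips_threshold = 4 the function above returns 2, while with
 * bitflips_threshold = 1 it returns -EBADMSG.
 */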

/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *				 0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a data buffer and its associated ECC and OOB data contain only
 * 0xff pattern, which means the underlying region has been erased and is
 * ready to be programmed.
 * The bitflips_threshold specifies the maximum number of bitflips before
 * considering the region as not erased.
 *
 * Note:
 * 1/ ECC algorithms work on pre-defined block sizes which are usually
 *    different from the NAND page size. When fixing bitflips, ECC engines
 *    report the number of errors per chunk, and the NAND core infrastructure
 *    expects you to return the maximum number of bitflips for the whole page.
 *    This is why you should always use this function on a single chunk and
 *    not on the whole page. After checking each chunk you should update your
 *    max_bitflips value accordingly.
 * 2/ When checking for bitflips in erased pages you should not only check
 *    the payload data but also their associated ECC data, because a user
 *    might have programmed almost all bits to 1 but a few. In this case, we
 *    shouldn't consider the chunk as erased, and checking ECC bytes prevents
 *    this case.
 * 3/ The extraoob argument is optional, and should be used if some of your
 *    OOB data are protected by the ECC engine.
 *    It could also be used if you support subpages and want to attach some
 *    extra OOB data to an ECC chunk.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold. In case of success, the passed buffers are filled with 0xff.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;

	data_bitflips = nand_check_erased_buf(data, datalen,
					      bitflips_threshold);
	if (data_bitflips < 0)
		return data_bitflips;

	bitflips_threshold -= data_bitflips;

	ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
	if (ecc_bitflips < 0)
		return ecc_bitflips;

	bitflips_threshold -= ecc_bitflips;

	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
						  bitflips_threshold);
	if (extraoob_bitflips < 0)
		return extraoob_bitflips;

	/* The chunk is erased: normalize all buffers back to 0xff */
	if (data_bitflips)
		memset(data, 0xff, datalen);

	if (ecc_bitflips)
		memset(ecc, 0xff, ecclen);

	if (extraoob_bitflips)
		memset(extraoob, 0xff, extraooblen);

	return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
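
/*
 * Illustrative sketch (assumed driver-side code, not part of this file):
 * a typical page-read path calls nand_check_erased_ecc_chunk() once per
 * ECC chunk when the ECC engine reports an uncorrectable error, to tell
 * a genuinely erased chunk apart from a corrupted one. The databuf,
 * eccbuf, chunksize, eccbytes, ecc_strength and max_bitflips names below
 * are hypothetical:
 *
 *	ret = nand_check_erased_ecc_chunk(databuf, chunksize,
 *					  eccbuf, eccbytes,
 *					  NULL, 0, ecc_strength);
 *	if (ret < 0) {
 *		mtd->ecc_stats.failed++;
 *	} else {
 *		mtd->ecc_stats.corrected += ret;
 *		max_bitflips = max_t(unsigned int, max_bitflips, ret);
 *	}
 */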

/**
 * nanddev_isbad() - Check if a block is bad
 * @nand: NAND device
 * @pos: position pointing to the block we want to check
 *
 * Return: true if the block is bad, false otherwise.
 */
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	if (mtd_check_expert_analysis_mode())
		return false;

	if (nanddev_bbt_is_initialized(nand)) {
		unsigned int entry;
		int status;

		entry = nanddev_bbt_pos_to_entry(nand, pos);
		status = nanddev_bbt_get_block_status(nand, entry);

		/* Lazy block status retrieval */
		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
			if (nand->ops->isbad(nand, pos))
				status = NAND_BBT_BLOCK_FACTORY_BAD;
			else
				status = NAND_BBT_BLOCK_GOOD;

			nanddev_bbt_set_block_status(nand, entry, status);
		}

		if (status == NAND_BBT_BLOCK_WORN ||
		    status == NAND_BBT_BLOCK_FACTORY_BAD)
			return true;

		return false;
	}

	return nand->ops->isbad(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_isbad);

/**
 * nanddev_markbad() - Mark a block as bad
 * @nand: NAND device
 * @pos: position of the block to mark bad
 *
 * Mark a block bad. This function updates the BBT if available and calls the
 * low-level markbad hook (nand->ops->markbad()).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	unsigned int entry;
	int ret = 0;

	if (nanddev_isbad(nand, pos))
		return 0;

	ret = nand->ops->markbad(nand, pos);
	if (ret)
		pr_warn("failed to write BBM to block @%llx (err = %d)\n",
			nanddev_pos_to_offs(nand, pos), ret);

	if (!nanddev_bbt_is_initialized(nand))
		goto out;

	entry = nanddev_bbt_pos_to_entry(nand, pos);
	ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
	if (ret)
		goto out;

	ret = nanddev_bbt_update(nand);

out:
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);
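
/*
 * Illustrative sketch (assumed driver-side code, not part of this file):
 * walking every eraseblock of a device and checking it with
 * nanddev_isbad(), using the nanddev position helpers from
 * <linux/mtd/nand.h>:
 *
 *	struct nand_pos pos;
 *	unsigned int nblocks = nanddev_neraseblocks(nand);
 *	unsigned int i;
 *
 *	for (i = 0, nanddev_offs_to_pos(nand, 0, &pos);
 *	     i < nblocks;
 *	     i++, nanddev_pos_next_eraseblock(nand, &pos)) {
 *		if (nanddev_isbad(nand, &pos))
 *			pr_info("bad block @%llx\n",
 *				nanddev_pos_to_offs(nand, &pos));
 *	}
 */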

/**
 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
 * @nand: NAND device
 * @pos: NAND position to test
 *
 * Checks whether the eraseblock pointed to by @pos is reserved or not.
 *
 * Return: true if the eraseblock is reserved, false otherwise.
 */
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
{
	unsigned int entry;
	int status;

	if (!nanddev_bbt_is_initialized(nand))
		return false;

	/* Return info from the table */
	entry = nanddev_bbt_pos_to_entry(nand, pos);
	status = nanddev_bbt_get_block_status(nand, entry);
	return status == NAND_BBT_BLOCK_RESERVED;
}
EXPORT_SYMBOL_GPL(nanddev_isreserved);

/**
 * nanddev_erase() - Erase a NAND portion
 * @nand: NAND device
 * @pos: position of the block to erase
 *
 * Erases the block if it's not bad.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
static int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
		pr_warn("attempt to erase a bad/reserved block @%llx\n",
			nanddev_pos_to_offs(nand, pos));
		return -EIO;
	}

	return nand->ops->erase(nand, pos);
}

/**
 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
 * @mtd: MTD device
 * @einfo: erase request
 *
 * This is a simple mtd->_erase() implementation iterating over all blocks
 * concerned by @einfo and calling nand->ops->erase() on each of them.
 *
 * Note that mtd->_erase should not be directly assigned to this helper,
 * because there's no locking here. NAND specialized layers should instead
 * implement their own wrapper around nanddev_mtd_erase() taking the
 * appropriate lock before calling nanddev_mtd_erase(), as sketched below.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos, last;
	int ret;

	nanddev_offs_to_pos(nand, einfo->addr, &pos);
	nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
	while (nanddev_pos_cmp(&pos, &last) <= 0) {
		ret = nanddev_erase(nand, &pos);
		if (ret) {
			einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);

			return ret;
		}

		nanddev_pos_next_eraseblock(nand, &pos);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
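
/*
 * Illustrative sketch (assumed specialized-layer code, not part of this
 * file): a locked mtd->_erase() wrapper as suggested above. The
 * my_nand_device structure, the my_mtd_to_nand() helper and its lock are
 * hypothetical; SPI-NAND implements something very similar with its own
 * lock:
 *
 *	static int my_mtd_erase(struct mtd_info *mtd,
 *				struct erase_info *einfo)
 *	{
 *		struct my_nand_device *dev = my_mtd_to_nand(mtd);
 *		int ret;
 *
 *		mutex_lock(&dev->lock);
 *		ret = nanddev_mtd_erase(mtd, einfo);
 *		mutex_unlock(&dev->lock);
 *
 *		return ret;
 *	}
 */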

/**
 * nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblocks
 *				  on a specific region of the NAND device
 * @mtd: MTD device
 * @offs: offset of the NAND region
 * @len: length of the NAND region
 *
 * Default implementation for mtd->_max_bad_blocks(). Only works if
 * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
 *
 * Return: a positive number encoding the maximum number of bad eraseblocks
 * on a portion of memory, a negative error code otherwise.
 */
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos, end;
	unsigned int max_bb = 0;

	if (!nand->memorg.max_bad_eraseblocks_per_lun)
		return -ENOTSUPP;

	nanddev_offs_to_pos(nand, offs, &pos);
	nanddev_offs_to_pos(nand, offs + len, &end);

	/* Account for the worst case on each LUN covered by the region */
	for (; nanddev_pos_cmp(&pos, &end) < 0;
	     nanddev_pos_next_lun(nand, &pos))
		max_bb += nand->memorg.max_bad_eraseblocks_per_lun;

	return max_bb;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);

/**
 * nanddev_get_ecc_engine() - Find and get a suitable ECC engine
 * @nand: NAND device
 */
static int nanddev_get_ecc_engine(struct nand_device *nand)
{
	int engine_type;

	/* Read the user's desires in terms of ECC engine/configuration */
	of_get_nand_ecc_user_config(nand);

	engine_type = nand->ecc.user_conf.engine_type;
	if (engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		engine_type = nand->ecc.defaults.engine_type;

	switch (engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
		return 0;
	case NAND_ECC_ENGINE_TYPE_SOFT:
		nand->ecc.engine = nand_ecc_get_sw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		nand->ecc.engine = nand_ecc_get_on_host_hw_engine(nand);
		if (PTR_ERR(nand->ecc.engine) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		break;
	default:
		pr_err("Missing ECC engine type\n");
	}

	if (!nand->ecc.engine)
		return -EINVAL;

	return 0;
}

/**
 * nanddev_put_ecc_engine() - Detach and put the in-use ECC engine
 * @nand: NAND device
 */
static int nanddev_put_ecc_engine(struct nand_device *nand)
{
	switch (nand->ecc.ctx.conf.engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		nand_ecc_put_on_host_hw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
	default:
		break;
	}

	return 0;
}

/**
 * nanddev_find_ecc_configuration() - Find a suitable ECC configuration
 * @nand: NAND device
 */
static int nanddev_find_ecc_configuration(struct nand_device *nand)
{
	int ret;

	if (!nand->ecc.engine)
		return -ENOTSUPP;

	ret = nand_ecc_init_ctx(nand);
	if (ret)
		return ret;

	if (!nand_ecc_is_strong_enough(nand))
		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
			nand->mtd.name);

	return 0;
}

/**
 * nanddev_ecc_engine_init() - Initialize an ECC engine for the chip
 * @nand: NAND device
 */
int nanddev_ecc_engine_init(struct nand_device *nand)
{
	int ret;

	/* Look for the ECC engine to use */
	ret = nanddev_get_ecc_engine(nand);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			pr_err("No ECC engine found\n");

		return ret;
	}

	/* No ECC engine requested */
	if (!nand->ecc.engine)
		return 0;

	/* Configure the engine: balance user input and chip requirements */
	ret = nanddev_find_ecc_configuration(nand);
	if (ret) {
		pr_err("No suitable ECC configuration\n");
		nanddev_put_ecc_engine(nand);

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_init);

/**
 * nanddev_ecc_engine_cleanup() - Cleanup ECC engine initializations
 * @nand: NAND device
 */
void nanddev_ecc_engine_cleanup(struct nand_device *nand)
{
	if (nand->ecc.engine)
		nand_ecc_cleanup_ctx(nand);

	nanddev_put_ecc_engine(nand);
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_cleanup);

/**
 * nanddev_init() - Initialize a NAND device
 * @nand: NAND device
 * @ops: NAND device operations
 * @owner: NAND device owner
 *
 * Initializes a NAND device object. Consistency checks are done on @ops and
 * @nand->memorg. Also takes care of initializing the BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
		 struct module *owner)
{
	struct mtd_info *mtd;
	struct nand_memory_organization *memorg;

	/* Validate the arguments before touching them */
	if (!nand || !ops)
		return -EINVAL;

	if (!ops->erase || !ops->markbad || !ops->isbad)
		return -EINVAL;

	mtd = nanddev_to_mtd(nand);
	memorg = nanddev_get_memorg(nand);
	if (!memorg->bits_per_cell || !memorg->pagesize ||
	    !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
	    !memorg->planes_per_lun || !memorg->luns_per_target ||
	    !memorg->ntargets)
		return -EINVAL;

	/* Precompute the shifts used to convert a position to a row address */
	nand->rowconv.eraseblock_addr_shift =
					fls(memorg->pages_per_eraseblock - 1);
	nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
				       nand->rowconv.eraseblock_addr_shift;

	nand->ops = ops;

	mtd->type = memorg->bits_per_cell == 1 ?
		    MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
	mtd->writesize = memorg->pagesize;
	mtd->writebufsize = memorg->pagesize;
	mtd->oobsize = memorg->oobsize;
	mtd->size = nanddev_size(nand);
	mtd->owner = owner;

	return nanddev_bbt_init(nand);
}
EXPORT_SYMBOL_GPL(nanddev_init);
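
/*
 * Illustrative sketch (assumed driver-side code, not part of this file):
 * a probe path filling the memory organization, registering its nand_ops
 * and initializing the device, then the ECC engine. my_nand_ops, its
 * callbacks and the NAND_MEMORG() values are hypothetical; see the
 * SPI-NAND core for a real user. Teardown mirrors this in reverse:
 * nanddev_ecc_engine_cleanup() followed by nanddev_cleanup().
 *
 *	static const struct nand_ops my_nand_ops = {
 *		.erase = my_erase,
 *		.markbad = my_markbad,
 *		.isbad = my_isbad,
 *	};
 *
 *	nand->memorg = (struct nand_memory_organization)
 *		NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1);
 *	ret = nanddev_init(nand, &my_nand_ops, THIS_MODULE);
 *	if (ret)
 *		return ret;
 *
 *	ret = nanddev_ecc_engine_init(nand);
 *	if (ret) {
 *		nanddev_cleanup(nand);
 *		return ret;
 *	}
 */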
526 */ 527 void nanddev_cleanup(struct nand_device *nand) 528 { 529 if (nanddev_bbt_is_initialized(nand)) 530 nanddev_bbt_cleanup(nand); 531 } 532 EXPORT_SYMBOL_GPL(nanddev_cleanup); 533 534 MODULE_DESCRIPTION("Generic NAND framework"); 535 MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>"); 536 MODULE_LICENSE("GPL v2"); 537