
// SPDX-License-Identifier: GPL-2.0
/*
 * Bad block management
 *
 * - Heavily based on MD badblocks code from Neil Brown
 */

#include <linux/badblocks.h>
#include <linux/seqlock.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/slab.h>
/*
 * How bad block ranges are set and cleared (ASCII diagrams elided):
 *
 * A setting range S may merge with, split, combine with, or overwrite
 * ranges E/E1/E2/E3 already in the bad blocks table, depending on how
 * they overlap or adjoin and on their acknowledged flags:
 *
 * - A single table entry covers at most BB_MAX_LEN sectors; a longer
 *   setting range is stored as multiple entries.
 * - When the table has no free slot, as much of the setting range as
 *   possible is merged into existing entries. If that merging folds
 *   several entries (e.g. E1, E2 and E3) into fewer ones, the bad
 *   blocks table is not full anymore and there is an available slot
 *   from the bad blocks table, so setting is re-tried to handle more
 *   of the range.
 * - The acknowledged flag of each range (shown as S: 1, E1: 1, E2: 0,
 *   E3: 0 in the worked examples) decides whether ranges may merge:
 *   an acknowledged setting range may overwrite unacknowledged
 *   entries, but not the reverse.
 * - Clearing range C from entry E may trim E's start LBA to the end of
 *   C and shrink the range to BB_LEN(E) - BB_LEN(C), may trim E's end
 *   to the start of C and reduce its length likewise, or may split E
 *   in two when C hits its middle. Clearing a range that overlaps no
 *   entry at all simply returns 0, since nothing needs to happen.
 * - Both setting and clearing consume the caller's range chunk by
 *   chunk in a while-loop; the idea of clearing is similar to bad
 *   blocks range setting but much simpler.
 */
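/*
 * Example (illustrative sketch, not from the original file): the 64-bit
 * entry encoding behind the BB_MAKE()/BB_OFFSET()/BB_LEN()/BB_ACK()
 * macros from include/linux/badblocks.h, assuming the layout described
 * at badblocks_check() below: bit 63 is the 'acknowledged' flag, bits
 * 62..9 hold the start sector, bits 8..0 hold length - 1 (so 1..512
 * sectors per entry). The example_* names are hypothetical.
 */
static inline u64 example_bb_make(sector_t start, int len, int ack)
{
	return ((u64)start << 9) | (u64)(len - 1) | ((u64)(!!ack) << 63);
}

static inline sector_t example_bb_offset(u64 entry)
{
	return (entry >> 9) & ((1ULL << 54) - 1);	/* 54-bit start */
}

static inline int example_bb_len(u64 entry)
{
	return (entry & 0x1ff) + 1;			/* 1..512 sectors */
}

static inline int example_bb_ack(u64 entry)
{
	return !!(entry >> 63);
}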
/*
 * Find the range that starts at-or-before 's' in the bad table. The
 * search starts from index 'hint' and stops at index 'hint_end' of the
 * bad table.
 */
static int prev_by_hint(struct badblocks *bb, sector_t s, int hint)
{
	int hint_end = hint + 2;
	u64 *p = bb->page;
	int ret = -1;

	while ((hint < hint_end) && ((hint + 1) <= bb->count) &&
	       (BB_OFFSET(p[hint]) <= s)) {
		if ((hint + 1) == bb->count || BB_OFFSET(p[hint + 1]) > s) {
			ret = hint;
			break;
		}
		hint++;
	}

	return ret;
}
/*
 * Find the range that starts at-or-before bad->start. If 'hint' is
 * provided (hint >= 0), search the bad table from 'hint' first; the
 * range being handled very often neighbours the previous hit, so the
 * unnecessary while-loop iterations can be avoided. Otherwise fall
 * back to a bisect over the whole table (see the sketch after this
 * function).
 */
static int prev_badblocks(struct badblocks *bb, struct badblocks_context *bad,
			  int hint)
{
	sector_t s = bad->start;
	int ret = -1;
	int lo, hi;
	u64 *p;

	if (!bb->count)
		goto out;

	if (hint >= 0) {
		ret = prev_by_hint(bb, s, hint);
		if (ret >= 0)
			goto out;
	}

	lo = 0;
	hi = bb->count;
	p = bb->page;

	/* Fast paths: 's' lies before the first or at/after the last start */
	if (BB_OFFSET(p[lo]) > s)
		return -1;
	if (BB_OFFSET(p[hi - 1]) <= s)
		return hi - 1;

	/* Bisect for the last entry whose start is at-or-before 's' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;

		if (BB_OFFSET(p[mid]) <= s)
			lo = mid;
		else
			hi = mid;
	}
	ret = lo;
out:
	return ret;
}
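/*
 * Example (sketch): the bisect that prev_badblocks() falls back to when
 * the hint misses -- find the last entry whose start is at-or-before
 * 's' in a table sorted by start sector, or -1 when every entry starts
 * after 's'. 'example_prev_badblocks' is a hypothetical standalone
 * restatement, not part of the original file.
 */
static int example_prev_badblocks(u64 *p, int count, sector_t s)
{
	int lo = 0, hi = count;

	if (!count || BB_OFFSET(p[0]) > s)
		return -1;
	if (BB_OFFSET(p[count - 1]) <= s)
		return count - 1;

	/* Invariant: BB_OFFSET(p[lo]) <= s < BB_OFFSET(p[hi]) */
	while (hi - lo > 1) {
		int mid = lo + (hi - lo) / 2;

		if (BB_OFFSET(p[mid]) <= s)
			lo = mid;
		else
			hi = mid;
	}

	return lo;
}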
/*
 * Return 'true' if the range indicated by 'bad' can be backward merged
 * with the bad range (from the bad table) indexed by 'behind'.
 */
static bool can_merge_behind(struct badblocks *bb,
			     struct badblocks_context *bad, int behind)
{
	sector_t sectors = bad->len;
	sector_t s = bad->start;
	u64 *p = bb->page;

	if ((s < BB_OFFSET(p[behind])) &&
	    ((s + sectors) >= BB_OFFSET(p[behind])) &&
	    ((BB_END(p[behind]) - s) <= BB_MAX_LEN) &&
	    BB_ACK(p[behind]) == bad->ack)
		return true;
	return false;
}
/*
 * Do backward merge for the range indicated by 'bad' and the bad range
 * (from the bad table) indexed by 'behind'. The return value is the
 * number of sectors merged from bad->len.
 */
static int behind_merge(struct badblocks *bb, struct badblocks_context *bad,
			int behind)
{
	sector_t sectors = bad->len;
	sector_t s = bad->start;
	u64 *p = bb->page;
	int merged = 0;

	if (s < BB_OFFSET(p[behind])) {
		merged = BB_OFFSET(p[behind]) - s;
		p[behind] = BB_MAKE(s, BB_LEN(p[behind]) + merged, bad->ack);
	}

	return merged;
}
/*
 * Return 'true' if the range indicated by 'bad' can be forward
 * merged with the bad range (from the bad table) indexed by 'prev'.
 */
static bool can_merge_front(struct badblocks *bb, int prev,
			    struct badblocks_context *bad)
{
	sector_t s = bad->start;
	u64 *p = bb->page;

	if (BB_ACK(p[prev]) == bad->ack &&
	    (s < BB_END(p[prev]) ||
	     (s == BB_END(p[prev]) && (BB_LEN(p[prev]) < BB_MAX_LEN))))
		return true;
	return false;
}
/*
 * Do forward merge for the range indicated by 'bad' and the bad range
 * (from the bad table) indexed by 'prev'. The return value is the
 * number of sectors merged from bad->len.
 */
static int front_merge(struct badblocks *bb, int prev,
		       struct badblocks_context *bad)
{
	sector_t sectors = bad->len;
	sector_t s = bad->start;
	u64 *p = bb->page;
	int merged = 0;

	if (s < BB_END(p[prev])) {
		/* overlap: absorb at most up to the current end */
		merged = min_t(sector_t, sectors, BB_END(p[prev]) - s);
	} else {
		/* adjacent: grow, but never past BB_MAX_LEN ... */
		merged = min_t(sector_t, sectors, BB_MAX_LEN - BB_LEN(p[prev]));
		/* ... and never into the next entry */
		if ((prev + 1) < bb->count &&
		    merged > (BB_OFFSET(p[prev + 1]) - BB_END(p[prev]))) {
			merged = BB_OFFSET(p[prev + 1]) - BB_END(p[prev]);
		}
	}

	p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
			  BB_LEN(p[prev]) + merged, bad->ack);

	return merged;
}
/*
 * 'Combine' is a special case which can_merge_front() is not able to
 * handle: if a bad range (indexed by 'prev' from the bad table) exactly
 * starts at bad->start, the bad range ahead of 'prev' (indexed by
 * 'prev - 1') exactly ends where 'prev' starts, and the sum of their
 * lengths does not exceed the BB_MAX_LEN limitation, then the two
 * ranges can be combined.
 *
 * Return 'true' if the bad ranges indexed by 'prev' and 'prev - 1' from
 * the bad table can be combined.
 */
static bool can_combine_front(struct badblocks *bb, int prev,
			      struct badblocks_context *bad)
{
	u64 *p = bb->page;

	if ((prev > 0) &&
	    (BB_OFFSET(p[prev]) == bad->start) &&
	    (BB_END(p[prev - 1]) == BB_OFFSET(p[prev])) &&
	    (BB_LEN(p[prev - 1]) + BB_LEN(p[prev]) <= BB_MAX_LEN) &&
	    (BB_ACK(p[prev - 1]) == BB_ACK(p[prev])))
		return true;
	return false;
}
/*
 * Combine the bad ranges indexed by 'prev' and 'prev - 1' (from the bad
 * table) into one larger bad range, indexed by 'prev - 1'.
 * The caller of front_combine() will decrease bb->count, therefore
 * p[prev] does not need to be cleared after the merge.
 */
static void front_combine(struct badblocks *bb, int prev)
{
	u64 *p = bb->page;

	p[prev - 1] = BB_MAKE(BB_OFFSET(p[prev - 1]),
			      BB_LEN(p[prev - 1]) + BB_LEN(p[prev]),
			      BB_ACK(p[prev]));
	if ((prev + 1) < bb->count)
		memmove(p + prev, p + prev + 1, (bb->count - prev - 1) * 8);
}
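/*
 * Example (sketch): the delete-by-memmove idiom front_combine() relies
 * on. The table is a packed, sorted array of u64 entries, so dropping
 * slot 'at' is a single memmove; the '* 8' in the original code is
 * sizeof(u64). 'example_table_delete' is hypothetical; it returns the
 * new entry count, mirroring how callers decrement bb->count.
 */
static int example_table_delete(u64 *p, int count, int at)
{
	if ((at + 1) < count)
		memmove(p + at, p + at + 1,
			(count - at - 1) * sizeof(u64));
	return count - 1;
}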
/*
 * Return 'true' if the range indicated by 'bad' is exactly forward
 * overlapped with the bad range (from the bad table) indexed by 'front'.
 * Exactly forward overlap means the bad range indexed by 'front' does
 * not cover the whole range indicated by 'bad'.
 */
static bool overlap_front(struct badblocks *bb, int front,
			  struct badblocks_context *bad)
{
	u64 *p = bb->page;

	if (bad->start >= BB_OFFSET(p[front]) &&
	    bad->start < BB_END(p[front]))
		return true;
	return false;
}
/*
 * Return 'true' if the range indicated by 'bad' is exactly backward
 * overlapped with the bad range (from the bad table) indexed by 'behind'.
 */
static bool overlap_behind(struct badblocks *bb, struct badblocks_context *bad,
			   int behind)
{
	u64 *p = bb->page;

	if (bad->start < BB_OFFSET(p[behind]) &&
	    (bad->start + bad->len) > BB_OFFSET(p[behind]))
		return true;
	return false;
}
/*
 * Return 'true' if the range indicated by 'bad' can overwrite the bad
 * range (from the bad table) indexed by 'prev'.
 *
 * The range indicated by 'bad' can overwrite the bad range indexed by
 * 'prev' when:
 * 1) the range covers part or all of the bad range indexed by 'prev',
 * 2) the ack value of 'bad' is larger than the ack value of the bad
 *    range indexed by 'prev'.
 *
 * If the overwrite does not cover the whole bad range (from the bad
 * table) indexed by 'prev', a new range might be split from the
 * existing bad range, so the free space of the bad table must be
 * checked; the number of extra slots needed is returned via 'extra'.
 */
static bool can_front_overwrite(struct badblocks *bb, int prev,
				struct badblocks_context *bad, int *extra)
{
	u64 *p = bb->page;
	int len;

	if (BB_ACK(p[prev]) >= bad->ack)
		return false;

	if (BB_END(p[prev]) <= (bad->start + bad->len)) {
		len = BB_END(p[prev]) - bad->start;
		if (BB_OFFSET(p[prev]) == bad->start)
			*extra = 0;
		else
			*extra = 1;

		bad->len = len;
	} else {
		if (BB_OFFSET(p[prev]) == bad->start)
			*extra = 1;
		else
			*extra = 2;
	}

	if ((bb->count + (*extra)) >= MAX_BADBLOCKS)
		return false;

	return true;
}
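/*
 * Example (sketch): the extra-slot arithmetic can_front_overwrite()
 * implements. 0 extra slots when 'bad' starts where p[prev] starts and
 * reaches at least its end (replace in place); 1 when exactly one piece
 * of p[prev] survives; 2 when 'bad' punches a hole so both the head and
 * the tail of p[prev] survive. Hypothetical restatement.
 */
static int example_overwrite_extra_slots(u64 *p, int prev,
					 sector_t start, sector_t len)
{
	if (BB_END(p[prev]) <= (start + len))	/* tail of p[prev] consumed */
		return (BB_OFFSET(p[prev]) == start) ? 0 : 1;

	/* p[prev] extends past the overwritten range */
	return (BB_OFFSET(p[prev]) == start) ? 1 : 2;
}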
/*
 * Do the overwrite from the range indicated by 'bad' onto the bad range
 * (from the bad table) indexed by 'prev'. The previously called
 * can_front_overwrite() provides 'extra', the number of extra slots the
 * split needs; all the splitting cases are handled here.
 */
static int front_overwrite(struct badblocks *bb, int prev,
			   struct badblocks_context *bad, int extra)
{
	u64 *p = bb->page;
	sector_t orig_end = BB_END(p[prev]);
	int orig_ack = BB_ACK(p[prev]);

	switch (extra) {
	case 0:
		p[prev] = BB_MAKE(BB_OFFSET(p[prev]), BB_LEN(p[prev]),
				  bad->ack);
		break;
	case 1:
		if (BB_OFFSET(p[prev]) == bad->start) {
			p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
					  bad->len, bad->ack);
			memmove(p + prev + 2, p + prev + 1,
				(bb->count - prev - 1) * 8);
			p[prev + 1] = BB_MAKE(bad->start + bad->len,
					      orig_end - BB_END(p[prev]),
					      orig_ack);
		} else {
			p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
					  bad->start - BB_OFFSET(p[prev]),
					  orig_ack);
			/*
			 * prev + 2 -> prev + 1 + 1, which is for,
			 * 1) prev + 1: the slot index of the previous one
			 * 2) + 1: one more slot for extra being 1
			 */
			memmove(p + prev + 2, p + prev + 1,
				(bb->count - prev - 1) * 8);
			p[prev + 1] = BB_MAKE(bad->start, bad->len, bad->ack);
		}
		break;
	case 2:
		p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
				  bad->start - BB_OFFSET(p[prev]),
				  orig_ack);
		/*
		 * prev + 3 -> prev + 1 + 2, which is for,
		 * 1) prev + 1: the slot index of the previous one
		 * 2) + 2: two more slots for extra being 2
		 */
		memmove(p + prev + 3, p + prev + 1,
			(bb->count - prev - 1) * 8);
		p[prev + 1] = BB_MAKE(bad->start, bad->len, bad->ack);
		p[prev + 2] = BB_MAKE(BB_END(p[prev + 1]),
				      orig_end - BB_END(p[prev + 1]),
				      orig_ack);
		break;
	default:
		break;
	}

	return bad->len;
}
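/*
 * Example (hypothetical numbers): the extra == 2 case above, an acked
 * range punching a hole into an unacked entry. Assuming the table holds
 * a single unacknowledged entry covering sectors [1000, 1100):
 */
static void example_overwrite_demo(struct badblocks *bb)
{
	badblocks_set(bb, 1020, 30, 1);	/* acked range overwrites middle */
	/*
	 * The table now holds three entries: [1000, 1020) ack 0,
	 * [1020, 1050) ack 1, and [1050, 1100) ack 0.
	 */
}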
/*
 * Explicitly insert a range indicated by 'bad' into the bad table, where
 * the location is indexed by 'at'. At most BB_MAX_LEN sectors fit in one
 * entry; the number of sectors actually inserted is returned.
 */
static int insert_at(struct badblocks *bb, int at, struct badblocks_context *bad)
{
	u64 *p = bb->page;
	int len;

	len = min_t(sector_t, bad->len, BB_MAX_LEN);
	if (at < bb->count)
		memmove(p + at + 1, p + at, (bb->count - at) * 8);
	p[at] = BB_MAKE(bad->start, len, bad->ack);

	return len;
}
/*
 * Clear bb->unacked_exist if no unacknowledged entry remains in the bad
 * table after an update.
 */
static void badblocks_update_acked(struct badblocks *bb)
{
	u64 *p = bb->page;
	int i;

	if (!bb->unacked_exist)
		return;

	for (i = 0; i < bb->count ; i++) {
		if (!BB_ACK(p[i]))
			return;	/* at least one unacked entry remains */
	}

	bb->unacked_exist = 0;
}
/* Do the exact work to set a bad block range into the bad block table */
static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
			  int acknowledged)
{
	int retried = 0, space_desired = 0;
	int orig_len, len = 0, added = 0;
	struct badblocks_context bad;
	int prev = -1, hint = -1;
	sector_t orig_start;
	unsigned long flags;
	int rv = 0;
	u64 *p;

	if (bb->shift < 0)
		/* badblocks are disabled */
		return 1;

	if (sectors == 0)
		/* invalid number of sectors */
		return 1;

	if (bb->shift) {
		/* round the start down, and the end up */
		sector_t next = s + sectors;

		rounddown(s, bb->shift);
		roundup(next, bb->shift);
		sectors = next - s;
	}

	write_seqlock_irqsave(&bb->lock, flags);

	orig_start = s;
	orig_len = sectors;
	bad.ack = acknowledged;
	p = bb->page;

re_insert:
	bad.start = s;
	bad.len = sectors;
	len = 0;

	/* Empty table: insert the first chunk and move on */
	if (!bb->count) {
		len = insert_at(bb, 0, &bad);
		bb->count++;
		added++;
		goto update_sectors;
	}

	prev = prev_badblocks(bb, &bad, hint);

	/* Range starts before all entries in the table */
	if (prev < 0) {
		if (bb->count < MAX_BADBLOCKS) {
			/* insert at slot 0, clamped so it stops at p[0] */
			if (bad.len > (BB_OFFSET(p[0]) - bad.start))
				bad.len = BB_OFFSET(p[0]) - bad.start;
			len = insert_at(bb, 0, &bad);
			bb->count++;
			added++;
		} else if (overlap_behind(bb, &bad, 0)) {
			/* table full: merge backward into p[0] if possible */
			if (can_merge_behind(bb, &bad, 0)) {
				len = behind_merge(bb, &bad, 0);
				added++;
			} else {
				len = BB_OFFSET(p[0]) - s;
				space_desired = 1;
			}
		} else {
			/* no slot and nothing to merge with: give up */
			goto out;
		}
		hint = 0;
		goto update_sectors;
	}

	/* in case p[prev-1] can be merged with p[prev] */
	if (can_combine_front(bb, prev, &bad)) {
		front_combine(bb, prev);
		bb->count--;
		added++;
		hint = prev;
		goto update_sectors;
	}

	if (overlap_front(bb, prev, &bad)) {
		if (can_merge_front(bb, prev, &bad)) {
			len = front_merge(bb, prev, &bad);
			added++;
		} else {
			int extra = 0;

			if (!can_front_overwrite(bb, prev, &bad, &extra)) {
				/* skip the part already covered by p[prev] */
				len = min_t(sector_t,
					    BB_END(p[prev]) - s, sectors);
				hint = prev;
				goto update_sectors;
			}

			len = front_overwrite(bb, prev, &bad, extra);
			added++;
			bb->count += extra;

			if (can_combine_front(bb, prev, &bad)) {
				front_combine(bb, prev);
				bb->count--;
			}
		}
		hint = prev;
		goto update_sectors;
	}

	if (can_merge_front(bb, prev, &bad)) {
		len = front_merge(bb, prev, &bad);
		added++;
		hint = prev;
		goto update_sectors;
	}

	if (bb->count >= MAX_BADBLOCKS) {
		/* skip the cannot-merge range */
		if (((prev + 1) < bb->count) &&
		    overlap_behind(bb, &bad, prev + 1) &&
		    ((s + sectors) >= BB_END(p[prev + 1]))) {
			len = BB_END(p[prev + 1]) - s;
			hint = prev + 1;
			goto update_sectors;
		}
		/* the rest must wait for table space */
		space_desired = 1;
		hint = -1;
		goto out;
	}

	/* a free slot exists: insert right after 'prev', clamped */
	if ((prev + 1) < bb->count &&
	    overlap_behind(bb, &bad, prev + 1))
		bad.len = min_t(sector_t,
				bad.len, BB_OFFSET(p[prev + 1]) - bad.start);
	len = insert_at(bb, prev + 1, &bad);
	bb->count++;
	added++;
	hint = prev + 1;

update_sectors:
	s += len;
	sectors -= len;

	if (sectors > 0)
		goto re_insert;

	WARN_ON(sectors < 0);

	/*
	 * Check whether the following already-set range can be
	 * merged. The (prev < 0) condition is not handled here,
	 * because it is already complicated enough.
	 */
	if (prev >= 0 &&
	    (prev + 1) < bb->count &&
	    BB_END(p[prev]) == BB_OFFSET(p[prev + 1]) &&
	    (BB_LEN(p[prev]) + BB_LEN(p[prev + 1])) <= BB_MAX_LEN &&
	    BB_ACK(p[prev]) == BB_ACK(p[prev + 1])) {
		p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
				  BB_LEN(p[prev]) + BB_LEN(p[prev + 1]),
				  BB_ACK(p[prev]));

		if ((prev + 2) < bb->count)
			memmove(p + prev + 1, p + prev + 2,
				(bb->count - (prev + 2)) * 8);
		bb->count--;
	}

	/* merging may have freed a slot: retry the unhandled part */
	if (space_desired && bb->count < MAX_BADBLOCKS) {
		s = orig_start;
		sectors = orig_len;
		space_desired = 0;
		if (retried++ < 3)
			goto re_insert;
	}

out:
	if (added) {
		bb->changed = 1;

		if (!acknowledged)
			bb->unacked_exist = 1;
		else
			badblocks_update_acked(bb);
	}

	write_sequnlock_irqrestore(&bb->lock, flags);

	if (!added)
		rv = 1;

	return rv;
}
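/*
 * Example (hypothetical caller): recording a media error through the
 * public wrapper badblocks_set() below, which takes bb->lock and calls
 * _badblocks_set(). 'record_media_error' and the message text are
 * illustrative; the 0-on-success/1-on-failure convention comes from the
 * kernel-doc further down.
 */
static void record_media_error(struct badblocks *bb, sector_t s,
			       int sectors)
{
	/* acknowledged == 0: not yet persisted in on-disk metadata */
	if (badblocks_set(bb, s, sectors, 0))
		pr_warn("badblocks: table full, range %llu+%d dropped\n",
			(unsigned long long)s, sectors);
}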
/*
 * Clear a bad block range from the bad table which is front overlapped
 * with the clearing range. The return value is how many sectors from an
 * already-set bad block range are cleared. If the whole bad block range
 * is covered by the clearing range and fully cleared, 'deleted' is set
 * to 1 for the caller to reduce bb->count.
 */
static int front_clear(struct badblocks *bb, int prev,
		       struct badblocks_context *bad, int *deleted)
{
	sector_t sectors = bad->len;
	sector_t s = bad->start;
	u64 *p = bb->page;
	int cleared = 0;

	*deleted = 0;
	if (s == BB_OFFSET(p[prev])) {
		if (BB_LEN(p[prev]) > sectors) {
			/* clearing the head: move the start up */
			p[prev] = BB_MAKE(BB_OFFSET(p[prev]) + sectors,
					  BB_LEN(p[prev]) - sectors,
					  BB_ACK(p[prev]));
			cleared = sectors;
		} else {
			/* the whole entry is cleared: delete the slot */
			cleared = BB_LEN(p[prev]);
			if ((prev + 1) < bb->count)
				memmove(p + prev, p + prev + 1,
					(bb->count - prev - 1) * 8);
			*deleted = 1;
		}
	} else if (s > BB_OFFSET(p[prev])) {
		/* clearing the tail: shrink the entry to end at 's' */
		cleared = BB_END(p[prev]) - s;
		p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
				  s - BB_OFFSET(p[prev]),
				  BB_ACK(p[prev]));
	}

	return cleared;
}
/*
 * Handle the case where the clearing range hits the middle of an
 * already-set bad block range: the range is split in two around the
 * cleared middle part.
 */
static void front_splitting_clear(struct badblocks *bb, int prev,
				  struct badblocks_context *bad)
{
	u64 *p = bb->page;
	u64 end = BB_END(p[prev]);
	int ack = BB_ACK(p[prev]);
	sector_t sectors = bad->len;
	sector_t s = bad->start;

	p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
			  s - BB_OFFSET(p[prev]),
			  ack);
	memmove(p + prev + 2, p + prev + 1, (bb->count - prev - 1) * 8);
	p[prev + 1] = BB_MAKE(s + sectors, end - s - sectors, ack);
}
/* Do the exact work to clear a bad block range from the bad block table */
static int _badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
{
	struct badblocks_context bad;
	int prev = -1, hint = -1;
	int len = 0, cleared = 0;
	int rv = 0;
	u64 *p;

	if (bb->shift < 0)
		/* badblocks are disabled */
		return 1;

	if (sectors == 0)
		/* invalid number of sectors */
		return 1;

	if (bb->shift) {
		sector_t target;

		/*
		 * When clearing, round the start up and the end down.
		 * This should not matter as the shift should align with
		 * the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
		 * is not than to think a block is not bad when it is.
		 */
		target = s + sectors;
		roundup(s, bb->shift);
		rounddown(target, bb->shift);
		sectors = target - s;
	}

	write_seqlock_irq(&bb->lock);

	bad.ack = true;
	p = bb->page;

re_clear:
	bad.start = s;
	bad.len = sectors;

	/* Empty table: clearing a non-bad range is trivially successful */
	if (!bb->count) {
		len = sectors;
		cleared++;
		goto update_sectors;
	}

	prev = prev_badblocks(bb, &bad, hint);

	/* Start before all entries in the table */
	if (prev < 0) {
		if (overlap_behind(bb, &bad, 0)) {
			len = BB_OFFSET(p[0]) - s;
			hint = 0;
		} else {
			len = sectors;
		}
		/*
		 * Both situations are to clear a non-bad range,
		 * and should be treated as successful.
		 */
		cleared++;
		goto update_sectors;
	}

	/* Start after all entries in the table */
	if ((prev + 1) >= bb->count && !overlap_front(bb, prev, &bad)) {
		len = sectors;
		cleared++;
		goto update_sectors;
	}

	if (overlap_front(bb, prev, &bad)) {
		if ((BB_OFFSET(p[prev]) < bad.start) &&
		    (BB_END(p[prev]) > (bad.start + bad.len))) {
			/* splitting needs one free slot */
			if ((bb->count + 1) < MAX_BADBLOCKS) {
				len = bad.len;
				front_splitting_clear(bb, prev, &bad);
				bb->count += 1;
				cleared++;
			} else {
				/* no space to split: give up this range */
				len = sectors;
			}
		} else {
			int deleted = 0;

			len = front_clear(bb, prev, &bad, &deleted);
			bb->count -= deleted;
			cleared++;
			hint = prev;
		}
		goto update_sectors;
	}

	/* Not front overlapped, but behind overlapped */
	if ((prev + 1) < bb->count && overlap_behind(bb, &bad, prev + 1)) {
		len = BB_OFFSET(p[prev + 1]) - bad.start;
		hint = prev + 1;
		/* Clearing a non-bad range should be treated as successful */
		cleared++;
		goto update_sectors;
	}

	/* Not covering any entry in the table */
	len = sectors;
	/* Clearing a non-bad range should be treated as successful */
	cleared++;

update_sectors:
	s += len;
	sectors -= len;

	if (sectors > 0)
		goto re_clear;

	WARN_ON(sectors < 0);

	if (cleared) {
		badblocks_update_acked(bb);
		bb->changed = 1;
	}

	write_sequnlock_irq(&bb->lock);

	if (!cleared)
		rv = 1;

	return rv;
}
/* Do the exact work to check a bad block range against the bad block table */
static int _badblocks_check(struct badblocks *bb, sector_t s, int sectors,
			    sector_t *first_bad, int *bad_sectors)
{
	int unacked_badblocks, acked_badblocks;
	int prev = -1, hint = -1, set = 0;
	struct badblocks_context bad;
	unsigned int seq;
	int len, rv;
	u64 *p;

	WARN_ON(bb->shift < 0 || sectors == 0);

	if (bb->shift > 0) {
		sector_t target;

		/* round the start down, and the end up */
		target = s + sectors;
		rounddown(s, bb->shift);
		roundup(target, bb->shift);
		sectors = target - s;
	}

retry:
	seq = read_seqbegin(&bb->lock);

	p = bb->page;
	unacked_badblocks = 0;
	acked_badblocks = 0;

re_check:
	bad.start = s;
	bad.len = sectors;

	if (!bb->count) {
		len = sectors;
		goto update_sectors;
	}

	prev = prev_badblocks(bb, &bad, hint);

	/* Start after all entries in the table */
	if ((prev >= 0) &&
	    ((prev + 1) >= bb->count) && !overlap_front(bb, prev, &bad)) {
		len = sectors;
		goto update_sectors;
	}

	/* Overlapped with the front entry */
	if ((prev >= 0) && overlap_front(bb, prev, &bad)) {
		if (BB_ACK(p[prev]))
			acked_badblocks++;
		else
			unacked_badblocks++;

		if (BB_END(p[prev]) >= (s + sectors))
			len = sectors;
		else
			len = BB_END(p[prev]) - s;

		if (set == 0) {
			*first_bad = BB_OFFSET(p[prev]);
			*bad_sectors = BB_LEN(p[prev]);
			set = 1;
		}
		goto update_sectors;
	}

	/* Not front overlapped, but behind overlapped */
	if ((prev + 1) < bb->count && overlap_behind(bb, &bad, prev + 1)) {
		len = BB_OFFSET(p[prev + 1]) - bad.start;
		hint = prev + 1;
		goto update_sectors;
	}

	/* Not covering any entry in the table */
	len = sectors;

update_sectors:
	s += len;
	sectors -= len;

	if (sectors > 0)
		goto re_check;

	WARN_ON(sectors < 0);

	if (unacked_badblocks > 0)
		rv = -1;
	else if (acked_badblocks > 0)
		rv = 1;
	else
		rv = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return rv;
}
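/*
 * Example (sketch): the seqlock read pattern used by _badblocks_check()
 * above. Readers run lockless and retry the whole scan when a writer
 * (badblocks_set()/badblocks_clear() under write_seqlock) raced with
 * them. 'example_count_unacked' is a hypothetical helper, not part of
 * the API.
 */
static int example_count_unacked(struct badblocks *bb)
{
	unsigned int seq;
	int i, unacked;

	do {
		seq = read_seqbegin(&bb->lock);
		unacked = 0;
		for (i = 0; i < bb->count; i++)
			if (!BB_ACK(bb->page[i]))
				unacked++;
	} while (read_seqretry(&bb->lock, seq));

	return unacked;
}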
/**
 * badblocks_check() - check a given range for bad sectors
 * @bb:		the badblocks structure that holds all badblock information
 * @s:		sector (start) at which to check for badblocks
 * @sectors:	number of sectors to check for badblocks
 * @first_bad:	pointer to store location of the first badblock
 * @bad_sectors: pointer to store number of badblocks after @first_bad
 *
 * Entries in the bad-block table are 64 bits wide. This comprises:
 *  Length of bad-range, in sectors: 0-511 for lengths 1-512
 *  Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
 *  'Acknowledged' flag - 1 bit. - the most significant bit.
 *
 * Locking of the bad-block table uses a seqlock so badblocks_check
 * can be quite lightweight.
 *
 * When looking for a bad block we specify a range and want to
 * know if any block in the range is bad. So we binary-search
 * to the last range that starts at-or-before the given endpoint
 * (or "before the sector after the target range"), then see if
 * it ends after the given start.
 *
 * Return:
 *  0: there are no known bad blocks in the range
 *  1: there are known bad blocks which are all acknowledged
 * -1: there are bad blocks which have not yet been acknowledged in metadata.
 * plus the start/length of the first bad section we overlap.
 */
int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
		    sector_t *first_bad, int *bad_sectors)
{
	return _badblocks_check(bb, s, sectors, first_bad, bad_sectors);
}
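/*
 * Example (hypothetical caller): dispatching on the three return values
 * documented above; 'first_bad'/'bad_sectors' receive the start and
 * length of the first overlapping bad range.
 */
static bool example_may_read(struct badblocks *bb, sector_t s, int sectors)
{
	sector_t first_bad;
	int bad_sectors;

	switch (badblocks_check(bb, s, sectors, &first_bad, &bad_sectors)) {
	case 0:		/* no known bad blocks in the range */
		return true;
	case 1:		/* bad blocks exist but are all acknowledged */
		return false;
	default:	/* -1: unacknowledged bad blocks in the range */
		return false;
	}
}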
/**
 * badblocks_set() - Add a range of bad blocks to the table.
 * @bb:		the badblocks structure that holds all badblock information
 * @s:		first sector to mark as bad
 * @sectors:	number of sectors to mark as bad
 * @acknowledged: whether to mark the bad sectors as acknowledged
 *
 * This might extend the table, or might contract it if two adjacent ranges
 * can be merged. We binary-search to find the 'insertion' point, then
 * decide how best to handle it.
 *
 * Return:
 *  0: success
 *  1: failed to set badblocks (out of space)
 */
int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
		  int acknowledged)
{
	return _badblocks_set(bb, s, sectors, acknowledged);
}
/**
 * badblocks_clear() - Remove a range of bad blocks from the table.
 * @bb:		the badblocks structure that holds all badblock information
 * @s:		first sector to mark as good
 * @sectors:	number of sectors to mark as good
 *
 * This may involve extending the table if we split a region,
 * but it must not fail. So if the table becomes full, we just
 * drop the remove request.
 *
 * Return:
 *  0: success
 *  1: failed to clear badblocks
 */
int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
{
	return _badblocks_clear(bb, s, sectors);
}
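/*
 * Example (hypothetical numbers): clearing the middle of an entry splits
 * it in two, which is why the clear path checks for a free table slot
 * first. Assuming the table holds one entry covering [1024, 1088):
 */
static void example_clear_split(struct badblocks *bb)
{
	badblocks_clear(bb, 1040, 16);
	/* the table now holds [1024, 1040) and [1056, 1088) */
}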
/**
 * ack_all_badblocks() - Acknowledge all bad blocks in a list.
 * @bb:		the badblocks structure that holds all badblock information
 *
 * This only succeeds if ->changed is clear. It is used by
 * in-kernel metadata updates.
 */
void ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0 && bb->unacked_exist) {
		u64 *p = bb->page;
		int i;

		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);

				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
/**
 * badblocks_show() - sysfs access to bad-blocks list
 * @bb:		the badblocks structure that holds all badblock information
 * @page:	buffer received from sysfs
 * @unack:	whether to show unacknowledged badblocks
 *
 * Return:
 *  Length of returned data
 */
ssize_t badblocks_show(struct badblocks *bb, char *page, int unack)
{
	int i;
	u64 *p = bb->page;
	unsigned int seq;
	ssize_t len;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);

		i++;

		if (unack && ack)
			continue;

		len += snprintf(page + len, PAGE_SIZE - len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}
/**
 * badblocks_store() - sysfs access to bad-blocks list
 * @bb:		the badblocks structure that holds all badblock information
 * @page:	buffer received from sysfs
 * @len:	length of data received from sysfs
 * @unack:	whether to store unacknowledged badblocks
 *
 * Return:
 *  Length of the buffer processed or -ve error.
 */
ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
			int unack)
{
	unsigned long long sector;
	int length;
	char newline;

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
		fallthrough;
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (badblocks_set(bb, sector, length, !unack))
		return -ENOSPC;

	return len;
}
static int __badblocks_init(struct device *dev, struct badblocks *bb,
			    int enable)
{
	bb->dev = dev;
	bb->count = 0;
	if (enable)
		bb->shift = 0;
	else
		bb->shift = -1;
	if (dev)
		bb->page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
	else
		bb->page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bb->page) {
		bb->shift = -1;
		return -ENOMEM;
	}
	seqlock_init(&bb->lock);

	return 0;
}
/**
 * badblocks_init() - initialize the badblocks structure
 * @bb:		the badblocks structure that holds all badblock information
 * @enable:	whether to enable badblocks accounting
 *
 * Return:
 *  0: success
 *  -ve errno: on error
 */
int badblocks_init(struct badblocks *bb, int enable)
{
	return __badblocks_init(NULL, bb, enable);
}

int devm_init_badblocks(struct device *dev, struct badblocks *bb)
{
	if (!bb)
		return -EINVAL;
	return __badblocks_init(dev, bb, 1);
}
/**
 * badblocks_exit() - free the badblocks structure
 * @bb:		the badblocks structure that holds all badblock information
 */
void badblocks_exit(struct badblocks *bb)
{
	if (!bb)
		return;
	if (bb->dev)
		devm_kfree(bb->dev, bb->page);
	else
		kfree(bb->page);
	bb->page = NULL;
}
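/*
 * Example (hypothetical driver): the init/exit lifecycle. With
 * devm_init_badblocks() the PAGE_SIZE table is device-managed and
 * badblocks_exit() frees it through devm_kfree(); with badblocks_init()
 * the kfree() path is used instead. 'struct my_dev' and the callbacks
 * are illustrative.
 */
struct my_dev {
	struct device *dev;
	struct badblocks bb;
};

static int my_dev_probe(struct my_dev *md)
{
	/* enables accounting with sector granularity (bb->shift == 0) */
	return devm_init_badblocks(md->dev, &md->bb);
}

static void my_dev_remove(struct my_dev *md)
{
	badblocks_exit(&md->bb);
}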