1 // SPDX-License-Identifier: GPL-2.0
5 * - Heavily based on MD badblocks code from Neil Brown
45 * +--------+
47 * +--------+
48 * +-------------+ +-------------+
50 * +-------------+ +-------------+
54 * +-------------+ +--------+ +-------------+
56 * +-------------+ +--------+ +-------------+
57 * 2) A setting range starts exactly at the start LBA of an already set bad blocks
60 * +--------+
62 * +--------+
63 * +-------------+
65 * +-------------+
68 * +-------------+
70 * +-------------+
73 * +-------------+
75 * +-------------+
79 * +--------+----+
81 * +--------+----+
85 * +-------------+
87 * +-------------+
90 * +-------------+
92 * +-------------+
95 * +-------------+
97 * +-------------+
99 * +-------------------+
101 * +-------------------+
102 * +-------------+
104 * +-------------+
108 * +-------------+-----+ +-------------+ +-----+
110 * +-------------+-----+ ===> +-------------+ +-----+
111 * +-------------+ +-------------+
113 * +-------------+ +-------------+
117 * 3) A setting range starts before the start LBA of an already set bad blocks
119 * +-------------+
121 * +-------------+
122 * +-------------+
124 * +-------------+
126 * first part (S1) ends at the start LBA of the already set range E, the second
127 * part (S2) starts exactly at the start LBA of the already set range E.
128 * +----+---------+ +----+ +---------+
130 * +----+---------+ ===> +----+ +---------+
131 * +-------------+ +-------------+
133 * +-------------+ +-------------+
136 * of the already set range E, they will be handled in the next loop in one of
138 * 4) A setting range starts after the start LBA of an already set bad blocks
142 * +---------+
144 * +---------+
145 * +-------------+
147 * +-------------+
149 * they will be merged into one, the result is,
150 * +-------------+
152 * +-------------+
155 * +-------------+
157 * +-------------+
160 * +---+---------+
162 * +---+---------+
165 * +----+
167 * +----+
168 * +--------------+
170 * +--------------+
172 * they will be merged into one, the result is,
173 * +--------------+
175 * +--------------+
178 * +--------------+
180 * +--------------+
184 * +----+----+----+
186 * +----+----+----+
188 * blocks range E. The range S starts after the start LBA of range E, and
190 * +-------------------+
192 * +-------------------+
193 * +-------------+
195 * +-------------+
197 * part (S1) ends at the end of range E, and the second part (S2) covers the rest of
199 * +---------+---------+ +---------+ +---------+
201 * +---------+---------+ ===> +---------+ +---------+
202 * +-------------+ +-------------+
204 * +-------------+ +-------------+
208 * 5) A setting bad blocks range S is adjacent to one or more already set bad
212 * +------+
214 * +------+
215 * +-------+
217 * +-------+
221 * +--------------+
223 * +--------------+
227 * +--------+------+
229 * +--------+------+
232 * +-------------------------------------------------------+
234 * +-------------------------------------------------------+
235 * |<----- BB_MAX_LEN ----->|
236 * +-----+ +-----+ +-----+
238 * +-----+ +-----+ +-----+
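 *    As a concrete example of the splitting above (BB_MAX_LEN is 512 sectors,
 *    matching the "lengths 1-512" entry format documented later in this file):
 *    setting a 1200-sector bad range starting at sector 0 consumes three table
 *    slots, roughly (0, 512), (512, 512) and (1024, 176), assuming the table
 *    has enough free slots and no neighbouring ranges to merge with.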
244 * is an available slot in the bad blocks table, retry to handle more
246 * +------------------------+
248 * +------------------------+
249 * |<----- BB_MAX_LEN ----->|
250 * +-----+-----+-----+---+-----+--+
252 * +-----+-----+-----+---+-----+--+
254 * to no space in the bad blocks table, but the following E1, E2 and E3 ranges
257 * +------------------------+-----+-----+-----+---+-----+--+
259 * +------------------------+-----+-----+-----+---+-----+--+
260 * Since the bad blocks table is not full anymore, retry for the
268 * +--------+
270 * +--------+ S: 1
271 * +-------+-------------+ E1: 1
273 * +-------+-------------+
278 * +-------+--------+----+ S: 1
280 * +-------+--------+----+ E3: 0
282 * blocks table are all acked, merging them into one larger range may
287 * +----------------+----+ acknowledged
289 * +----------------+----+ E3: 0
297 * +------+
299 * +------+
300 * +-------+
302 * +-------+
309 * +--------------+
311 * +--------------+
315 * +------+-------+
317 * +------+-------+
324 * Inside badblocks_set() each loop starts by jumping to the re_insert label, every
326 * which starts before or at the current setting range. Since the setting bad blocks
332 * return correct bad blocks table index immediately.
338 * block range will be split into two, and one more item should be added into the
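 *
 * A rough sketch of that per-loop behaviour (simplified pseudo-code, not the
 * exact control flow of _badblocks_set()):
 *
 *	re_insert:
 *		prev = prev_badblocks(bb, &bad, hint);  // range starting at or before bad.start
 *		handle the front range p[prev]: merge, overwrite or split as
 *		described in the cases above, or insert_at() a free slot when
 *		nothing overlaps;
 *		advance bad.start and shrink bad.len by the sectors just handled;
 *		if (bad.len > 0 and the bad blocks table still has room)
 *			goto re_insert;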
345 * +-----+ | +-----+ | +-----+
347 * +-----+ or +-----+ or +-----+
348 * +---+ | +----+ +----+ | +---+
350 * +---+ | +----+ +----+ | +---+
355 * +---+
357 * +---+
358 * +-----------------+
360 * +-----------------+
363 * +------+ +------+
365 * +------+ +------+
366 * 3) The clearing range starts exactly at the same LBA as an already set bad block range
368 * 3.1) Partially covered at head part
369 * +------------+
371 * +------------+
372 * +-----------------+
374 * +-----------------+
376 * start LBA to end of C and shrink the range to BB_LEN(E) - BB_LEN(C). No
378 * +----+
380 * +----+
382 * +-----------------+
384 * +-----------------+
385 * +-----------------+
387 * +-----------------+
390 * 4) The clearing range ends exactly at the same LBA as an already set bad block
392 * +-------+
394 * +-------+
395 * +-----------------+
397 * +-----------------+
399 * end to the start of C, and reduce its length to BB_LEN(E) - BB_LEN(C).
401 * +---------+
403 * +---------+
408 * +----------+
410 * +----------+
411 * +------------+
413 * +------------+
415 * first part ends at the start LBA of range E, and the second part starts at
417 * +----+-----+ +----+ +-----+
419 * +----+-----+ ===> +----+ +-----+
420 * +------------+ +------------+
422 * +------------+ +------------+
427 * +----------+
429 * +----------+
430 * +------------+
432 * +------------+
434 * first part C1 ends at the same end LBA as range E, and the second part starts
435 * at the end LBA of range E.
436 * +----+-----+ +----+ +-----+
438 * +----+-----+ ===> +----+ +-----+
439 * +------------+ +------------+
441 * +------------+ +------------+
448 * while-loop. The idea is similar to bad blocks range setting but much
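 *
 * As a worked example of the last case above: with an already set bad range
 * E = (start 100, len 64) (sectors 100..163), clearing C = (start 150, len 30)
 * splits C into C1 (sectors 150..163) and C2 (sectors 164..179); C1 shrinks E
 * to (100, 50), and C2 covers no bad blocks, so clearing it is simply treated
 * as successful.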
453 * Find the range that starts at-or-before 's' in the bad table. The search
454 * starts from index 'hint' and stops at index 'hint_end' from the bad
460 u64 *p = bb->page; in prev_by_hint()
461 int ret = -1; in prev_by_hint()
463 while ((hint < hint_end) && ((hint + 1) <= bb->count) && in prev_by_hint()
465 if ((hint + 1) == bb->count || BB_OFFSET(p[hint + 1]) > s) { in prev_by_hint()
476 * Find the range that starts at-or-before bad->start. If 'hint' is provided
478 * very probably the wanted bad range can be found from the hint index,
479 * then the unnecessary while-loop iteration can be avoided.
484 sector_t s = bad->start; in prev_badblocks()
485 int ret = -1; in prev_badblocks()
489 if (!bb->count) in prev_badblocks()
499 hi = bb->count; in prev_badblocks()
500 p = bb->page; in prev_badblocks()
504 return -1; in prev_badblocks()
505 if (BB_OFFSET(p[hi - 1]) <= s) in prev_badblocks()
506 return hi - 1; in prev_badblocks()
509 while (hi - lo > 1) { in prev_badblocks()
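/*
 * For illustration, the lookup performed by prev_badblocks() above boils down
 * to the following sketch (hypothetical helper name, the 'hint' fast path of
 * prev_by_hint() omitted): binary-search for the last table slot whose start
 * sector is at or before 's', returning -1 when every slot starts after 's'.
 */
static int example_prev_badblocks(struct badblocks *bb, sector_t s)
{
	u64 *p = bb->page;
	int lo = 0, hi = bb->count;

	if (!hi || BB_OFFSET(p[0]) > s)
		return -1;
	if (BB_OFFSET(p[hi - 1]) <= s)
		return hi - 1;

	/* p[lo] starts at or before s; the wanted index is in [lo, hi) */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;

		if (BB_OFFSET(p[mid]) <= s)
			lo = mid;
		else
			hi = mid;
	}

	return lo;
}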
537 sector_t s = bad->start; in can_merge_front()
538 u64 *p = bb->page; in can_merge_front()
540 if (BB_ACK(p[prev]) == bad->ack && in can_merge_front()
550 * merged from bad->len.
554 sector_t sectors = bad->len; in front_merge()
555 sector_t s = bad->start; in front_merge()
556 u64 *p = bb->page; in front_merge()
562 merged = min_t(sector_t, sectors, BB_END(p[prev]) - s); in front_merge()
564 merged = min_t(sector_t, sectors, BB_MAX_LEN - BB_LEN(p[prev])); in front_merge()
565 if ((prev + 1) < bb->count && in front_merge()
566 merged > (BB_OFFSET(p[prev + 1]) - BB_END(p[prev]))) { in front_merge()
567 merged = BB_OFFSET(p[prev + 1]) - BB_END(p[prev]); in front_merge()
571 BB_LEN(p[prev]) + merged, bad->ack); in front_merge()
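/*
 * Example of the length capping in front_merge() above: if p[prev] covers
 * (start 0, len 500) and the setting range starts right at sector 500 with
 * 100 sectors, 'merged' is capped to BB_MAX_LEN - 500 = 12, so p[prev] grows
 * to (0, 512) and the remaining 88 sectors are left for the caller's next
 * loop iteration.  If the next slot p[prev + 1] happened to start at sector
 * 508, 'merged' would instead be clipped to 8 so the two slots do not overlap.
 */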
580 * starts at bad->start, and the bad range ahead of 'prev' (indexed by
581 * 'prev - 1' in the bad table) ends exactly where 'prev' starts, and
585 * Return 'true' if bad ranges indexed by 'prev' and 'prev - 1' from bad
591 u64 *p = bb->page; in can_combine_front()
594 (BB_OFFSET(p[prev]) == bad->start) && in can_combine_front()
595 (BB_END(p[prev - 1]) == BB_OFFSET(p[prev])) && in can_combine_front()
596 (BB_LEN(p[prev - 1]) + BB_LEN(p[prev]) <= BB_MAX_LEN) && in can_combine_front()
597 (BB_ACK(p[prev - 1]) == BB_ACK(p[prev]))) in can_combine_front()
603 * Combine the bad ranges indexed by 'prev' and 'prev - 1' (from bad
604 * table) into one larger bad range, and the new range is indexed by
605 * 'prev - 1'.
606 * The caller of front_combine() will decrease bb->count, therefore
611 u64 *p = bb->page; in front_combine()
613 p[prev - 1] = BB_MAKE(BB_OFFSET(p[prev - 1]), in front_combine()
614 BB_LEN(p[prev - 1]) + BB_LEN(p[prev]), in front_combine()
616 if ((prev + 1) < bb->count) in front_combine()
617 memmove(p + prev, p + prev + 1, (bb->count - prev - 1) * 8); in front_combine()
629 u64 *p = bb->page; in overlap_front()
631 if (bad->start >= BB_OFFSET(p[front]) && in overlap_front()
632 bad->start < BB_END(p[front])) in overlap_front()
644 u64 *p = bb->page; in overlap_behind()
646 if (bad->start < BB_OFFSET(p[behind]) && in overlap_behind()
647 (bad->start + bad->len) > BB_OFFSET(p[behind])) in overlap_behind()
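/*
 * For example, with p[i] covering sectors 100..163: a setting range starting
 * at sector 120 overlaps the front range p[i] (its start lies inside
 * 100..163), while a setting range (start 80, len 30) does not start inside
 * p[i] but still overlaps behind it, because 80 + 30 > 100.
 */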
676 u64 *p = bb->page; in can_front_overwrite()
681 if (BB_ACK(p[prev]) >= bad->ack) in can_front_overwrite()
684 if (BB_END(p[prev]) <= (bad->start + bad->len)) { in can_front_overwrite()
685 len = BB_END(p[prev]) - bad->start; in can_front_overwrite()
686 if (BB_OFFSET(p[prev]) == bad->start) in can_front_overwrite()
691 bad->len = len; in can_front_overwrite()
693 if (BB_OFFSET(p[prev]) == bad->start) in can_front_overwrite()
698 * one, an extra slot is needed from the bad table. in can_front_overwrite()
703 if ((bb->count + (*extra)) > MAX_BADBLOCKS) in can_front_overwrite()
719 u64 *p = bb->page; in front_overwrite()
726 bad->ack); in front_overwrite()
729 if (BB_OFFSET(p[prev]) == bad->start) { in front_overwrite()
731 bad->len, bad->ack); in front_overwrite()
733 (bb->count - prev - 1) * 8); in front_overwrite()
734 p[prev + 1] = BB_MAKE(bad->start + bad->len, in front_overwrite()
735 orig_end - BB_END(p[prev]), in front_overwrite()
739 bad->start - BB_OFFSET(p[prev]), in front_overwrite()
742 * prev + 2 -> prev + 1 + 1, which is for, in front_overwrite()
743 * 1) prev + 1: the slot index of the previous one in front_overwrite()
744 * 2) + 1: one more slot for extra being 1. in front_overwrite()
747 (bb->count - prev - 1) * 8); in front_overwrite()
748 p[prev + 1] = BB_MAKE(bad->start, bad->len, bad->ack); in front_overwrite()
753 bad->start - BB_OFFSET(p[prev]), in front_overwrite()
756 * prev + 3 -> prev + 1 + 2, which is for, in front_overwrite()
757 * 1) prev + 1: the slot index of the previous one in front_overwrite()
761 (bb->count - prev - 1) * 8); in front_overwrite()
762 p[prev + 1] = BB_MAKE(bad->start, bad->len, bad->ack); in front_overwrite()
764 orig_end - BB_END(p[prev + 1]), in front_overwrite()
771 return bad->len; in front_overwrite()
776 * the location is indexed by 'at'.
778 static int insert_at(struct badblocks *bb, int at, struct badblocks_context *bad) in insert_at() argument
780 u64 *p = bb->page; in insert_at()
785 len = min_t(sector_t, bad->len, BB_MAX_LEN); in insert_at()
786 if (at < bb->count) in insert_at()
787 memmove(p + at + 1, p + at, (bb->count - at) * 8); in insert_at()
788 p[at] = BB_MAKE(bad->start, len, bad->ack); in insert_at()
796 u64 *p = bb->page; in badblocks_update_acked()
799 if (!bb->unacked_exist) in badblocks_update_acked()
802 for (i = 0; i < bb->count ; i++) { in badblocks_update_acked()
810 bb->unacked_exist = 0; in badblocks_update_acked()
819 u64 *p = bb->page; in try_adjacent_combine()
821 if (prev >= 0 && (prev + 1) < bb->count && in try_adjacent_combine()
829 if ((prev + 2) < bb->count) in try_adjacent_combine()
831 (bb->count - (prev + 2)) * 8); in try_adjacent_combine()
832 bb->count--; in try_adjacent_combine()
844 int prev = -1, hint = -1; in _badblocks_set()
848 if (bb->shift < 0) in _badblocks_set()
856 if (bb->shift) { in _badblocks_set()
860 rounddown(s, 1 << bb->shift); in _badblocks_set()
861 roundup(next, 1 << bb->shift); in _badblocks_set()
862 sectors = next - s; in _badblocks_set()
865 write_seqlock_irqsave(&bb->lock, flags); in _badblocks_set()
868 p = bb->page; in _badblocks_set()
880 bb->count++; in _badblocks_set()
890 if (bad.len > (BB_OFFSET(p[0]) - bad.start)) in _badblocks_set()
891 bad.len = BB_OFFSET(p[0]) - bad.start; in _badblocks_set()
893 bb->count++; in _badblocks_set()
899 /* in case p[prev-1] can be merged with p[prev] */ in _badblocks_set()
902 bb->count--; in _badblocks_set()
923 BB_END(p[prev]) - s, sectors); in _badblocks_set()
930 bb->count += extra; in _badblocks_set()
934 bb->count--; in _badblocks_set()
942 if ((prev + 1) < bb->count && in _badblocks_set()
945 bad.len, BB_OFFSET(p[prev + 1]) - bad.start); in _badblocks_set()
948 bb->count++; in _badblocks_set()
954 sectors -= len; in _badblocks_set()
971 bb->unacked_exist = 1; in _badblocks_set()
976 write_sequnlock_irqrestore(&bb->lock, flags); in _badblocks_set()
986 * the caller to reduce bb->count.
991 sector_t sectors = bad->len; in front_clear()
992 sector_t s = bad->start; in front_clear()
993 u64 *p = bb->page; in front_clear()
1000 BB_LEN(p[prev]) - sectors, in front_clear()
1006 if ((prev + 1) < bb->count) in front_clear()
1008 (bb->count - prev - 1) * 8); in front_clear()
1013 cleared = BB_END(p[prev]) - s; in front_clear()
1015 s - BB_OFFSET(p[prev]), in front_clear()
1034 u64 *p = bb->page; in front_splitting_clear()
1037 sector_t sectors = bad->len; in front_splitting_clear()
1038 sector_t s = bad->start; in front_splitting_clear()
1041 s - BB_OFFSET(p[prev]), in front_splitting_clear()
1043 memmove(p + prev + 2, p + prev + 1, (bb->count - prev - 1) * 8); in front_splitting_clear()
1044 p[prev + 1] = BB_MAKE(s + sectors, end - s - sectors, ack); in front_splitting_clear()
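/*
 * Worked example for front_splitting_clear() above: with an existing bad
 * range p[prev] = (start 100, len 64) (sectors 100..163), clearing
 * bad->start = 120 and bad->len = 8 rewrites p[prev] to (100, 20) and inserts
 * (128, 36) behind it, so sectors 100..119 and 128..163 stay marked bad while
 * 120..127 become good.  This costs one extra table slot, which is why the
 * caller only takes this path when bb->count + 1 still fits in MAX_BADBLOCKS.
 */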
1052 int prev = -1, hint = -1; in _badblocks_clear()
1056 if (bb->shift < 0) in _badblocks_clear()
1064 if (bb->shift) { in _badblocks_clear()
1074 roundup(s, 1 << bb->shift); in _badblocks_clear()
1075 rounddown(target, 1 << bb->shift); in _badblocks_clear()
1076 sectors = target - s; in _badblocks_clear()
1079 write_seqlock_irq(&bb->lock); in _badblocks_clear()
1082 p = bb->page; in _badblocks_clear()
1100 len = BB_OFFSET(p[0]) - s; in _badblocks_clear()
1106 * Both situations are to clear non-bad range, in _badblocks_clear()
1114 if ((prev + 1) >= bb->count && !overlap_front(bb, prev, &bad)) { in _badblocks_clear()
1131 if ((bb->count + 1) <= MAX_BADBLOCKS) { in _badblocks_clear()
1133 bb->count += 1; in _badblocks_clear()
1143 bb->count -= deleted; in _badblocks_clear()
1152 if ((prev + 1) < bb->count && overlap_behind(bb, &bad, prev + 1)) { in _badblocks_clear()
1153 len = BB_OFFSET(p[prev + 1]) - bad.start; in _badblocks_clear()
1155 /* Clearing a non-bad range should be treated as successful */ in _badblocks_clear()
1162 /* Clearing a non-bad range should be treated as successful */ in _badblocks_clear()
1167 sectors -= len; in _badblocks_clear()
1177 write_sequnlock_irq(&bb->lock); in _badblocks_clear()
1189 int prev = -1, hint = -1, set = 0; in _badblocks_check()
1193 u64 *p = bb->page; in _badblocks_check()
1209 ((prev + 1) >= bb->count) && !overlap_front(bb, prev, &bad)) { in _badblocks_check()
1224 len = BB_END(p[prev]) - s; in _badblocks_check()
1235 if ((prev + 1) < bb->count && overlap_behind(bb, &bad, prev + 1)) { in _badblocks_check()
1236 len = BB_OFFSET(p[prev + 1]) - bad.start; in _badblocks_check()
1249 sectors -= len; in _badblocks_check()
1255 rv = -1; in _badblocks_check()
1265 * badblocks_check() - check a given range for bad sectors
1267 * @s: sector (start) at which to check for badblocks
1274 * Entries in the bad-block table are 64bits wide. This comprises:
1275 * Length of bad-range, in sectors: 0-511 for lengths 1-512
1276 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
1279 * 'Acknowledged' flag - 1 bit - the most significant bit.
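 *
 * For illustration only, that layout corresponds to helpers along these lines
 * (the canonical macros live in include/linux/badblocks.h):
 *	BB_OFFSET(x)       = ((x) >> 9) & ((1ULL << 54) - 1)	54-bit start sector
 *	BB_LEN(x)          = ((x) & 0x1ff) + 1			1 to 512 sectors
 *	BB_ACK(x)          = !!((x) >> 63)			acknowledged bit
 *	BB_END(x)          = BB_OFFSET(x) + BB_LEN(x)
 *	BB_MAKE(a, l, ack) = ((a) << 9) | ((l) - 1) | ((u64)!!(ack) << 63)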
1281 * Locking of the bad-block table uses a seqlock so badblocks_check
1287 * know if any block in the range is bad. So we binary-search
1288 * to the last range that starts at-or-before the given endpoint,
1295 * -1: there are bad blocks which have not yet been acknowledged in metadata.
1304 WARN_ON(bb->shift < 0 || sectors == 0); in badblocks_check()
1306 if (bb->shift > 0) { in badblocks_check()
1310 rounddown(s, 1 << bb->shift); in badblocks_check()
1311 roundup(target, 1 << bb->shift); in badblocks_check()
1312 sectors = target - s; in badblocks_check()
1316 seq = read_seqbegin(&bb->lock); in badblocks_check()
1318 if (read_seqretry(&bb->lock, seq)) in badblocks_check()
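/*
 * Caller-side sketch (assuming the long-standing prototype
 * int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
 * sector_t *first_bad, int *bad_sectors); exact parameter types have varied
 * slightly between kernel versions).  A block driver checking an I/O range
 * would typically do something like:
 *
 *	sector_t first_bad;
 *	int bad_sectors;
 *
 *	switch (badblocks_check(bb, sector, nr_sectors,
 *				&first_bad, &bad_sectors)) {
 *	case 0:		// no known bad blocks in the range
 *		break;
 *	case 1:		// acknowledged bad blocks; first_bad/bad_sectors
 *			// describe where they start and how long they are
 *		break;
 *	default:	// -1: unacknowledged bad blocks; the caller may need
 *			// to wait for the metadata to be written out first
 *		break;
 *	}
 */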
1326 * badblocks_set() - Add a range of bad blocks to the table.
1333 * can be merged. We binary-search to find the 'insertion' point, then
1349 * badblocks_clear() - Remove a range of bad blocks from the table.
1369 * ack_all_badblocks() - Acknowledge all bad blocks in a list.
1372 * This only succeeds if ->changed is clear. It is used by
1373 * in-kernel metadata updates
1377 if (bb->page == NULL || bb->changed) in ack_all_badblocks()
1380 write_seqlock_irq(&bb->lock); in ack_all_badblocks()
1382 if (bb->changed == 0 && bb->unacked_exist) { in ack_all_badblocks()
1383 u64 *p = bb->page; in ack_all_badblocks()
1386 for (i = 0; i < bb->count ; i++) { in ack_all_badblocks()
1395 for (i = 0; i < bb->count ; i++) in ack_all_badblocks()
1399 bb->unacked_exist = 0; in ack_all_badblocks()
1401 write_sequnlock_irq(&bb->lock); in ack_all_badblocks()
1406 * badblocks_show() - sysfs access to bad-blocks list
1418 u64 *p = bb->page; in badblocks_show()
1421 if (bb->shift < 0) in badblocks_show()
1425 seq = read_seqbegin(&bb->lock); in badblocks_show()
1430 while (len < PAGE_SIZE && i < bb->count) { in badblocks_show()
1440 len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n", in badblocks_show()
1441 (unsigned long long)s << bb->shift, in badblocks_show()
1442 length << bb->shift); in badblocks_show()
1445 bb->unacked_exist = 0; in badblocks_show()
1447 if (read_seqretry(&bb->lock, seq)) in badblocks_show()
1455 * badblocks_store() - sysfs access to bad-blocks list
1462 * Length of the buffer processed or -ve error.
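/*
 * For example, when this backs the usual sysfs bad_blocks attribute, writing
 * the text "2345 10\n" marks a 10-sector bad range starting at sector 2345,
 * and reading the attribute back prints one "start length" pair per line in
 * the same format badblocks_show() above emits.
 */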
1474 return -EINVAL; in badblocks_store()
1478 return -EINVAL; in badblocks_store()
1481 return -EINVAL; in badblocks_store()
1485 return -ENOSPC; in badblocks_store()
1494 bb->dev = dev; in __badblocks_init()
1495 bb->count = 0; in __badblocks_init()
1497 bb->shift = 0; in __badblocks_init()
1499 bb->shift = -1; in __badblocks_init()
1501 bb->page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL); in __badblocks_init()
1503 bb->page = kzalloc(PAGE_SIZE, GFP_KERNEL); in __badblocks_init()
1504 if (!bb->page) { in __badblocks_init()
1505 bb->shift = -1; in __badblocks_init()
1506 return -ENOMEM; in __badblocks_init()
1508 seqlock_init(&bb->lock); in __badblocks_init()
1514 * badblocks_init() - initialize the badblocks structure
1520 * -ve errno: on error
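/*
 * Lifecycle sketch for a hypothetical driver, using the helpers defined in
 * this file (badblocks_init(), devm_init_badblocks(), badblocks_exit()).
 * The badblocks_set() call is shown without checking its return value, whose
 * convention differs across kernel versions:
 *
 *	struct badblocks bb;
 *	int ret;
 *
 *	ret = badblocks_init(&bb, 1);	// enable=1: bb.shift = 0, page allocated
 *	if (ret)
 *		return ret;
 *	...
 *	badblocks_set(&bb, sector, len, 0);	// record an unacknowledged range
 *	...
 *	badblocks_exit(&bb);		// frees bb.page
 *
 * Device-managed users call devm_init_badblocks(dev, &bb) instead, in which
 * case the page is devm-allocated and released together with the device.
 */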
1531 return -EINVAL; in devm_init_badblocks()
1537 * badblocks_exit() - free the badblocks structure
1544 if (bb->dev) in badblocks_exit()
1545 devm_kfree(bb->dev, bb->page); in badblocks_exit()
1547 kfree(bb->page); in badblocks_exit()
1548 bb->page = NULL; in badblocks_exit()