Lines Matching +full:0 +full:- +full:indexed
1 // SPDX-License-Identifier: GPL-2.0
5 * - Heavily based on MD badblocks code from Neil Brown
 * [Diagrams elided: the ASCII illustrations of the bad-block range
 *  setting cases (file lines 45-238) survived the line filter only as
 *  box borders and "===>" transformation arrows. The one recoverable
 *  detail is the "|<----- BB_MAX_LEN ----->|" width marker at file
 *  line 235, from the case where a range longer than BB_MAX_LEN is
 *  recorded as multiple BB_MAX_LEN-sized entries.]
244 * is an available slot in the bad blocks table, re-try again to handle more
 * [Diagram elided (file lines 246-252): the table-full retry example;
 *  only box borders and another BB_MAX_LEN width marker survived.]
254 * to no space in the bad blocks table, but the following E1, E2 and E3 ranges
260 * Since the bad blocks table is not full anymore, re-try again for the
 * [Diagram elided (file lines 268-289): setting an acknowledged range S
 *  across existing ranges; only the box borders and the ack flags
 *  survived (S: 1, E1: 1, E2: 0, E3: 0, final result marked
 *  "acknowledged").]
 * [Diagrams elided (file lines 297-350): the overlap cases between the
 *  range being processed and existing bad ranges; only box borders and
 *  the "or" separators between alternative layouts survived.]
352 * happens, simply returns 0.
 * [Diagrams elided (file lines 355-374); only box borders survived.]
376 * start LBA to end of C and shrink the range to BB_LEN(E) - BB_LEN(C). No
 * [Diagrams elided (file lines 378-397); only box borders survived.]
399 * end to the start of C, and reduce its length to BB_LEN(E) - BB_LEN(C).
 * [Diagrams elided (file lines 401-441): the cases where a clear
 *  operation splits an existing bad range; only box borders and "===>"
 *  arrows survived.]
448 * while-loop. The idea is similar to bad blocks range setting but much simpler.
453 * Find the range that starts at-or-before 's' in the bad table. The search
454 * starts from index 'hint' and stops before index 'hint_end'.
460 u64 *p = bb->page; in prev_by_hint()
461 int ret = -1; in prev_by_hint()
463 while ((hint < hint_end) && ((hint + 1) <= bb->count) && in prev_by_hint()
465 if ((hint + 1) == bb->count || BB_OFFSET(p[hint + 1]) > s) { in prev_by_hint()
476 * Find the range that starts at-or-before bad->start. If 'hint' is provided
477 * (hint >= 0) then search the bad table starting from 'hint' first; if the
479 * hinted lookup hits, the unnecessary while-loop iterations can be avoided.
484 sector_t s = bad->start; in prev_badblocks()
485 int ret = -1; in prev_badblocks()
489 if (!bb->count) in prev_badblocks()
492 if (hint >= 0) { in prev_badblocks()
494 if (ret >= 0) in prev_badblocks()
498 lo = 0; in prev_badblocks()
499 hi = bb->count; in prev_badblocks()
500 p = bb->page; in prev_badblocks()
504 return -1; in prev_badblocks()
505 if (BB_OFFSET(p[hi - 1]) <= s) in prev_badblocks()
506 return hi - 1; in prev_badblocks()
509 while (hi - lo > 1) { in prev_badblocks()
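Only the fast-path checks and the loop header of the bisection survived above. A minimal userspace sketch of the invariant prev_badblocks() maintains (plain sorted offsets instead of packed records; the names here are illustrative, not the kernel's):

	#include <stdint.h>

	/*
	 * Return the index of the last entry whose start is <= s, or -1
	 * if every entry starts after s. 'starts' must be sorted ascending.
	 */
	static int last_at_or_before(const uint64_t *starts, int count, uint64_t s)
	{
		int lo = 0, hi = count;

		if (count == 0 || starts[0] > s)
			return -1;		/* no candidate at all */
		if (starts[count - 1] <= s)
			return count - 1;	/* fast path seen at file line 505 */

		/* invariant: starts[lo] <= s, and starts[hi] > s */
		while (hi - lo > 1) {
			int mid = (lo + hi) / 2;

			if (starts[mid] <= s)
				lo = mid;
			else
				hi = mid;
		}
		return lo;
	}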
532 * merged with the bad range (from the bad table) indexed by 'prev'.
537 sector_t s = bad->start; in can_merge_front()
538 u64 *p = bb->page; in can_merge_front()
540 if (BB_ACK(p[prev]) == bad->ack && in can_merge_front()
549 * (from bad table) indexed by 'prev'. The return value is the number of
550 * sectors merged from bad->len.
554 sector_t sectors = bad->len; in front_merge()
555 sector_t s = bad->start; in front_merge()
556 u64 *p = bb->page; in front_merge()
557 int merged = 0; in front_merge()
562 merged = min_t(sector_t, sectors, BB_END(p[prev]) - s); in front_merge()
564 merged = min_t(sector_t, sectors, BB_MAX_LEN - BB_LEN(p[prev])); in front_merge()
565 if ((prev + 1) < bb->count && in front_merge()
566 merged > (BB_OFFSET(p[prev + 1]) - BB_END(p[prev]))) { in front_merge()
567 merged = BB_OFFSET(p[prev + 1]) - BB_END(p[prev]); in front_merge()
571 BB_LEN(p[prev]) + merged, bad->ack); in front_merge()
579 * handle: if a bad range (indexed by 'prev' from the bad table) starts
580 * exactly at bad->start, and the bad range ahead of 'prev' (indexed by
581 * 'prev - 1' from the bad table) ends exactly where 'prev' starts, and
585 * Return 'true' if bad ranges indexed by 'prev' and 'prev - 1' from bad
591 u64 *p = bb->page; in can_combine_front()
593 if ((prev > 0) && in can_combine_front()
594 (BB_OFFSET(p[prev]) == bad->start) && in can_combine_front()
595 (BB_END(p[prev - 1]) == BB_OFFSET(p[prev])) && in can_combine_front()
596 (BB_LEN(p[prev - 1]) + BB_LEN(p[prev]) <= BB_MAX_LEN) && in can_combine_front()
597 (BB_ACK(p[prev - 1]) == BB_ACK(p[prev]))) in can_combine_front()
603 * Combine the bad ranges indexed by 'prev' and 'prev - 1' (from bad
604 * table) into one larger bad range, and the new range is indexed by
605 * 'prev - 1'.
606 * The caller of front_combine() will decrease bb->count, therefore
611 u64 *p = bb->page; in front_combine()
613 p[prev - 1] = BB_MAKE(BB_OFFSET(p[prev - 1]), in front_combine()
614 BB_LEN(p[prev - 1]) + BB_LEN(p[prev]), in front_combine()
616 if ((prev + 1) < bb->count) in front_combine()
617 memmove(p + prev, p + prev + 1, (bb->count - prev - 1) * 8); in front_combine()
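The "* 8" in the memmove() above is sizeof(u64) written as a literal: each bad-block record is one u64, and removing record 'prev' slides the tail forward one slot. A standalone sketch of the idiom:

	#include <stdint.h>
	#include <string.h>

	/* Delete entry 'at' from a table of 'count' u64 records by sliding
	 * the tail forward one slot; the caller then decrements the count,
	 * just as front_combine() relies on its caller to drop bb->count. */
	static void table_delete(uint64_t *p, int count, int at)
	{
		if (at + 1 < count)
			memmove(p + at, p + at + 1,
				(count - at - 1) * sizeof(uint64_t));
	}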
622 * overlapped with the bad range (from bad table) indexed by 'front'.
623 * Exactly forward overlap means the bad range (from bad table) indexed
629 u64 *p = bb->page; in overlap_front()
631 if (bad->start >= BB_OFFSET(p[front]) && in overlap_front()
632 bad->start < BB_END(p[front])) in overlap_front()
639 * overlapped with the bad range (from bad table) indexed by 'behind'.
644 u64 *p = bb->page; in overlap_behind()
646 if (bad->start < BB_OFFSET(p[behind]) && in overlap_behind()
647 (bad->start + bad->len) > BB_OFFSET(p[behind])) in overlap_behind()
654 * range (from bad table) indexed by 'prev'.
656 * The range indicated by 'bad' can overwrite the bad range indexed by
659 * range (from bad table) indexed by 'prev'.
664 * indexed by 'prev', new range might be split from existing bad range,
676 u64 *p = bb->page; in can_front_overwrite()
681 if (BB_ACK(p[prev]) >= bad->ack) in can_front_overwrite()
684 if (BB_END(p[prev]) <= (bad->start + bad->len)) { in can_front_overwrite()
685 len = BB_END(p[prev]) - bad->start; in can_front_overwrite()
686 if (BB_OFFSET(p[prev]) == bad->start) in can_front_overwrite()
687 *extra = 0; in can_front_overwrite()
691 bad->len = len; in can_front_overwrite()
693 if (BB_OFFSET(p[prev]) == bad->start) in can_front_overwrite()
703 if ((bb->count + (*extra)) > MAX_BADBLOCKS) in can_front_overwrite()
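Read together, the surviving branches compute how many extra table slots an overwrite consumes before checking against MAX_BADBLOCKS. A userspace sketch of that accounting (e_start/e_end and the function name are illustrative, not kernel identifiers; the kernel also truncates bad->len in the first branch, which is omitted here):

	#include <stdint.h>

	/*
	 * Slots needed beyond the record being overwritten:
	 *   0 - 'bad' covers E's tail and starts at E's offset (in-place)
	 *   1 - E keeps an unchanged head, or 'bad' splits off one side
	 *   2 - 'bad' lands strictly inside E, splitting it in three
	 */
	static int extra_slots_needed(uint64_t e_start, uint64_t e_end,
				      uint64_t bad_start, uint64_t bad_end)
	{
		if (e_end <= bad_end)
			return (e_start == bad_start) ? 0 : 1;
		return (e_start == bad_start) ? 1 : 2;
	}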
711 * (from bad table) indexed by 'prev'.
719 u64 *p = bb->page; in front_overwrite()
724 case 0: in front_overwrite()
726 bad->ack); in front_overwrite()
729 if (BB_OFFSET(p[prev]) == bad->start) { in front_overwrite()
731 bad->len, bad->ack); in front_overwrite()
733 (bb->count - prev - 1) * 8); in front_overwrite()
734 p[prev + 1] = BB_MAKE(bad->start + bad->len, in front_overwrite()
735 orig_end - BB_END(p[prev]), in front_overwrite()
739 bad->start - BB_OFFSET(p[prev]), in front_overwrite()
742 * prev + 2 -> prev + 1 + 1, which is for, in front_overwrite()
747 (bb->count - prev - 1) * 8); in front_overwrite()
748 p[prev + 1] = BB_MAKE(bad->start, bad->len, bad->ack); in front_overwrite()
753 bad->start - BB_OFFSET(p[prev]), in front_overwrite()
756 * prev + 3 -> prev + 1 + 2, which is for, in front_overwrite()
761 (bb->count - prev - 1) * 8); in front_overwrite()
762 p[prev + 1] = BB_MAKE(bad->start, bad->len, bad->ack); in front_overwrite()
764 orig_end - BB_END(p[prev + 1]), in front_overwrite()
771 return bad->len; in front_overwrite()
776 * the location is indexed by 'at'.
780 u64 *p = bb->page; in insert_at()
785 len = min_t(sector_t, bad->len, BB_MAX_LEN); in insert_at()
786 if (at < bb->count) in insert_at()
787 memmove(p + at + 1, p + at, (bb->count - at) * 8); in insert_at()
788 p[at] = BB_MAKE(bad->start, len, bad->ack); in insert_at()
796 u64 *p = bb->page; in badblocks_update_acked()
799 if (!bb->unacked_exist) in badblocks_update_acked()
802 for (i = 0; i < bb->count ; i++) { in badblocks_update_acked()
810 bb->unacked_exist = 0; in badblocks_update_acked()
815 * overlapped with the bad range (from bad table) indexed by 'behind'.
819 u64 *p = bb->page; in try_adjacent_combine()
821 if (prev >= 0 && (prev + 1) < bb->count && in try_adjacent_combine()
829 if ((prev + 2) < bb->count) in try_adjacent_combine()
831 (bb->count - (prev + 2)) * 8); in try_adjacent_combine()
832 bb->count--; in try_adjacent_combine()
842 int len = 0, added = 0; in _badblocks_set()
844 int prev = -1, hint = -1; in _badblocks_set()
848 if (bb->shift < 0) in _badblocks_set()
852 if (sectors == 0) in _badblocks_set()
856 if (bb->shift) { in _badblocks_set()
860 s = rounddown(s, 1 << bb->shift); in _badblocks_set()
861 next = roundup(next, 1 << bb->shift); in _badblocks_set()
862 sectors = next - s; in _badblocks_set()
865 write_seqlock_irqsave(&bb->lock, flags); in _badblocks_set()
868 p = bb->page; in _badblocks_set()
873 len = 0; in _badblocks_set()
879 len = insert_at(bb, 0, &bad); in _badblocks_set()
880 bb->count++; in _badblocks_set()
888 if (prev < 0) { in _badblocks_set()
890 if (bad.len > (BB_OFFSET(p[0]) - bad.start)) in _badblocks_set()
891 bad.len = BB_OFFSET(p[0]) - bad.start; in _badblocks_set()
892 len = insert_at(bb, 0, &bad); in _badblocks_set()
893 bb->count++; in _badblocks_set()
899 /* in case p[prev-1] can be merged with p[prev] */ in _badblocks_set()
902 bb->count--; in _badblocks_set()
913 int extra = 0; in _badblocks_set()
916 if (extra > 0) in _badblocks_set()
920 BB_END(p[prev]) - s, sectors); in _badblocks_set()
927 bb->count += extra; in _badblocks_set()
931 bb->count--; in _badblocks_set()
946 if ((prev + 1) < bb->count && in _badblocks_set()
949 bad.len, BB_OFFSET(p[prev + 1]) - bad.start); in _badblocks_set()
952 bb->count++; in _badblocks_set()
958 sectors -= len; in _badblocks_set()
960 if (sectors > 0) in _badblocks_set()
965 * merged. (prev < 0) condition is not handled here, in _badblocks_set()
975 bb->unacked_exist = 1; in _badblocks_set()
980 write_sequnlock_irqrestore(&bb->lock, flags); in _badblocks_set()
982 return sectors == 0; in _badblocks_set()
990 * the caller to reduce bb->count.
995 sector_t sectors = bad->len; in front_clear()
996 sector_t s = bad->start; in front_clear()
997 u64 *p = bb->page; in front_clear()
998 int cleared = 0; in front_clear()
1000 *deleted = 0; in front_clear()
1004 BB_LEN(p[prev]) - sectors, in front_clear()
1010 if ((prev + 1) < bb->count) in front_clear()
1012 (bb->count - prev - 1) * 8); in front_clear()
1017 cleared = BB_END(p[prev]) - s; in front_clear()
1019 s - BB_OFFSET(p[prev]), in front_clear()
1038 u64 *p = bb->page; in front_splitting_clear()
1041 sector_t sectors = bad->len; in front_splitting_clear()
1042 sector_t s = bad->start; in front_splitting_clear()
1045 s - BB_OFFSET(p[prev]), in front_splitting_clear()
1047 memmove(p + prev + 2, p + prev + 1, (bb->count - prev - 1) * 8); in front_splitting_clear()
1048 p[prev + 1] = BB_MAKE(s + sectors, end - s - sectors, ack); in front_splitting_clear()
1056 int prev = -1, hint = -1; in _badblocks_clear()
1057 int len = 0, cleared = 0; in _badblocks_clear()
1060 if (bb->shift < 0) in _badblocks_clear()
1064 if (sectors == 0) in _badblocks_clear()
1068 if (bb->shift) { in _badblocks_clear()
1078 s = roundup(s, 1 << bb->shift); in _badblocks_clear()
1079 target = rounddown(target, 1 << bb->shift); in _badblocks_clear()
1080 sectors = target - s; in _badblocks_clear()
1083 write_seqlock_irq(&bb->lock); in _badblocks_clear()
1086 p = bb->page; in _badblocks_clear()
1102 if (prev < 0) { in _badblocks_clear()
1103 if (overlap_behind(bb, &bad, 0)) { in _badblocks_clear()
1104 len = BB_OFFSET(p[0]) - s; in _badblocks_clear()
1105 hint = 0; in _badblocks_clear()
1110 * Both situations clear a non-bad range, in _badblocks_clear()
1118 if ((prev + 1) >= bb->count && !overlap_front(bb, prev, &bad)) { in _badblocks_clear()
1135 if ((bb->count + 1) <= MAX_BADBLOCKS) { in _badblocks_clear()
1137 bb->count += 1; in _badblocks_clear()
1144 int deleted = 0; in _badblocks_clear()
1147 bb->count -= deleted; in _badblocks_clear()
1156 if ((prev + 1) < bb->count && overlap_behind(bb, &bad, prev + 1)) { in _badblocks_clear()
1157 len = BB_OFFSET(p[prev + 1]) - bad.start; in _badblocks_clear()
1159 /* Clearing a non-bad range should be treated as successful */ in _badblocks_clear()
1166 /* Clearing a non-bad range should be treated as successful */ in _badblocks_clear()
1171 sectors -= len; in _badblocks_clear()
1173 if (sectors > 0) in _badblocks_clear()
1181 write_sequnlock_irq(&bb->lock); in _badblocks_clear()
1193 int prev = -1, hint = -1, set = 0; in _badblocks_check()
1195 int unacked_badblocks = 0; in _badblocks_check()
1196 int acked_badblocks = 0; in _badblocks_check()
1197 u64 *p = bb->page; in _badblocks_check()
1212 if ((prev >= 0) && in _badblocks_check()
1213 ((prev + 1) >= bb->count) && !overlap_front(bb, prev, &bad)) { in _badblocks_check()
1219 if ((prev >= 0) && overlap_front(bb, prev, &bad)) { in _badblocks_check()
1228 len = BB_END(p[prev]) - s; in _badblocks_check()
1230 if (set == 0) { in _badblocks_check()
1239 if ((prev + 1) < bb->count && overlap_behind(bb, &bad, prev + 1)) { in _badblocks_check()
1240 len = BB_OFFSET(p[prev + 1]) - bad.start; in _badblocks_check()
1250 sectors -= len; in _badblocks_check()
1252 if (sectors > 0) in _badblocks_check()
1255 WARN_ON(sectors < 0); in _badblocks_check()
1257 if (unacked_badblocks > 0) in _badblocks_check()
1258 rv = -1; in _badblocks_check()
1259 else if (acked_badblocks > 0) in _badblocks_check()
1260 rv = 1; in _badblocks_check()
1261 else in _badblocks_check()
1262 rv = 0; in _badblocks_check()
1268 * badblocks_check() - check a given range for bad sectors
1277 * Entries in the bad-block table are 64 bits wide. This comprises:
1278 * Length of bad-range, in sectors: 0-511 for lengths 1-512
1279 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
1282 * 'Acknowledged' flag: 1 bit, the most significant bit.
1284 * Locking of the bad-block table uses a seqlock so badblocks_check
1290 * know if any block in the range is bad. So we binary-search
1291 * to the last range that starts at-or-before the given endpoint,
1296 * 0: there are no known bad blocks in the range
1297 * 1: there are known bad blocks which are all acknowledged
1298 * -1: there are bad blocks which have not yet been acknowledged in metadata.
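The layout described above corresponds to the BB_OFFSET/BB_LEN/BB_ACK/BB_MAKE accessors used throughout this file. A userspace sketch of the packing, assuming the canonical layout (low 9 bits hold length - 1, the next 54 bits the start sector, the top bit the ack flag; offsets are assumed to fit in 54 bits):

	#include <stdint.h>

	#define SKETCH_BB_MAX_LEN	512	/* 9 length bits encode 1..512 */

	/* record layout per the comment above: |ack:1|offset:54|len-1:9| */
	static inline uint64_t bb_make(uint64_t off, uint64_t len, int ack)
	{
		return (off << 9) | (len - 1) | ((uint64_t)!!ack << 63);
	}

	static inline uint64_t bb_offset(uint64_t x)
	{
		return (x >> 9) & ((1ULL << 54) - 1);	/* strip the ack bit */
	}

	static inline uint64_t bb_len(uint64_t x) { return (x & 0x1ff) + 1; }
	static inline int      bb_ack(uint64_t x) { return x >> 63; }
	static inline uint64_t bb_end(uint64_t x) { return bb_offset(x) + bb_len(x); }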
1307 WARN_ON(bb->shift < 0 || sectors == 0); in badblocks_check()
1309 if (bb->shift > 0) { in badblocks_check()
1313 s = rounddown(s, 1 << bb->shift); in badblocks_check()
1314 target = roundup(target, 1 << bb->shift); in badblocks_check()
1315 sectors = target - s; in badblocks_check()
1319 seq = read_seqbegin(&bb->lock); in badblocks_check()
1321 if (read_seqretry(&bb->lock, seq)) in badblocks_check()
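Only the read_seqbegin()/read_seqretry() pair of the read side survived the filter; it implies the standard seqlock retry pattern, sketched here assuming the usual first_bad/bad_sectors out-parameters of badblocks_check():

	int rv;
	unsigned int seq;

retry:
	seq = read_seqbegin(&bb->lock);
	rv = _badblocks_check(bb, s, sectors, first_bad, bad_sectors);
	/* a writer updated the table while we read it: redo the check */
	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return rv;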
1329 * badblocks_set() - Add a range of bad blocks to the table.
1336 * can be merged. We binary-search to find the 'insertion' point, then
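Since _badblocks_set() ends with "return sectors == 0;" (file line 982), a fully recorded range reports as nonzero. A hypothetical caller, assuming badblocks_set() passes that value through unchanged (the LBA and length here are made up):

	/* record an 8-sector unacknowledged bad range at a made-up LBA */
	if (!badblocks_set(bb, 4096, 8, 0))
		pr_warn("bad blocks table full, range only partially recorded\n");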
1352 * badblocks_clear() - Remove a range of bad blocks from the table.
1372 * ack_all_badblocks() - Acknowledge all bad blocks in a list.
1375 * This only succeeds if ->changed is clear. It is used by
1376 * in-kernel metadata updates
1380 if (bb->page == NULL || bb->changed) in ack_all_badblocks()
1383 write_seqlock_irq(&bb->lock); in ack_all_badblocks()
1385 if (bb->changed == 0 && bb->unacked_exist) { in ack_all_badblocks()
1386 u64 *p = bb->page; in ack_all_badblocks()
1389 for (i = 0; i < bb->count ; i++) { in ack_all_badblocks()
1398 for (i = 0; i < bb->count ; i++) in ack_all_badblocks()
1402 bb->unacked_exist = 0; in ack_all_badblocks()
1404 write_sequnlock_irq(&bb->lock); in ack_all_badblocks()
1409 * badblocks_show() - sysfs access to bad-blocks list
1421 u64 *p = bb->page; in badblocks_show()
1424 if (bb->shift < 0) in badblocks_show()
1425 return 0; in badblocks_show()
1428 seq = read_seqbegin(&bb->lock); in badblocks_show()
1430 len = 0; in badblocks_show()
1431 i = 0; in badblocks_show()
1433 while (len < PAGE_SIZE && i < bb->count) { in badblocks_show()
1443 len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n", in badblocks_show()
1444 (unsigned long long)s << bb->shift, in badblocks_show()
1445 length << bb->shift); in badblocks_show()
1447 if (unack && len == 0) in badblocks_show()
1448 bb->unacked_exist = 0; in badblocks_show()
1450 if (read_seqretry(&bb->lock, seq)) in badblocks_show()
1458 * badblocks_store() - sysfs access to bad-blocks list
1465 * Length of the buffer processed or -ve error.
1477 return -EINVAL; in badblocks_store()
1480 if (length <= 0) in badblocks_store()
1481 return -EINVAL; in badblocks_store()
1484 return -EINVAL; in badblocks_store()
1488 return -ENOSPC; in badblocks_store()
1497 bb->dev = dev; in __badblocks_init()
1498 bb->count = 0; in __badblocks_init()
1500 bb->shift = 0; in __badblocks_init()
1502 bb->shift = -1; in __badblocks_init()
1504 bb->page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL); in __badblocks_init()
1506 bb->page = kzalloc(PAGE_SIZE, GFP_KERNEL); in __badblocks_init()
1507 if (!bb->page) { in __badblocks_init()
1508 bb->shift = -1; in __badblocks_init()
1509 return -ENOMEM; in __badblocks_init()
1511 seqlock_init(&bb->lock); in __badblocks_init()
1513 return 0; in __badblocks_init()
1517 * badblocks_init() - initialize the badblocks structure
1522 * 0: success
1523 * -ve errno: on error
1534 return -EINVAL; in devm_init_badblocks()
1540 * badblocks_exit() - free the badblocks structure
1547 if (bb->dev) in badblocks_exit()
1548 devm_kfree(bb->dev, bb->page); in badblocks_exit()
1550 kfree(bb->page); in badblocks_exit()
1551 bb->page = NULL; in badblocks_exit()
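Putting the fragments together: __badblocks_init() zero-allocates the one-page table (devm_kzalloc() with a device, kzalloc() without) and uses shift = -1 as the disabled state. A hypothetical non-devm lifecycle, assuming badblocks_init(bb, enable) wraps __badblocks_init(NULL, bb, enable):

	static int badblocks_lifecycle_example(void)	/* hypothetical */
	{
		struct badblocks bb;
		int err;

		err = badblocks_init(&bb, 1);	/* enabled: shift starts at 0 */
		if (err)
			return err;		/* -ENOMEM: page allocation failed */

		badblocks_set(&bb, 4096, 8, 0);	/* a made-up bad range */

		badblocks_exit(&bb);		/* kfree()s bb.page and NULLs it */
		return 0;
	}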