// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2021-2024 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <[email protected]>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_inode.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtgroup.h"
#include "xfs_metafile.h"
#include "xfs_rtrefcount_btree.h"
#include "xfs_rtalloc.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/repair.h"

/* Set us up with the realtime refcount metadata locked. */
int
xchk_setup_rtrefcountbt(
        struct xfs_scrub *sc)
{
        int error;

        if (xchk_need_intent_drain(sc))
                xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

        if (xchk_could_repair(sc)) {
                error = xrep_setup_rtrefcountbt(sc);
                if (error)
                        return error;
        }

        error = xchk_rtgroup_init(sc, sc->sm->sm_agno, &sc->sr);
        if (error)
                return error;

        error = xchk_setup_rt(sc);
        if (error)
                return error;

        error = xchk_install_live_inode(sc, rtg_refcount(sc->sr.rtg));
        if (error)
                return error;

        return xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL);
}

/* Realtime Reference count btree scrubber. */

/*
 * Confirming Reference Counts via Reverse Mappings
 *
 * We want to count the reverse mappings overlapping a refcount record
 * (bno, len, refcount), allowing for the possibility that some of the
 * overlap may come from smaller adjoining reverse mappings, while some
 * comes from single extents which overlap the range entirely. The
 * outer loop is as follows:
 *
 * 1. For all reverse mappings overlapping the refcount extent,
 *    a. If a given rmap completely overlaps, mark it as seen.
 *    b. Otherwise, record the fragment (in rgbno order) for later
 *       processing.
 *
 * Once we've seen all the rmaps, we know that for all blocks in the
 * refcount record we want to find $refcount owners and we've already
 * visited $seen extents that overlap all the blocks. Therefore, we
 * need to find ($refcount - $seen) owners for every block in the
 * extent; call that quantity $target_nr. Proceed as follows:
 *
 * 2. Pull the first $target_nr fragments from the list; all of them
 *    should start at or before the start of the extent.
 *    Call this subset of fragments the working set.
 * 3. Until there are no more unprocessed fragments,
 *    a. Find the shortest fragments in the set and remove them.
 *    b. Note the block number of the end of these fragments.
 *    c. Pull the same number of fragments from the list. All of these
 *       fragments should start at the block number recorded in the
 *       previous step.
 *    d. Put those fragments in the set.
 * 4. Check that there are $target_nr fragments remaining in the list,
 *    and that they all end at or beyond the end of the refcount extent.
 *
 * If the refcount is correct, all the check conditions in the algorithm
 * should always hold true. If not, the refcount is incorrect.
 */
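
/*
 * A short worked example (hypothetical block numbers, for illustration only):
 * take a refcount record (bno=10, len=8, refcount=3) and these overlapping
 * reverse mappings:
 *
 *   A: rgblocks 8-19   covers the whole record   -> counted in $seen
 *   B: rgblocks 9-13   partial                   -> fragment
 *   C: rgblocks 10-14  partial                   -> fragment
 *   D: rgblocks 14-17  partial                   -> fragment
 *   E: rgblocks 15-18  partial                   -> fragment
 *
 * After step 1, $seen = 1 and $target_nr = 3 - 1 = 2, so step 2 pulls B and
 * C (both start at or before block 10) into the working set. B runs out
 * first, just before block 14, so step 3 replaces it with D, which starts
 * there; next C runs out just before block 15 and is replaced by E. Once
 * the fragment list is empty, the two remaining members (D and E) both end
 * at or beyond block 17, the last block of the record, so all three owners
 * are accounted for and the record checks out.
 */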
struct xchk_rtrefcnt_frag {
        struct list_head list;
        struct xfs_rmap_irec rm;
};

struct xchk_rtrefcnt_check {
        struct xfs_scrub *sc;
        struct list_head fragments;

        /* refcount extent we're examining */
        xfs_rgblock_t bno;
        xfs_extlen_t len;
        xfs_nlink_t refcount;

        /* number of owners seen */
        xfs_nlink_t seen;
};

/*
 * Decide if the given rmap is large enough that we can redeem it
 * towards refcount verification now, or if it's a fragment, in
 * which case we'll hang onto it in the hopes that we'll later
 * discover that we've collected exactly the correct number of
 * fragments as the rtrefcountbt says we should have.
 */
STATIC int
xchk_rtrefcountbt_rmap_check(
        struct xfs_btree_cur *cur,
        const struct xfs_rmap_irec *rec,
        void *priv)
{
        struct xchk_rtrefcnt_check *refchk = priv;
        struct xchk_rtrefcnt_frag *frag;
        xfs_rgblock_t rm_last;
        xfs_rgblock_t rc_last;
        int error = 0;

        if (xchk_should_terminate(refchk->sc, &error))
                return error;

        rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
        rc_last = refchk->bno + refchk->len - 1;

        /* Confirm that a single-owner refc extent is a CoW stage. */
        if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
                xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
                return 0;
        }

        if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
                /*
                 * The rmap overlaps the refcount record, so we can confirm
                 * one refcount owner seen.
                 */
                refchk->seen++;
        } else {
                /*
                 * This rmap covers only part of the refcount record, so
                 * save the fragment for later processing. If the rmapbt
                 * is healthy each rmap_irec we see will be in rgbno order
                 * so we don't need insertion sort here.
                 */
                frag = kmalloc(sizeof(struct xchk_rtrefcnt_frag),
                                XCHK_GFP_FLAGS);
                if (!frag)
                        return -ENOMEM;
                memcpy(&frag->rm, rec, sizeof(frag->rm));
                list_add_tail(&frag->list, &refchk->fragments);
        }

        return 0;
}
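
/*
 * Note on the single-owner check above: a shared extent by definition has a
 * reference count of at least 2, so the only rc_refcount == 1 records that
 * belong in the refcount btree are CoW staging extents, and those are always
 * tracked under the special XFS_RMAP_OWN_COW owner. Any other rmap owner
 * overlapping such a record therefore indicates an inconsistency between the
 * two btrees.
 */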

/*
 * Given a bunch of rmap fragments, iterate through them, keeping
 * a running tally of the refcount. If this ever deviates from
 * what we expect (which is the rtrefcountbt's refcount minus the
 * number of extents that totally covered the rtrefcountbt extent),
 * we have a rtrefcountbt error.
 */
STATIC void
xchk_rtrefcountbt_process_rmap_fragments(
        struct xchk_rtrefcnt_check *refchk)
{
        struct list_head worklist;
        struct xchk_rtrefcnt_frag *frag;
        struct xchk_rtrefcnt_frag *n;
        xfs_rgblock_t bno;
        xfs_rgblock_t rbno;
        xfs_rgblock_t next_rbno;
        xfs_nlink_t nr;
        xfs_nlink_t target_nr;

        target_nr = refchk->refcount - refchk->seen;
        if (target_nr == 0)
                return;

        /*
         * There are (refchk->refcount - refchk->seen) references we haven't
         * found yet. Pull that many off the fragment list and figure out
         * where the smallest rmap ends (and therefore the next rmap should
         * start). All the rmaps we pull off should start at or before the
         * beginning of the refcount record's range.
         */
        INIT_LIST_HEAD(&worklist);
        rbno = NULLRGBLOCK;

        /* Make sure the fragments actually /are/ in bno order. */
        bno = 0;
        list_for_each_entry(frag, &refchk->fragments, list) {
                if (frag->rm.rm_startblock < bno)
                        goto done;
                bno = frag->rm.rm_startblock;
        }

        /*
         * Find all the rmaps that start at or before the refc extent,
         * and put them on the worklist.
         */
        nr = 0;
        list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
                if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
                        break;
                bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
                if (bno < rbno)
                        rbno = bno;
                list_move_tail(&frag->list, &worklist);
                nr++;
        }

        /*
         * We should have found exactly $target_nr rmap fragments starting
         * at or before the refcount extent.
         */
        if (nr != target_nr)
                goto done;

        while (!list_empty(&refchk->fragments)) {
                /* Discard any fragments ending at rbno from the worklist. */
                nr = 0;
                next_rbno = NULLRGBLOCK;
                list_for_each_entry_safe(frag, n, &worklist, list) {
                        bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
                        if (bno != rbno) {
                                if (bno < next_rbno)
                                        next_rbno = bno;
                                continue;
                        }
                        list_del(&frag->list);
                        kfree(frag);
                        nr++;
                }

                /* Try to add nr rmaps starting at rbno to the worklist. */
                list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
                        bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
                        if (frag->rm.rm_startblock != rbno)
                                goto done;
                        list_move_tail(&frag->list, &worklist);
                        if (next_rbno > bno)
                                next_rbno = bno;
                        nr--;
                        if (nr == 0)
                                break;
                }

                /*
                 * If we get here and nr > 0, this means that we added fewer
                 * items to the worklist than we discarded because the fragment
                 * list ran out of items. Therefore, we cannot maintain the
                 * required refcount. Something is wrong, so we're done.
                 */
                if (nr)
                        goto done;

                rbno = next_rbno;
        }

        /*
         * Make sure the last extent we processed ends at or beyond
         * the end of the refcount extent.
         */
        if (rbno < refchk->bno + refchk->len)
                goto done;

        /* Actually record us having seen the remaining refcount. */
        refchk->seen = refchk->refcount;
done:
        /* Delete fragments and work list. */
        list_for_each_entry_safe(frag, n, &worklist, list) {
                list_del(&frag->list);
                kfree(frag);
        }
        list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
                list_del(&frag->list);
                kfree(frag);
        }
}

/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
xchk_rtrefcountbt_xref_rmap(
        struct xfs_scrub *sc,
        const struct xfs_refcount_irec *irec)
{
        struct xchk_rtrefcnt_check refchk = {
                .sc = sc,
                .bno = irec->rc_startblock,
                .len = irec->rc_blockcount,
                .refcount = irec->rc_refcount,
                .seen = 0,
        };
        struct xfs_rmap_irec low;
        struct xfs_rmap_irec high;
        struct xchk_rtrefcnt_frag *frag;
        struct xchk_rtrefcnt_frag *n;
        int error;

        if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm))
                return;

        /* Cross-reference with the rmapbt to confirm the refcount. */
        memset(&low, 0, sizeof(low));
        low.rm_startblock = irec->rc_startblock;
        memset(&high, 0xFF, sizeof(high));
        high.rm_startblock = irec->rc_startblock + irec->rc_blockcount - 1;

        INIT_LIST_HEAD(&refchk.fragments);
        error = xfs_rmap_query_range(sc->sr.rmap_cur, &low, &high,
                        xchk_rtrefcountbt_rmap_check, &refchk);
        if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
                goto out_free;

        xchk_rtrefcountbt_process_rmap_fragments(&refchk);
        if (irec->rc_refcount != refchk.seen)
                xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);

out_free:
        list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
                list_del(&frag->list);
                kfree(frag);
        }
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_rtrefcountbt_xref(
        struct xfs_scrub *sc,
        const struct xfs_refcount_irec *irec)
{
        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;

        xchk_xref_is_used_rt_space(sc,
                        xfs_rgbno_to_rtb(sc->sr.rtg, irec->rc_startblock),
                        irec->rc_blockcount);
        xchk_rtrefcountbt_xref_rmap(sc, irec);
}

struct xchk_rtrefcbt_records {
        /* Previous refcount record. */
        struct xfs_refcount_irec prev_rec;

        /* The next rtgroup block where we aren't expecting shared extents. */
        xfs_rgblock_t next_unshared_rgbno;

        /* Number of CoW blocks we expect. */
        xfs_extlen_t cow_blocks;

        /* Was the last record a shared or CoW staging extent? */
        enum xfs_refc_domain prev_domain;
};

static inline bool
xchk_rtrefcount_mergeable(
        struct xchk_rtrefcbt_records *rrc,
        const struct xfs_refcount_irec *r2)
{
        const struct xfs_refcount_irec *r1 = &rrc->prev_rec;

        /* Ignore if prev_rec is not yet initialized. */
        if (r1->rc_blockcount == 0)
                return false;

        if (r1->rc_startblock + r1->rc_blockcount != r2->rc_startblock)
                return false;
        if (r1->rc_refcount != r2->rc_refcount)
                return false;
        if ((unsigned long long)r1->rc_blockcount + r2->rc_blockcount >
                        XFS_REFC_LEN_MAX)
                return false;

        return true;
}
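
/*
 * For illustration (made-up numbers): records (startblock=64, blockcount=8,
 * refcount=2) and (startblock=72, blockcount=4, refcount=2) are physically
 * adjacent, carry the same reference count, and their combined length fits
 * under XFS_REFC_LEN_MAX, so a correctly maintained btree would have stored
 * them as a single record (64, 12, 2). Seeing both side by side means the
 * tree was not kept in canonical form, so the caller flags corruption.
 */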

/* Flag failures for records that could be merged. */
STATIC void
xchk_rtrefcountbt_check_mergeable(
        struct xchk_btree *bs,
        struct xchk_rtrefcbt_records *rrc,
        const struct xfs_refcount_irec *irec)
{
        if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;

        if (xchk_rtrefcount_mergeable(rrc, irec))
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

        memcpy(&rrc->prev_rec, irec, sizeof(struct xfs_refcount_irec));
}

STATIC int
xchk_rtrefcountbt_rmap_check_gap(
        struct xfs_btree_cur *cur,
        const struct xfs_rmap_irec *rec,
        void *priv)
{
        xfs_rgblock_t *next_bno = priv;

        if (*next_bno != NULLRGBLOCK && rec->rm_startblock < *next_bno)
                return -ECANCELED;

        *next_bno = rec->rm_startblock + rec->rm_blockcount;
        return 0;
}

/*
 * Make sure that a gap in the reference count records does not correspond to
 * overlapping records (i.e. shared extents) in the reverse mappings.
 */
static inline void
xchk_rtrefcountbt_xref_gaps(
        struct xfs_scrub *sc,
        struct xchk_rtrefcbt_records *rrc,
        xfs_rtblock_t bno)
{
        struct xfs_rmap_irec low;
        struct xfs_rmap_irec high;
        xfs_rgblock_t next_bno = NULLRGBLOCK;
        int error;

        if (bno <= rrc->next_unshared_rgbno || !sc->sr.rmap_cur ||
            xchk_skip_xref(sc->sm))
                return;

        memset(&low, 0, sizeof(low));
        low.rm_startblock = rrc->next_unshared_rgbno;
        memset(&high, 0xFF, sizeof(high));
        high.rm_startblock = bno - 1;

        error = xfs_rmap_query_range(sc->sr.rmap_cur, &low, &high,
                        xchk_rtrefcountbt_rmap_check_gap, &next_bno);
        if (error == -ECANCELED)
                xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
        else
                xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur);
}
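
/*
 * Put differently: in the gaps before, between, and after shared-domain
 * refcount records, no rt group block may have more than one owner. With
 * made-up numbers: if one shared record ends at rgblock 100 and the next
 * begins at rgblock 200, the query above walks the reverse mappings that
 * start in rgblocks 100-199; since the callback requires each mapping to
 * begin at or after the end of the previous one, any overlap in that range
 * implies a shared extent with no matching refcount record, which is
 * reported as corruption.
 */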

/* Scrub a rtrefcountbt record. */
STATIC int
xchk_rtrefcountbt_rec(
        struct xchk_btree *bs,
        const union xfs_btree_rec *rec)
{
        struct xfs_mount *mp = bs->cur->bc_mp;
        struct xchk_rtrefcbt_records *rrc = bs->private;
        struct xfs_refcount_irec irec;
        u32 mod;

        xfs_refcount_btrec_to_irec(rec, &irec);
        if (xfs_rtrefcount_check_irec(to_rtg(bs->cur->bc_group), &irec) !=
                        NULL) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                return 0;
        }

        /* We can only share full rt extents. */
        mod = xfs_rgbno_to_rtxoff(mp, irec.rc_startblock);
        if (mod)
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        mod = xfs_extlen_to_rtxmod(mp, irec.rc_blockcount);
        if (mod)
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

        if (irec.rc_domain == XFS_REFC_DOMAIN_COW)
                rrc->cow_blocks += irec.rc_blockcount;

        /* Shared records always come before CoW records. */
        if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED &&
            rrc->prev_domain == XFS_REFC_DOMAIN_COW)
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        rrc->prev_domain = irec.rc_domain;

        xchk_rtrefcountbt_check_mergeable(bs, rrc, &irec);
        xchk_rtrefcountbt_xref(bs->sc, &irec);

        /*
         * If this is a record for a shared extent, check that all blocks
         * between the previous record and this one have at most one reverse
         * mapping.
         */
        if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED) {
                xchk_rtrefcountbt_xref_gaps(bs->sc, rrc, irec.rc_startblock);
                rrc->next_unshared_rgbno = irec.rc_startblock +
                                irec.rc_blockcount;
        }

        return 0;
}
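
/*
 * Example of the full-rt-extent rule above, with hypothetical geometry: if
 * the realtime extent size is 4 fs blocks, a record starting at rgblock 6 is
 * flagged because xfs_rgbno_to_rtxoff() returns 2 (the start is not aligned
 * to an rt extent), and a blockcount of 10 would likewise be flagged because
 * xfs_extlen_to_rtxmod() returns 2. A record covering rgblocks 8-15 (offset
 * 0, length 8, both multiples of the extent size) passes both checks.
 */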

/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
xchk_refcount_xref_rmap(
        struct xfs_scrub *sc,
        const struct xfs_owner_info *btree_oinfo,
        xfs_extlen_t cow_blocks)
{
        xfs_filblks_t refcbt_blocks = 0;
        xfs_filblks_t blocks;
        int error;

        if (!sc->sr.rmap_cur || !sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
                return;

        /* Check that we saw as many refcbt blocks as the rmap knows about. */
        error = xfs_btree_count_blocks(sc->sr.refc_cur, &refcbt_blocks);
        if (!xchk_btree_process_error(sc, sc->sr.refc_cur, 0, &error))
                return;
        error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, btree_oinfo,
                        &blocks);
        if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
                return;
        if (blocks != refcbt_blocks)
                xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

        /* Check that we saw as many cow blocks as the rmap knows about. */
        error = xchk_count_rmap_ownedby_ag(sc, sc->sr.rmap_cur,
                        &XFS_RMAP_OINFO_COW, &blocks);
        if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
                return;
        if (blocks != cow_blocks)
                xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
}

/* Scrub the realtime refcount btree. */
int
xchk_rtrefcountbt(
        struct xfs_scrub *sc)
{
        struct xfs_owner_info btree_oinfo;
        struct xchk_rtrefcbt_records rrc = {
                .cow_blocks = 0,
                .next_unshared_rgbno = 0,
                .prev_domain = XFS_REFC_DOMAIN_SHARED,
        };
        int error;

        error = xchk_metadata_inode_forks(sc);
        if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
                return error;

        xfs_rmap_ino_bmbt_owner(&btree_oinfo, rtg_refcount(sc->sr.rtg)->i_ino,
                        XFS_DATA_FORK);
        error = xchk_btree(sc, sc->sr.refc_cur, xchk_rtrefcountbt_rec,
                        &btree_oinfo, &rrc);
        if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
                return error;

        /*
         * Check that all blocks between the last refcount > 1 record and the
         * end of the rt volume have at most one reverse mapping.
         */
        xchk_rtrefcountbt_xref_gaps(sc, &rrc, sc->mp->m_sb.sb_rblocks);

        xchk_refcount_xref_rmap(sc, &btree_oinfo, rrc.cow_blocks);

        return 0;
}

/* xref check that a cow staging extent is marked in the rtrefcountbt. */
void
xchk_xref_is_rt_cow_staging(
        struct xfs_scrub *sc,
        xfs_rgblock_t bno,
        xfs_extlen_t len)
{
        struct xfs_refcount_irec rc;
        int has_refcount;
        int error;

        if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
                return;

        /* Find the CoW staging extent. */
        error = xfs_refcount_lookup_le(sc->sr.refc_cur, XFS_REFC_DOMAIN_COW,
                        bno, &has_refcount);
        if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
                return;
        if (!has_refcount) {
                xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
                return;
        }

        error = xfs_refcount_get_rec(sc->sr.refc_cur, &rc, &has_refcount);
        if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
                return;
        if (!has_refcount) {
                xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
                return;
        }

        /* CoW lookup returned a shared extent record? */
        if (rc.rc_domain != XFS_REFC_DOMAIN_COW)
                xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);

        /* Must be at least as long as what was passed in */
        if (rc.rc_blockcount < len)
                xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
}

/*
 * xref check that the extent is not shared. Only file data blocks
 * can have multiple owners.
 */
void
xchk_xref_is_not_rt_shared(
        struct xfs_scrub *sc,
        xfs_rgblock_t bno,
        xfs_extlen_t len)
{
        enum xbtree_recpacking outcome;
        int error;

        if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
                return;

        error = xfs_refcount_has_records(sc->sr.refc_cur,
                        XFS_REFC_DOMAIN_SHARED, bno, len, &outcome);
        if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
                return;
        if (outcome != XBTREE_RECPACKING_EMPTY)
                xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
}

/* xref check that the extent is not being used for CoW staging. */
void
xchk_xref_is_not_rt_cow_staging(
        struct xfs_scrub *sc,
        xfs_rgblock_t bno,
        xfs_extlen_t len)
{
        enum xbtree_recpacking outcome;
        int error;

        if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
                return;

        error = xfs_refcount_has_records(sc->sr.refc_cur, XFS_REFC_DOMAIN_COW,
                        bno, len, &outcome);
        if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
                return;
        if (outcome != XBTREE_RECPACKING_EMPTY)
                xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
}