Lines Matching +full:scrubber +full:- +full:done
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
37 return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER); in xchk_setup_ag_iallocbt()
40 /* Inode btree scrubber. */
57 * - The finobt need not have a record if all inodes in the inobt record are
58 *   allocated.
59 * - The finobt need not have a record if all inodes in the inobt record are
60 *   free.
61 * - The finobt need not have a record if the inobt record says this is a hole.
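The three rules above say when the finobt may legitimately omit a record that the inobt carries. A minimal userspace sketch of that decision, assuming simplified stand-in types (the model_* names and constants are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

#define MODEL_INODES_PER_CHUNK		64
#define MODEL_INODES_PER_HOLEMASK_BIT	4	/* 64 inodes / 16 holemask bits */
#define MODEL_ALL_FREE			(~0ULL)	/* stands in for XFS_INOBT_ALL_FREE */

struct model_inobt_rec {
	uint64_t	ir_free;	/* bit set => that inode is free */
	uint16_t	ir_holemask;	/* bit set => that span is a hole */
};

/* May the finobt legitimately lack a record covering inode @chunk_idx? */
static bool
model_finobt_record_optional(const struct model_inobt_rec *irec,
			     unsigned int chunk_idx)
{
	unsigned int hole_idx = chunk_idx / MODEL_INODES_PER_HOLEMASK_BIT;

	if (irec->ir_free == 0)			/* everything allocated */
		return true;
	if (irec->ir_free == MODEL_ALL_FREE)	/* everything free */
		return true;
	if (irec->ir_holemask & (1U << hole_idx)) /* this span is a hole */
		return true;
	return false;				/* a finobt record is required */
}

This mirrors the ir_free == 0 and ir_free == XFS_INOBT_ALL_FREE tests visible in xchk_inobt_xref_finobt() below.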
73 struct xfs_btree_cur *cur = sc->sa.fino_cur; in xchk_inobt_xref_finobt()
79 ASSERT(xfs_btree_is_fino(cur->bc_ops)); in xchk_inobt_xref_finobt()
89 return -EFSCORRUPTED; in xchk_inobt_xref_finobt()
95 frec_idx = agino - frec.ir_startino; in xchk_inobt_xref_finobt()
108 if (irec->ir_free == 0) in xchk_inobt_xref_finobt()
112 if (irec->ir_free == XFS_INOBT_ALL_FREE) in xchk_inobt_xref_finobt()
142 ASSERT(sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT); in xchk_inobt_chunk_xref_finobt()
144 if (!sc->sa.fino_cur || xchk_skip_xref(sc->sm)) in xchk_inobt_chunk_xref_finobt()
147 for (i = agino, rec_idx = agino - irec->ir_startino; in xchk_inobt_chunk_xref_finobt()
153 free = irec->ir_free & (1ULL << rec_idx); in xchk_inobt_chunk_xref_finobt()
155 hole = irec->ir_holemask & (1U << hole_idx); in xchk_inobt_chunk_xref_finobt()
158 if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur)) in xchk_inobt_chunk_xref_finobt()
176 struct xfs_btree_cur *cur = sc->sa.ino_cur; in xchk_finobt_xref_inobt()
182 ASSERT(xfs_btree_is_ino(cur->bc_ops)); in xchk_finobt_xref_inobt()
192 return -EFSCORRUPTED; in xchk_finobt_xref_inobt()
198 rec_idx = agino - irec.ir_startino; in xchk_finobt_xref_inobt()
230 ASSERT(sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT); in xchk_finobt_chunk_xref_inobt()
232 if (!sc->sa.ino_cur || xchk_skip_xref(sc->sm)) in xchk_finobt_chunk_xref_inobt()
235 for (i = agino, rec_idx = agino - frec->ir_startino; in xchk_finobt_chunk_xref_inobt()
241 ffree = frec->ir_free & (1ULL << rec_idx); in xchk_finobt_chunk_xref_inobt()
243 fhole = frec->ir_holemask & (1U << hole_idx); in xchk_finobt_chunk_xref_inobt()
246 if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur)) in xchk_finobt_chunk_xref_inobt()
251 /* Is this chunk worth checking and cross-referencing? */
259 struct xfs_scrub *sc = bs->sc; in xchk_iallocbt_chunk()
260 struct xfs_mount *mp = bs->cur->bc_mp; in xchk_iallocbt_chunk()
261 struct xfs_perag *pag = to_perag(bs->cur->bc_group); in xchk_iallocbt_chunk()
266 len = XFS_B_TO_FSB(mp, nr_inodes * mp->m_sb.sb_inodesize); in xchk_iallocbt_chunk()
269 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); in xchk_iallocbt_chunk()
271 if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) in xchk_iallocbt_chunk()
275 if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT) in xchk_iallocbt_chunk()
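Before cross-referencing, xchk_iallocbt_chunk() converts the record's inode count into the fsblock extent it should occupy (the XFS_B_TO_FSB() line above); if that extent does not fit inside the AG, the xchk_btree_set_corrupt() call reports the record. A worked example with assumed geometry (the numbers are illustrative, not taken from the listing):

#include <stdio.h>

int main(void)
{
	unsigned long long nr_inodes = 64;	/* one full inode chunk */
	unsigned long long inodesize = 512;	/* sb_inodesize */
	unsigned int	   blocklog = 12;	/* 4096-byte fsblocks */

	/* Models XFS_B_TO_FSB(): bytes rounded up to whole fsblocks. */
	unsigned long long bytes = nr_inodes * inodesize;
	unsigned long long len = (bytes + (1ULL << blocklog) - 1) >> blocklog;

	printf("chunk extent: %llu fsblocks\n", len);	/* prints 8 */
	return 0;
}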
287 * record. First we try querying the in-core inode state, and if the inode
288 * isn't loaded we examine the on-disk inode directly.
297 * @dip is the on-disk inode.
313 if (xchk_should_terminate(bs->sc, &error)) in xchk_iallocbt_check_cluster_ifree()
320 agino = irec->ir_startino + irec_ino; in xchk_iallocbt_check_cluster_ifree()
321 fsino = xfs_agino_to_ino(to_perag(bs->cur->bc_group), agino); in xchk_iallocbt_check_cluster_ifree()
322 irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino)); in xchk_iallocbt_check_cluster_ifree()
324 if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC || in xchk_iallocbt_check_cluster_ifree()
325 (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) { in xchk_iallocbt_check_cluster_ifree()
326 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); in xchk_iallocbt_check_cluster_ifree()
330 error = xchk_inode_is_allocated(bs->sc, agino, &ino_inuse); in xchk_iallocbt_check_cluster_ifree()
331 if (error == -ENODATA) { in xchk_iallocbt_check_cluster_ifree()
333 freemask_ok = irec_free ^ !!(dip->di_mode); in xchk_iallocbt_check_cluster_ifree()
334 if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok) in xchk_iallocbt_check_cluster_ifree()
335 return -EDEADLOCK; in xchk_iallocbt_check_cluster_ifree()
340 * The inode scrubber can deal with this. in xchk_iallocbt_check_cluster_ifree()
348 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); in xchk_iallocbt_check_cluster_ifree()
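When xchk_inode_is_allocated() returns -ENODATA, the inode is not in core, so the scrubber compares the btree's free bit directly against the raw on-disk mode. A userspace model of that comparison, with a hypothetical pared-down inode struct:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, simplified view of the on-disk inode fields used here. */
struct model_dinode {
	uint16_t	di_magic;	/* must be the "IN" magic for a valid inode */
	uint16_t	di_mode;	/* zero iff the inode is free on disk */
};

/*
 * Mirrors freemask_ok = irec_free ^ !!(dip->di_mode): the record's free
 * bit and the on-disk "in use" state must be exact opposites.
 */
static bool
model_freemask_ok(bool irec_free, const struct model_dinode *dip)
{
	return irec_free ^ (dip->di_mode != 0);
}

If the two disagree and XCHK_TRY_HARDER is not set, the function returns -EDEADLOCK so the scrub can be retried with the flag set, as the listing shows.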
367 struct xfs_mount *mp = bs->cur->bc_mp; in xchk_iallocbt_check_cluster()
377 M_IGEO(mp)->inodes_per_cluster); in xchk_iallocbt_check_cluster()
380 agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base); in xchk_iallocbt_check_cluster()
396 ir_holemask = (irec->ir_holemask & cluster_mask); in xchk_iallocbt_check_cluster()
397 imap.im_blkno = xfs_agbno_to_daddr(to_perag(bs->cur->bc_group), agbno); in xchk_iallocbt_check_cluster()
398 imap.im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster); in xchk_iallocbt_check_cluster()
399 imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) << in xchk_iallocbt_check_cluster()
400 mp->m_sb.sb_inodelog; in xchk_iallocbt_check_cluster()
404 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); in xchk_iallocbt_check_cluster()
408 trace_xchk_iallocbt_check_cluster(to_perag(bs->cur->bc_group), in xchk_iallocbt_check_cluster()
409 irec->ir_startino, imap.im_blkno, imap.im_len, in xchk_iallocbt_check_cluster()
411 XFS_INO_TO_OFFSET(mp, irec->ir_startino + in xchk_iallocbt_check_cluster()
416 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); in xchk_iallocbt_check_cluster()
422 xchk_xref_is_not_owned_by(bs->sc, agbno, in xchk_iallocbt_check_cluster()
423 M_IGEO(mp)->blocks_per_cluster, in xchk_iallocbt_check_cluster()
428 xchk_xref_is_only_owned_by(bs->sc, agbno, M_IGEO(mp)->blocks_per_cluster, in xchk_iallocbt_check_cluster()
432 error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &cluster_bp); in xchk_iallocbt_check_cluster()
433 if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error)) in xchk_iallocbt_check_cluster()
440 if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) { in xchk_iallocbt_check_cluster()
441 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); in xchk_iallocbt_check_cluster()
450 imap.im_boffset += mp->m_sb.sb_inodesize; in xchk_iallocbt_check_cluster()
453 xfs_trans_brelse(bs->cur->bc_tp, cluster_bp); in xchk_iallocbt_check_cluster()
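The imap fields above describe the cluster buffer: im_blkno/im_len locate it on disk, and im_boffset is the byte offset of the record's first inode within it, advancing by one inode size per iteration. A sketch of that walk, under simplified assumed types:

#include <stdbool.h>
#include <stdint.h>

struct model_imap {
	uint64_t	im_len_bytes;	/* cluster buffer length in bytes */
	uint32_t	im_boffset;	/* byte offset of the next inode */
};

/* Visit every inode slot in the cluster; false means buffer overrun. */
static bool
model_walk_cluster(struct model_imap *imap, unsigned int nr_inodes,
		   unsigned int inodesize)
{
	unsigned int i;

	/* Mirrors the im_boffset >= BBTOB(b_length) corruption check. */
	if (imap->im_boffset >= imap->im_len_bytes)
		return false;

	for (i = 0; i < nr_inodes; i++) {
		/* ... the per-inode ifree check runs here ... */
		imap->im_boffset += inodesize;	/* step to the next inode */
	}
	return true;
}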
479 cluster_base += M_IGEO(bs->sc->mp)->inodes_per_cluster) { in xchk_iallocbt_check_clusters()
499 struct xfs_mount *mp = bs->sc->mp; in xchk_iallocbt_rec_alignment()
500 struct xchk_iallocbt *iabt = bs->private; in xchk_iallocbt_rec_alignment()
515 if (xfs_btree_is_fino(bs->cur->bc_ops)) { in xchk_iallocbt_rec_alignment()
519 igeo->cluster_align_inodes) - 1; in xchk_iallocbt_rec_alignment()
520 if (irec->ir_startino & imask) in xchk_iallocbt_rec_alignment()
521 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); in xchk_iallocbt_rec_alignment()
525 if (iabt->next_startino != NULLAGINO) { in xchk_iallocbt_rec_alignment()
531 if (irec->ir_startino != iabt->next_startino) { in xchk_iallocbt_rec_alignment()
532 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); in xchk_iallocbt_rec_alignment()
536 iabt->next_startino += XFS_INODES_PER_CHUNK; in xchk_iallocbt_rec_alignment()
538 /* Are we done with the cluster? */ in xchk_iallocbt_rec_alignment()
539 if (iabt->next_startino >= iabt->next_cluster_ino) { in xchk_iallocbt_rec_alignment()
540 iabt->next_startino = NULLAGINO; in xchk_iallocbt_rec_alignment()
541 iabt->next_cluster_ino = NULLAGINO; in xchk_iallocbt_rec_alignment()
547 if (irec->ir_startino & (igeo->cluster_align_inodes - 1)) { in xchk_iallocbt_rec_alignment()
548 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); in xchk_iallocbt_rec_alignment()
552 if (irec->ir_startino & (igeo->inodes_per_cluster - 1)) { in xchk_iallocbt_rec_alignment()
553 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); in xchk_iallocbt_rec_alignment()
557 if (igeo->inodes_per_cluster <= XFS_INODES_PER_CHUNK) in xchk_iallocbt_rec_alignment()
565 iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK; in xchk_iallocbt_rec_alignment()
566 iabt->next_cluster_ino = irec->ir_startino + igeo->inodes_per_cluster; in xchk_iallocbt_rec_alignment()
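When one inode cluster holds more than XFS_INODES_PER_CHUNK inodes, it spans several btree records, and next_startino/next_cluster_ino carry the expectation from one record to the next. A userspace model of that state machine (types and the single alignment mask are simplifications):

#include <stdbool.h>
#include <stdint.h>

#define MODEL_INODES_PER_CHUNK	64
#define MODEL_NULLAGINO		((uint32_t)-1)

struct model_iabt {
	uint32_t	next_startino;		/* expected next record start */
	uint32_t	next_cluster_ino;	/* first inode past the cluster */
};

static bool
model_rec_aligned(struct model_iabt *iabt, uint32_t startino,
		  uint32_t inodes_per_cluster)
{
	/* Mid-cluster: the record must land exactly one chunk later. */
	if (iabt->next_startino != MODEL_NULLAGINO) {
		if (startino != iabt->next_startino)
			return false;
		iabt->next_startino += MODEL_INODES_PER_CHUNK;
		if (iabt->next_startino >= iabt->next_cluster_ino) {
			iabt->next_startino = MODEL_NULLAGINO;	/* cluster done */
			iabt->next_cluster_ino = MODEL_NULLAGINO;
		}
		return true;
	}

	/* New cluster: it must begin on a cluster boundary. */
	if (startino & (inodes_per_cluster - 1))
		return false;

	/* Large cluster: expect its remaining records back to back. */
	if (inodes_per_cluster > MODEL_INODES_PER_CHUNK) {
		iabt->next_startino = startino + MODEL_INODES_PER_CHUNK;
		iabt->next_cluster_ino = startino + inodes_per_cluster;
	}
	return true;
}

For example, with 128 inodes per cluster a record at startino 256 must be followed by one at 320 before a new cluster may begin.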
575 struct xfs_mount *mp = bs->cur->bc_mp; in xchk_iallocbt_rec()
576 struct xchk_iallocbt *iabt = bs->private; in xchk_iallocbt_rec()
586 if (xfs_inobt_check_irec(to_perag(bs->cur->bc_group), &irec) != NULL) { in xchk_iallocbt_rec()
587 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); in xchk_iallocbt_rec()
594 if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) in xchk_iallocbt_rec()
597 iabt->inodes += irec.ir_count; in xchk_iallocbt_rec()
599 /* Handle non-sparse inodes */ in xchk_iallocbt_rec()
602 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); in xchk_iallocbt_rec()
616 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); in xchk_iallocbt_rec()
630 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); in xchk_iallocbt_rec()
633 if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) in xchk_iallocbt_rec()
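For sparse records, the scrubber checks that the holemask, free mask, and counters agree with each other. A model of the two invariants involved, assuming allocmask is the holemask expanded to a 64-bit per-inode mask (as xfs_inobt_irec_to_allocmask() produces in the kernel):

#include <stdbool.h>
#include <stdint.h>

/*
 * Invariants for a sparse inobt record: inodes that fall into holes must
 * be marked free, and the free count can never exceed the real count.
 */
static bool
model_sparse_irec_ok(uint64_t ir_free, uint64_t allocmask,
		     uint8_t ir_count, uint8_t ir_freecount)
{
	uint64_t holes = ~allocmask;	/* bits with no backing inode */

	if ((holes & ir_free) != holes)
		return false;
	return ir_freecount <= ir_count;
}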
657 if (!sc->sa.ino_cur || !sc->sa.rmap_cur || in xchk_iallocbt_xref_rmap_btreeblks()
658 (xfs_has_finobt(sc->mp) && !sc->sa.fino_cur) || in xchk_iallocbt_xref_rmap_btreeblks()
659 xchk_skip_xref(sc->sm)) in xchk_iallocbt_xref_rmap_btreeblks()
663 error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks); in xchk_iallocbt_xref_rmap_btreeblks()
667 if (sc->sa.fino_cur) { in xchk_iallocbt_xref_rmap_btreeblks()
668 error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks); in xchk_iallocbt_xref_rmap_btreeblks()
673 error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, in xchk_iallocbt_xref_rmap_btreeblks()
675 if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) in xchk_iallocbt_xref_rmap_btreeblks()
678 xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0); in xchk_iallocbt_xref_rmap_btreeblks()
694 if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm)) in xchk_iallocbt_xref_rmap_inodes()
698 error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, in xchk_iallocbt_xref_rmap_inodes()
700 if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) in xchk_iallocbt_xref_rmap_inodes()
702 inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize); in xchk_iallocbt_xref_rmap_inodes()
704 xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); in xchk_iallocbt_xref_rmap_inodes()
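Both rmap cross-checks reduce to block accounting: the two inode btrees' own blocks must equal the space the rmapbt assigns to XFS_RMAP_OWN_INOBT, and the inode population counted while walking the inobt, converted to blocks, must equal the XFS_RMAP_OWN_INODES space. A minimal model of the two comparisons (userspace, hypothetical names; byte-to-block rounding as in the earlier example):

#include <stdbool.h>
#include <stdint.h>

/* inobt + finobt blocks must exactly match the rmap's OWN_INOBT claim. */
static bool
model_btreeblks_match(uint64_t inobt_blocks, uint64_t finobt_blocks,
		      uint64_t rmap_own_inobt)
{
	return inobt_blocks + finobt_blocks == rmap_own_inobt;
}

/* Inode count, converted to fsblocks, must match the OWN_INODES claim. */
static bool
model_inode_blocks_match(uint64_t inodes, uint32_t inodesize,
			 uint32_t blocklog, uint64_t rmap_own_inodes)
{
	uint64_t bytes = inodes * inodesize;

	return ((bytes + (1ULL << blocklog) - 1) >> blocklog) ==
	       rmap_own_inodes;
}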
720 switch (sc->sm->sm_type) { in xchk_iallocbt()
722 cur = sc->sa.ino_cur; in xchk_iallocbt()
725 cur = sc->sa.fino_cur; in xchk_iallocbt()
729 return -EIO; in xchk_iallocbt()
746 if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT) in xchk_iallocbt()
763 if (!(*icur) || xchk_skip_xref(sc->sm)) in xchk_xref_inode_check()
780 xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, in xchk_xref_is_not_inode_chunk()
782 xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, in xchk_xref_is_not_inode_chunk()
793 xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, in xchk_xref_is_inode_chunk()
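These last helpers are called from other scrubbers to assert that a block range is, or is not, covered by inode chunks; both reduce to a "does this btree have records in the range" query. A userspace model of the two predicates, assuming a hypothetical range query standing in for the inobt/finobt lookup:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for "does this btree have records in the range?" */
typedef bool (*model_range_query)(uint32_t agbno, uint32_t len);

/* Neither inode btree may claim any block of [agbno, agbno + len). */
static bool
model_is_not_inode_chunk(model_range_query inobt, model_range_query finobt,
			 uint32_t agbno, uint32_t len)
{
	return !inobt(agbno, len) && !finobt(agbno, len);
}

/*
 * The inobt must cover the range; the finobt only tracks records with
 * free inodes, so the positive check consults just the primary btree,
 * matching the single ino_cur call in the listing above.
 */
static bool
model_is_inode_chunk(model_range_query inobt, uint32_t agbno, uint32_t len)
{
	return inobt(agbno, len);
}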