From 739a2fe0428f24c11fe652252c2f19ef7a697209 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 11 Apr 2023 18:59:56 -0700 Subject: xfs: fix author and spdx headers on scrub/ files Fix the spdx tags to match current practice, and update the author contact information. Signed-off-by: Darrick J. Wong Reviewed-by: Dave Chinner --- fs/xfs/scrub/bitmap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs/xfs/scrub/bitmap.c') diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c index a255f09e9f0a..55b2af10aae9 100644 --- a/fs/xfs/scrub/bitmap.c +++ b/fs/xfs/scrub/bitmap.c @@ -1,7 +1,7 @@ -// SPDX-License-Identifier: GPL-2.0+ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2018 Oracle. All Rights Reserved. - * Author: Darrick J. Wong + * Author: Darrick J. Wong */ #include "xfs.h" #include "xfs_fs.h" -- cgit v1.2.3 From ecc73f8a58c7844b04186726f8699ba97cec2ef9 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 11 Apr 2023 18:59:57 -0700 Subject: xfs: update copyright years for scrub/ files Update the copyright years in the scrub/ source code files. This isn't required, but it's helpful to remind myself just how long it's taken to develop this feature. Signed-off-by: Darrick J. Wong Reviewed-by: Dave Chinner --- fs/xfs/scrub/agheader.c | 2 +- fs/xfs/scrub/agheader_repair.c | 2 +- fs/xfs/scrub/alloc.c | 2 +- fs/xfs/scrub/attr.c | 2 +- fs/xfs/scrub/attr.h | 2 +- fs/xfs/scrub/bitmap.c | 2 +- fs/xfs/scrub/bitmap.h | 2 +- fs/xfs/scrub/bmap.c | 2 +- fs/xfs/scrub/btree.c | 2 +- fs/xfs/scrub/btree.h | 2 +- fs/xfs/scrub/common.c | 2 +- fs/xfs/scrub/common.h | 2 +- fs/xfs/scrub/dabtree.c | 2 +- fs/xfs/scrub/dabtree.h | 2 +- fs/xfs/scrub/dir.c | 2 +- fs/xfs/scrub/fscounters.c | 2 +- fs/xfs/scrub/health.c | 2 +- fs/xfs/scrub/health.h | 2 +- fs/xfs/scrub/ialloc.c | 2 +- fs/xfs/scrub/inode.c | 2 +- fs/xfs/scrub/parent.c | 2 +- fs/xfs/scrub/quota.c | 2 +- fs/xfs/scrub/refcount.c | 2 +- fs/xfs/scrub/repair.c | 2 +- fs/xfs/scrub/repair.h | 2 +- fs/xfs/scrub/rmap.c | 2 +- fs/xfs/scrub/rtbitmap.c | 2 +- fs/xfs/scrub/scrub.c | 2 +- fs/xfs/scrub/scrub.h | 2 +- fs/xfs/scrub/symlink.c | 2 +- fs/xfs/scrub/trace.c | 2 +- fs/xfs/scrub/trace.h | 2 +- fs/xfs/scrub/xfs_scrub.h | 2 +- 33 files changed, 33 insertions(+), 33 deletions(-) (limited to 'fs/xfs/scrub/bitmap.c') diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c index ad8c592e11cf..c91819da1f5f 100644 --- a/fs/xfs/scrub/agheader.c +++ b/fs/xfs/scrub/agheader.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c index 703e27cec327..edfb1dfb80a9 100644 --- a/fs/xfs/scrub/agheader_repair.c +++ b/fs/xfs/scrub/agheader_repair.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2018 Oracle. All Rights Reserved. + * Copyright (C) 2018-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c index 16c4d57992b9..39e79b9536bc 100644 --- a/fs/xfs/scrub/alloc.c +++ b/fs/xfs/scrub/alloc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. 
Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c index e0ed2123b5e2..5573be3a3dfe 100644 --- a/fs/xfs/scrub/attr.c +++ b/fs/xfs/scrub/attr.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/attr.h b/fs/xfs/scrub/attr.h index f9680cb02a30..bc6321552251 100644 --- a/fs/xfs/scrub/attr.h +++ b/fs/xfs/scrub/attr.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Copyright (C) 2019 Oracle. All Rights Reserved. + * Copyright (C) 2019-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #ifndef __XFS_SCRUB_ATTR_H__ diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c index 55b2af10aae9..ce8b17d76c0b 100644 --- a/fs/xfs/scrub/bitmap.c +++ b/fs/xfs/scrub/bitmap.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2018 Oracle. All Rights Reserved. + * Copyright (C) 2018-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h index c5db165d397e..85ec0e2792c5 100644 --- a/fs/xfs/scrub/bitmap.h +++ b/fs/xfs/scrub/bitmap.h @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2018 Oracle. All Rights Reserved. + * Copyright (C) 2018-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #ifndef __XFS_SCRUB_BITMAP_H__ diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c index d7a941936ceb..f6d8cb938a02 100644 --- a/fs/xfs/scrub/bmap.c +++ b/fs/xfs/scrub/bmap.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c index 1a32b1fb75a2..e54c1cfe64bf 100644 --- a/fs/xfs/scrub/btree.c +++ b/fs/xfs/scrub/btree.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/btree.h b/fs/xfs/scrub/btree.h index 1e3093d340c1..70461885c6c7 100644 --- a/fs/xfs/scrub/btree.h +++ b/fs/xfs/scrub/btree.h @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #ifndef __XFS_SCRUB_BTREE_H__ diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c index 6eec71f92310..597e6aca8628 100644 --- a/fs/xfs/scrub/common.c +++ b/fs/xfs/scrub/common.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h index 5fc0faeef18b..273a4331da05 100644 --- a/fs/xfs/scrub/common.h +++ b/fs/xfs/scrub/common.h @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. 
Wong */ #ifndef __XFS_SCRUB_COMMON_H__ diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c index 245971a7bb1d..c392c0765e5c 100644 --- a/fs/xfs/scrub/dabtree.c +++ b/fs/xfs/scrub/dabtree.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/dabtree.h b/fs/xfs/scrub/dabtree.h index a7ac9bf16db9..4f8c2138a1ec 100644 --- a/fs/xfs/scrub/dabtree.h +++ b/fs/xfs/scrub/dabtree.h @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #ifndef __XFS_SCRUB_DABTREE_H__ diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c index 24c6a967c67d..b6081a3e1b91 100644 --- a/fs/xfs/scrub/dir.c +++ b/fs/xfs/scrub/dir.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c index df57e0314b46..a38006c71bff 100644 --- a/fs/xfs/scrub/fscounters.c +++ b/fs/xfs/scrub/fscounters.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Copyright (C) 2019 Oracle. All Rights Reserved. + * Copyright (C) 2019-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/health.c b/fs/xfs/scrub/health.c index d416b8701c9a..66e99b0f6049 100644 --- a/fs/xfs/scrub/health.c +++ b/fs/xfs/scrub/health.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2019 Oracle. All Rights Reserved. + * Copyright (C) 2019-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/health.h b/fs/xfs/scrub/health.h index 2ef83db88a72..66a273f8585b 100644 --- a/fs/xfs/scrub/health.h +++ b/fs/xfs/scrub/health.h @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2019 Oracle. All Rights Reserved. + * Copyright (C) 2019-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #ifndef __XFS_SCRUB_HEALTH_H__ diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c index 9d5a51b4af4b..b14270bd1c62 100644 --- a/fs/xfs/scrub/ialloc.c +++ b/fs/xfs/scrub/ialloc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c index 0d8d697ca265..dc66a1465f1b 100644 --- a/fs/xfs/scrub/inode.c +++ b/fs/xfs/scrub/inode.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/parent.c b/fs/xfs/scrub/parent.c index c641b7d55a1d..d1db18250ee3 100644 --- a/fs/xfs/scrub/parent.c +++ b/fs/xfs/scrub/parent.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. 
Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c index a79e4c2cbd61..b019c70c065a 100644 --- a/fs/xfs/scrub/quota.c +++ b/fs/xfs/scrub/quota.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c index 2db52a56c38e..a5005b1d010d 100644 --- a/fs/xfs/scrub/refcount.c +++ b/fs/xfs/scrub/refcount.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index 0b740f533959..b800341aae69 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2018 Oracle. All Rights Reserved. + * Copyright (C) 2018-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h index b86cdfe506d8..4fbb52228c48 100644 --- a/fs/xfs/scrub/repair.h +++ b/fs/xfs/scrub/repair.h @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2018 Oracle. All Rights Reserved. + * Copyright (C) 2018-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #ifndef __XFS_SCRUB_REPAIR_H__ diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c index 2b16c9192447..4dc79e1a675d 100644 --- a/fs/xfs/scrub/rmap.c +++ b/fs/xfs/scrub/rmap.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c index 924a45778a0f..e7dace7b4be8 100644 --- a/fs/xfs/scrub/rtbitmap.c +++ b/fs/xfs/scrub/rtbitmap.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c index 67dcc5efcbb1..e8e2bee001e5 100644 --- a/fs/xfs/scrub/scrub.c +++ b/fs/xfs/scrub/scrub.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h index d72f2ccda091..72a5a8a64a87 100644 --- a/fs/xfs/scrub/scrub.h +++ b/fs/xfs/scrub/scrub.h @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #ifndef __XFS_SCRUB_SCRUB_H__ diff --git a/fs/xfs/scrub/symlink.c b/fs/xfs/scrub/symlink.c index 624f5e864c6f..38708fb9a5d7 100644 --- a/fs/xfs/scrub/symlink.c +++ b/fs/xfs/scrub/symlink.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. 
Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c index 315f872e1c91..0a975439d2b6 100644 --- a/fs/xfs/scrub/trace.c +++ b/fs/xfs/scrub/trace.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #include "xfs.h" diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index 9679ef7c3f01..81f7c3051a1a 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong * * NOTE: none of these tracepoints shall be considered a stable kernel ABI diff --git a/fs/xfs/scrub/xfs_scrub.h b/fs/xfs/scrub/xfs_scrub.h index 76c209c74fff..a39befa743ce 100644 --- a/fs/xfs/scrub/xfs_scrub.h +++ b/fs/xfs/scrub/xfs_scrub.h @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright (C) 2017 Oracle. All Rights Reserved. + * Copyright (C) 2017-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong */ #ifndef __XFS_SCRUB_H__ -- cgit v1.2.3 From 178b48d588ea5424a54423dc9c406416de0547c8 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 11 Apr 2023 19:00:35 -0700 Subject: xfs: remove the for_each_xbitmap_ helpers Remove the for_each_xbitmap_ macros in favor of proper iterator functions. We'll soon be switching this data structure over to an interval tree implementation, which means that we can't allow callers to modify the bitmap during iteration without telling us. Signed-off-by: Darrick J. Wong Reviewed-by: Dave Chinner --- fs/xfs/scrub/agheader_repair.c | 89 +++++++++++++++++++---------------- fs/xfs/scrub/bitmap.c | 59 +++++++++++++++++++++++ fs/xfs/scrub/bitmap.h | 22 ++++++--- fs/xfs/scrub/repair.c | 104 ++++++++++++++++++++++------------------- 4 files changed, 180 insertions(+), 94 deletions(-) (limited to 'fs/xfs/scrub/bitmap.c') diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c index edfb1dfb80a9..997ddcd1f124 100644 --- a/fs/xfs/scrub/agheader_repair.c +++ b/fs/xfs/scrub/agheader_repair.c @@ -487,10 +487,11 @@ xrep_agfl_walk_rmap( /* Strike out the blocks that are cross-linked according to the rmapbt. */ STATIC int xrep_agfl_check_extent( - struct xrep_agfl *ra, uint64_t start, - uint64_t len) + uint64_t len, + void *priv) { + struct xrep_agfl *ra = priv; xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(ra->sc->mp, start); xfs_agblock_t last_agbno = agbno + len - 1; int error; @@ -538,7 +539,6 @@ xrep_agfl_collect_blocks( struct xrep_agfl ra; struct xfs_mount *mp = sc->mp; struct xfs_btree_cur *cur; - struct xbitmap_range *br, *n; int error; ra.sc = sc; @@ -579,11 +579,7 @@ xrep_agfl_collect_blocks( /* Strike out the blocks that are cross-linked. */ ra.rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag); - for_each_xbitmap_extent(br, n, agfl_extents) { - error = xrep_agfl_check_extent(&ra, br->start, br->len); - if (error) - break; - } + error = xbitmap_walk(agfl_extents, xrep_agfl_check_extent, &ra); xfs_btree_del_cursor(ra.rmap_cur, error); if (error) goto out_bmp; @@ -629,6 +625,43 @@ xrep_agfl_update_agf( XFS_AGF_FLFIRST | XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); } +struct xrep_agfl_fill { + struct xbitmap used_extents; + struct xfs_scrub *sc; + __be32 *agfl_bno; + xfs_agblock_t flcount; + unsigned int fl_off; +}; + +/* Fill the AGFL with whatever blocks are in this extent. 
*/ +static int +xrep_agfl_fill( + uint64_t start, + uint64_t len, + void *priv) +{ + struct xrep_agfl_fill *af = priv; + struct xfs_scrub *sc = af->sc; + xfs_fsblock_t fsbno = start; + int error; + + while (fsbno < start + len && af->fl_off < af->flcount) + af->agfl_bno[af->fl_off++] = + cpu_to_be32(XFS_FSB_TO_AGBNO(sc->mp, fsbno++)); + + trace_xrep_agfl_insert(sc->mp, sc->sa.pag->pag_agno, + XFS_FSB_TO_AGBNO(sc->mp, start), len); + + error = xbitmap_set(&af->used_extents, start, fsbno - 1); + if (error) + return error; + + if (af->fl_off == af->flcount) + return -ECANCELED; + + return 0; +} + /* Write out a totally new AGFL. */ STATIC void xrep_agfl_init_header( @@ -637,13 +670,12 @@ xrep_agfl_init_header( struct xbitmap *agfl_extents, xfs_agblock_t flcount) { + struct xrep_agfl_fill af = { + .sc = sc, + .flcount = flcount, + }; struct xfs_mount *mp = sc->mp; - __be32 *agfl_bno; - struct xbitmap_range *br; - struct xbitmap_range *n; struct xfs_agfl *agfl; - xfs_agblock_t agbno; - unsigned int fl_off; ASSERT(flcount <= xfs_agfl_size(mp)); @@ -662,36 +694,15 @@ xrep_agfl_init_header( * blocks than fit in the AGFL, they will be freed in a subsequent * step. */ - fl_off = 0; - agfl_bno = xfs_buf_to_agfl_bno(agfl_bp); - for_each_xbitmap_extent(br, n, agfl_extents) { - agbno = XFS_FSB_TO_AGBNO(mp, br->start); - - trace_xrep_agfl_insert(mp, sc->sa.pag->pag_agno, agbno, - br->len); - - while (br->len > 0 && fl_off < flcount) { - agfl_bno[fl_off] = cpu_to_be32(agbno); - fl_off++; - agbno++; - - /* - * We've now used br->start by putting it in the AGFL, - * so bump br so that we don't reap the block later. - */ - br->start++; - br->len--; - } - - if (br->len) - break; - list_del(&br->list); - kfree(br); - } + xbitmap_init(&af.used_extents); + af.agfl_bno = xfs_buf_to_agfl_bno(agfl_bp), + xbitmap_walk(agfl_extents, xrep_agfl_fill, &af); + xbitmap_disunion(agfl_extents, &af.used_extents); /* Write new AGFL to disk. */ xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF); xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1); + xbitmap_destroy(&af.used_extents); } /* Repair the AGFL. */ diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c index ce8b17d76c0b..315b7c5844a3 100644 --- a/fs/xfs/scrub/bitmap.c +++ b/fs/xfs/scrub/bitmap.c @@ -13,6 +13,9 @@ #include "scrub/scrub.h" #include "scrub/bitmap.h" +#define for_each_xbitmap_extent(bex, n, bitmap) \ + list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) + /* * Set a range of this bitmap. Caller must ensure the range is not set. * @@ -313,3 +316,59 @@ xbitmap_hweight( return ret; } + +/* Call a function for every run of set bits in this bitmap. */ +int +xbitmap_walk( + struct xbitmap *bitmap, + xbitmap_walk_fn fn, + void *priv) +{ + struct xbitmap_range *bex, *n; + int error = 0; + + for_each_xbitmap_extent(bex, n, bitmap) { + error = fn(bex->start, bex->len, priv); + if (error) + break; + } + + return error; +} + +struct xbitmap_walk_bits { + xbitmap_walk_bits_fn fn; + void *priv; +}; + +/* Walk all the bits in a run. */ +static int +xbitmap_walk_bits_in_run( + uint64_t start, + uint64_t len, + void *priv) +{ + struct xbitmap_walk_bits *wb = priv; + uint64_t i; + int error = 0; + + for (i = start; i < start + len; i++) { + error = wb->fn(i, wb->priv); + if (error) + break; + } + + return error; +} + +/* Call a function for every set bit in this bitmap. 
*/ +int +xbitmap_walk_bits( + struct xbitmap *bitmap, + xbitmap_walk_bits_fn fn, + void *priv) +{ + struct xbitmap_walk_bits wb = {.fn = fn, .priv = priv}; + + return xbitmap_walk(bitmap, xbitmap_walk_bits_in_run, &wb); +} diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h index 85ec0e2792c5..01e37173dc34 100644 --- a/fs/xfs/scrub/bitmap.h +++ b/fs/xfs/scrub/bitmap.h @@ -19,13 +19,6 @@ struct xbitmap { void xbitmap_init(struct xbitmap *bitmap); void xbitmap_destroy(struct xbitmap *bitmap); -#define for_each_xbitmap_extent(bex, n, bitmap) \ - list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) - -#define for_each_xbitmap_block(b, bex, n, bitmap) \ - list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) \ - for ((b) = (bex)->start; (b) < (bex)->start + (bex)->len; (b)++) - int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len); int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub); int xbitmap_set_btcur_path(struct xbitmap *bitmap, @@ -34,4 +27,19 @@ int xbitmap_set_btblocks(struct xbitmap *bitmap, struct xfs_btree_cur *cur); uint64_t xbitmap_hweight(struct xbitmap *bitmap); +/* + * Return codes for the bitmap iterator functions are 0 to continue iterating, + * and non-zero to stop iterating. Any non-zero value will be passed up to the + * iteration caller. The special value -ECANCELED can be used to stop + * iteration, because neither bitmap iterator ever generates that error code on + * its own. Callers must not modify the bitmap while walking it. + */ +typedef int (*xbitmap_walk_fn)(uint64_t start, uint64_t len, void *priv); +int xbitmap_walk(struct xbitmap *bitmap, xbitmap_walk_fn fn, + void *priv); + +typedef int (*xbitmap_walk_bits_fn)(uint64_t bit, void *priv); +int xbitmap_walk_bits(struct xbitmap *bitmap, xbitmap_walk_bits_fn fn, + void *priv); + #endif /* __XFS_SCRUB_BITMAP_H__ */ diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index ab0758308f57..ac6d8803e660 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -445,6 +445,30 @@ xrep_init_btblock( * buffers associated with @bitmap. */ +static int +xrep_invalidate_block( + uint64_t fsbno, + void *priv) +{ + struct xfs_scrub *sc = priv; + struct xfs_buf *bp; + int error; + + /* Skip AG headers and post-EOFS blocks */ + if (!xfs_verify_fsbno(sc->mp, fsbno)) + return 0; + + error = xfs_buf_incore(sc->mp->m_ddev_targp, + XFS_FSB_TO_DADDR(sc->mp, fsbno), + XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK, &bp); + if (error) + return 0; + + xfs_trans_bjoin(sc->tp, bp); + xfs_trans_binval(sc->tp, bp); + return 0; +} + /* * Invalidate buffers for per-AG btree blocks we're dumping. This function * is not intended for use with file data repairs; we have bunmapi for that. @@ -454,11 +478,6 @@ xrep_invalidate_blocks( struct xfs_scrub *sc, struct xbitmap *bitmap) { - struct xbitmap_range *bmr; - struct xbitmap_range *n; - struct xfs_buf *bp; - xfs_fsblock_t fsbno; - /* * For each block in each extent, see if there's an incore buffer for * exactly that block; if so, invalidate it. The buffer cache only @@ -467,23 +486,7 @@ xrep_invalidate_blocks( * because we never own those; and if we can't TRYLOCK the buffer we * assume it's owned by someone else. 
*/ - for_each_xbitmap_block(fsbno, bmr, n, bitmap) { - int error; - - /* Skip AG headers and post-EOFS blocks */ - if (!xfs_verify_fsbno(sc->mp, fsbno)) - continue; - error = xfs_buf_incore(sc->mp->m_ddev_targp, - XFS_FSB_TO_DADDR(sc->mp, fsbno), - XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK, &bp); - if (error) - continue; - - xfs_trans_bjoin(sc->tp, bp); - xfs_trans_binval(sc->tp, bp); - } - - return 0; + return xbitmap_walk_bits(bitmap, xrep_invalidate_block, sc); } /* Ensure the freelist is the correct size. */ @@ -504,6 +507,15 @@ xrep_fix_freelist( can_shrink ? 0 : XFS_ALLOC_FLAG_NOSHRINK); } +/* Information about reaping extents after a repair. */ +struct xrep_reap_state { + struct xfs_scrub *sc; + + /* Reverse mapping owner and metadata reservation type. */ + const struct xfs_owner_info *oinfo; + enum xfs_ag_resv_type resv; +}; + /* * Put a block back on the AGFL. */ @@ -548,17 +560,23 @@ xrep_put_freelist( /* Dispose of a single block. */ STATIC int xrep_reap_block( - struct xfs_scrub *sc, - xfs_fsblock_t fsbno, - const struct xfs_owner_info *oinfo, - enum xfs_ag_resv_type resv) + uint64_t fsbno, + void *priv) { + struct xrep_reap_state *rs = priv; + struct xfs_scrub *sc = rs->sc; struct xfs_btree_cur *cur; struct xfs_buf *agf_bp = NULL; xfs_agblock_t agbno; bool has_other_rmap; int error; + ASSERT(sc->ip != NULL || + XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); + trace_xrep_dispose_btree_extent(sc->mp, + XFS_FSB_TO_AGNO(sc->mp, fsbno), + XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1); + agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); ASSERT(XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); @@ -577,7 +595,8 @@ xrep_reap_block( cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag); /* Can we find any other rmappings? */ - error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap); + error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo, + &has_other_rmap); xfs_btree_del_cursor(cur, error); if (error) goto out_free; @@ -597,12 +616,12 @@ xrep_reap_block( */ if (has_other_rmap) error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno, - 1, oinfo); - else if (resv == XFS_AG_RESV_AGFL) + 1, rs->oinfo); + else if (rs->resv == XFS_AG_RESV_AGFL) error = xrep_put_freelist(sc, agbno); else - error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, oinfo, - resv); + error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, rs->oinfo, + rs->resv); if (agf_bp != sc->sa.agf_bp) xfs_trans_brelse(sc->tp, agf_bp); if (error) @@ -626,26 +645,15 @@ xrep_reap_extents( const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type) { - struct xbitmap_range *bmr; - struct xbitmap_range *n; - xfs_fsblock_t fsbno; - int error = 0; + struct xrep_reap_state rs = { + .sc = sc, + .oinfo = oinfo, + .resv = type, + }; ASSERT(xfs_has_rmapbt(sc->mp)); - for_each_xbitmap_block(fsbno, bmr, n, bitmap) { - ASSERT(sc->ip != NULL || - XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); - trace_xrep_dispose_btree_extent(sc->mp, - XFS_FSB_TO_AGNO(sc->mp, fsbno), - XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1); - - error = xrep_reap_block(sc, fsbno, oinfo, type); - if (error) - break; - } - - return error; + return xbitmap_walk_bits(bitmap, xrep_reap_block, &rs); } /* -- cgit v1.2.3 From 7296a6d6fb8fef515352dac5d8af2ffe7a78e5cf Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 11 Apr 2023 19:00:36 -0700 Subject: xfs: drop the _safe behavior from the xbitmap foreach macro It's not safe to edit bitmap intervals while we're iterating them with for_each_xbitmap_extent. 
None of the existing callers actually need that ability anyway, so drop the safe variable. Signed-off-by: Darrick J. Wong Reviewed-by: Dave Chinner --- fs/xfs/scrub/bitmap.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'fs/xfs/scrub/bitmap.c') diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c index 315b7c5844a3..9927634a2c54 100644 --- a/fs/xfs/scrub/bitmap.c +++ b/fs/xfs/scrub/bitmap.c @@ -13,8 +13,9 @@ #include "scrub/scrub.h" #include "scrub/bitmap.h" -#define for_each_xbitmap_extent(bex, n, bitmap) \ - list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) +/* Iterate each interval of a bitmap. Do not change the bitmap. */ +#define for_each_xbitmap_extent(bex, bitmap) \ + list_for_each_entry((bex), &(bitmap)->list, list) /* * Set a range of this bitmap. Caller must ensure the range is not set. @@ -46,10 +47,9 @@ void xbitmap_destroy( struct xbitmap *bitmap) { - struct xbitmap_range *bmr; - struct xbitmap_range *n; + struct xbitmap_range *bmr, *n; - for_each_xbitmap_extent(bmr, n, bitmap) { + list_for_each_entry_safe(bmr, n, &bitmap->list, list) { list_del(&bmr->list); kfree(bmr); } @@ -308,10 +308,9 @@ xbitmap_hweight( struct xbitmap *bitmap) { struct xbitmap_range *bmr; - struct xbitmap_range *n; uint64_t ret = 0; - for_each_xbitmap_extent(bmr, n, bitmap) + for_each_xbitmap_extent(bmr, bitmap) ret += bmr->len; return ret; @@ -324,10 +323,10 @@ xbitmap_walk( xbitmap_walk_fn fn, void *priv) { - struct xbitmap_range *bex, *n; + struct xbitmap_range *bex; int error = 0; - for_each_xbitmap_extent(bex, n, bitmap) { + for_each_xbitmap_extent(bex, bitmap) { error = fn(bex->start, bex->len, priv); if (error) break; -- cgit v1.2.3 From 6772fcc8890ae34595253fcfb8196c1aea65e111 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 11 Apr 2023 19:00:36 -0700 Subject: xfs: convert xbitmap to interval tree Convert the xbitmap code to use interval trees instead of linked lists. This reduces the amount of coding required to handle the disunion operation and in the future will make it easier to set bits in arbitrary order yet later be able to extract maximally sized extents, which we'll need for rebuilding certain structures. We define our own interval tree type so that it can deal with 64-bit indices even on 32-bit machines. Signed-off-by: Darrick J. Wong Reviewed-by: Dave Chinner --- fs/xfs/scrub/agheader_repair.c | 12 +- fs/xfs/scrub/bitmap.c | 319 ++++++++++++++++++++++------------------- fs/xfs/scrub/bitmap.h | 11 +- 3 files changed, 185 insertions(+), 157 deletions(-) (limited to 'fs/xfs/scrub/bitmap.c') diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c index 997ddcd1f124..bbaa65422c4f 100644 --- a/fs/xfs/scrub/agheader_repair.c +++ b/fs/xfs/scrub/agheader_repair.c @@ -663,7 +663,7 @@ xrep_agfl_fill( } /* Write out a totally new AGFL. */ -STATIC void +STATIC int xrep_agfl_init_header( struct xfs_scrub *sc, struct xfs_buf *agfl_bp, @@ -676,6 +676,7 @@ xrep_agfl_init_header( }; struct xfs_mount *mp = sc->mp; struct xfs_agfl *agfl; + int error; ASSERT(flcount <= xfs_agfl_size(mp)); @@ -697,12 +698,15 @@ xrep_agfl_init_header( xbitmap_init(&af.used_extents); af.agfl_bno = xfs_buf_to_agfl_bno(agfl_bp), xbitmap_walk(agfl_extents, xrep_agfl_fill, &af); - xbitmap_disunion(agfl_extents, &af.used_extents); + error = xbitmap_disunion(agfl_extents, &af.used_extents); + if (error) + return error; /* Write new AGFL to disk. 
*/ xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF); xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1); xbitmap_destroy(&af.used_extents); + return 0; } /* Repair the AGFL. */ @@ -755,7 +759,9 @@ xrep_agfl( * buffers until we know that part works. */ xrep_agfl_update_agf(sc, agf_bp, flcount); - xrep_agfl_init_header(sc, agfl_bp, &agfl_extents, flcount); + error = xrep_agfl_init_header(sc, agfl_bp, &agfl_extents, flcount); + if (error) + goto err; /* * Ok, the AGFL should be ready to go now. Roll the transaction to diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c index 9927634a2c54..dc139f0031dc 100644 --- a/fs/xfs/scrub/bitmap.c +++ b/fs/xfs/scrub/bitmap.c @@ -13,31 +13,160 @@ #include "scrub/scrub.h" #include "scrub/bitmap.h" -/* Iterate each interval of a bitmap. Do not change the bitmap. */ -#define for_each_xbitmap_extent(bex, bitmap) \ - list_for_each_entry((bex), &(bitmap)->list, list) +#include + +struct xbitmap_node { + struct rb_node bn_rbnode; + + /* First set bit of this interval and subtree. */ + uint64_t bn_start; + + /* Last set bit of this interval. */ + uint64_t bn_last; + + /* Last set bit of this subtree. Do not touch this. */ + uint64_t __bn_subtree_last; +}; + +/* Define our own interval tree type with uint64_t parameters. */ + +#define START(node) ((node)->bn_start) +#define LAST(node) ((node)->bn_last) /* - * Set a range of this bitmap. Caller must ensure the range is not set. - * - * This is the logical equivalent of bitmap |= mask(start, len). + * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll + * forward-declare them anyway for clarity. */ +static inline void +xbitmap_tree_insert(struct xbitmap_node *node, struct rb_root_cached *root); + +static inline void +xbitmap_tree_remove(struct xbitmap_node *node, struct rb_root_cached *root); + +static inline struct xbitmap_node * +xbitmap_tree_iter_first(struct rb_root_cached *root, uint64_t start, + uint64_t last); + +static inline struct xbitmap_node * +xbitmap_tree_iter_next(struct xbitmap_node *node, uint64_t start, + uint64_t last); + +INTERVAL_TREE_DEFINE(struct xbitmap_node, bn_rbnode, uint64_t, + __bn_subtree_last, START, LAST, static inline, xbitmap_tree) + +/* Iterate each interval of a bitmap. Do not change the bitmap. */ +#define for_each_xbitmap_extent(bn, bitmap) \ + for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \ + struct xbitmap_node, bn_rbnode); \ + (bn) != NULL; \ + (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \ + struct xbitmap_node, bn_rbnode)) + +/* Clear a range of this bitmap. 
*/ +int +xbitmap_clear( + struct xbitmap *bitmap, + uint64_t start, + uint64_t len) +{ + struct xbitmap_node *bn; + struct xbitmap_node *new_bn; + uint64_t last = start + len - 1; + + while ((bn = xbitmap_tree_iter_first(&bitmap->xb_root, start, last))) { + if (bn->bn_start < start && bn->bn_last > last) { + uint64_t old_last = bn->bn_last; + + /* overlaps with the entire clearing range */ + xbitmap_tree_remove(bn, &bitmap->xb_root); + bn->bn_last = start - 1; + xbitmap_tree_insert(bn, &bitmap->xb_root); + + /* add an extent */ + new_bn = kmalloc(sizeof(struct xbitmap_node), + XCHK_GFP_FLAGS); + if (!new_bn) + return -ENOMEM; + new_bn->bn_start = last + 1; + new_bn->bn_last = old_last; + xbitmap_tree_insert(new_bn, &bitmap->xb_root); + } else if (bn->bn_start < start) { + /* overlaps with the left side of the clearing range */ + xbitmap_tree_remove(bn, &bitmap->xb_root); + bn->bn_last = start - 1; + xbitmap_tree_insert(bn, &bitmap->xb_root); + } else if (bn->bn_last > last) { + /* overlaps with the right side of the clearing range */ + xbitmap_tree_remove(bn, &bitmap->xb_root); + bn->bn_start = last + 1; + xbitmap_tree_insert(bn, &bitmap->xb_root); + break; + } else { + /* in the middle of the clearing range */ + xbitmap_tree_remove(bn, &bitmap->xb_root); + kfree(bn); + } + } + + return 0; +} + +/* Set a range of this bitmap. */ int xbitmap_set( struct xbitmap *bitmap, uint64_t start, uint64_t len) { - struct xbitmap_range *bmr; + struct xbitmap_node *left; + struct xbitmap_node *right; + uint64_t last = start + len - 1; + int error; - bmr = kmalloc(sizeof(struct xbitmap_range), XCHK_GFP_FLAGS); - if (!bmr) - return -ENOMEM; + /* Is this whole range already set? */ + left = xbitmap_tree_iter_first(&bitmap->xb_root, start, last); + if (left && left->bn_start <= start && left->bn_last >= last) + return 0; - INIT_LIST_HEAD(&bmr->list); - bmr->start = start; - bmr->len = len; - list_add_tail(&bmr->list, &bitmap->list); + /* Clear out everything in the range we want to set. */ + error = xbitmap_clear(bitmap, start, len); + if (error) + return error; + + /* Do we have a left-adjacent extent? */ + left = xbitmap_tree_iter_first(&bitmap->xb_root, start - 1, start - 1); + ASSERT(!left || left->bn_last + 1 == start); + + /* Do we have a right-adjacent extent? 
*/ + right = xbitmap_tree_iter_first(&bitmap->xb_root, last + 1, last + 1); + ASSERT(!right || right->bn_start == last + 1); + + if (left && right) { + /* combine left and right adjacent extent */ + xbitmap_tree_remove(left, &bitmap->xb_root); + xbitmap_tree_remove(right, &bitmap->xb_root); + left->bn_last = right->bn_last; + xbitmap_tree_insert(left, &bitmap->xb_root); + kfree(right); + } else if (left) { + /* combine with left extent */ + xbitmap_tree_remove(left, &bitmap->xb_root); + left->bn_last = last; + xbitmap_tree_insert(left, &bitmap->xb_root); + } else if (right) { + /* combine with right extent */ + xbitmap_tree_remove(right, &bitmap->xb_root); + right->bn_start = start; + xbitmap_tree_insert(right, &bitmap->xb_root); + } else { + /* add an extent */ + left = kmalloc(sizeof(struct xbitmap_node), XCHK_GFP_FLAGS); + if (!left) + return -ENOMEM; + left->bn_start = start; + left->bn_last = last; + xbitmap_tree_insert(left, &bitmap->xb_root); + } return 0; } @@ -47,11 +176,11 @@ void xbitmap_destroy( struct xbitmap *bitmap) { - struct xbitmap_range *bmr, *n; + struct xbitmap_node *bn; - list_for_each_entry_safe(bmr, n, &bitmap->list, list) { - list_del(&bmr->list); - kfree(bmr); + while ((bn = xbitmap_tree_iter_first(&bitmap->xb_root, 0, -1ULL))) { + xbitmap_tree_remove(bn, &bitmap->xb_root); + kfree(bn); } } @@ -60,27 +189,7 @@ void xbitmap_init( struct xbitmap *bitmap) { - INIT_LIST_HEAD(&bitmap->list); -} - -/* Compare two btree extents. */ -static int -xbitmap_range_cmp( - void *priv, - const struct list_head *a, - const struct list_head *b) -{ - struct xbitmap_range *ap; - struct xbitmap_range *bp; - - ap = container_of(a, struct xbitmap_range, list); - bp = container_of(b, struct xbitmap_range, list); - - if (ap->start > bp->start) - return 1; - if (ap->start < bp->start) - return -1; - return 0; + bitmap->xb_root = RB_ROOT_CACHED; } /* @@ -97,118 +206,26 @@ xbitmap_range_cmp( * * This is the logical equivalent of bitmap &= ~sub. */ -#define LEFT_ALIGNED (1 << 0) -#define RIGHT_ALIGNED (1 << 1) int xbitmap_disunion( struct xbitmap *bitmap, struct xbitmap *sub) { - struct list_head *lp; - struct xbitmap_range *br; - struct xbitmap_range *new_br; - struct xbitmap_range *sub_br; - uint64_t sub_start; - uint64_t sub_len; - int state; - int error = 0; + struct xbitmap_node *bn; + int error; - if (list_empty(&bitmap->list) || list_empty(&sub->list)) + if (xbitmap_empty(bitmap) || xbitmap_empty(sub)) return 0; - ASSERT(!list_empty(&sub->list)); - - list_sort(NULL, &bitmap->list, xbitmap_range_cmp); - list_sort(NULL, &sub->list, xbitmap_range_cmp); - - /* - * Now that we've sorted both lists, we iterate bitmap once, rolling - * forward through sub and/or bitmap as necessary until we find an - * overlap or reach the end of either list. We do not reset lp to the - * head of bitmap nor do we reset sub_br to the head of sub. The - * list traversal is similar to merge sort, but we're deleting - * instead. In this manner we avoid O(n^2) operations. - */ - sub_br = list_first_entry(&sub->list, struct xbitmap_range, - list); - lp = bitmap->list.next; - while (lp != &bitmap->list) { - br = list_entry(lp, struct xbitmap_range, list); - - /* - * Advance sub_br and/or br until we find a pair that - * intersect or we run out of extents. 
- */ - while (sub_br->start + sub_br->len <= br->start) { - if (list_is_last(&sub_br->list, &sub->list)) - goto out; - sub_br = list_next_entry(sub_br, list); - } - if (sub_br->start >= br->start + br->len) { - lp = lp->next; - continue; - } - /* trim sub_br to fit the extent we have */ - sub_start = sub_br->start; - sub_len = sub_br->len; - if (sub_br->start < br->start) { - sub_len -= br->start - sub_br->start; - sub_start = br->start; - } - if (sub_len > br->len) - sub_len = br->len; - - state = 0; - if (sub_start == br->start) - state |= LEFT_ALIGNED; - if (sub_start + sub_len == br->start + br->len) - state |= RIGHT_ALIGNED; - switch (state) { - case LEFT_ALIGNED: - /* Coincides with only the left. */ - br->start += sub_len; - br->len -= sub_len; - break; - case RIGHT_ALIGNED: - /* Coincides with only the right. */ - br->len -= sub_len; - lp = lp->next; - break; - case LEFT_ALIGNED | RIGHT_ALIGNED: - /* Total overlap, just delete ex. */ - lp = lp->next; - list_del(&br->list); - kfree(br); - break; - case 0: - /* - * Deleting from the middle: add the new right extent - * and then shrink the left extent. - */ - new_br = kmalloc(sizeof(struct xbitmap_range), - XCHK_GFP_FLAGS); - if (!new_br) { - error = -ENOMEM; - goto out; - } - INIT_LIST_HEAD(&new_br->list); - new_br->start = sub_start + sub_len; - new_br->len = br->start + br->len - new_br->start; - list_add(&new_br->list, &br->list); - br->len = sub_start - br->start; - lp = lp->next; - break; - default: - ASSERT(0); - break; - } + for_each_xbitmap_extent(bn, sub) { + error = xbitmap_clear(bitmap, bn->bn_start, + bn->bn_last - bn->bn_start + 1); + if (error) + return error; } -out: - return error; + return 0; } -#undef LEFT_ALIGNED -#undef RIGHT_ALIGNED /* * Record all btree blocks seen while iterating all records of a btree. @@ -307,11 +324,11 @@ uint64_t xbitmap_hweight( struct xbitmap *bitmap) { - struct xbitmap_range *bmr; + struct xbitmap_node *bn; uint64_t ret = 0; - for_each_xbitmap_extent(bmr, bitmap) - ret += bmr->len; + for_each_xbitmap_extent(bn, bitmap) + ret += bn->bn_last - bn->bn_start + 1; return ret; } @@ -320,14 +337,14 @@ xbitmap_hweight( int xbitmap_walk( struct xbitmap *bitmap, - xbitmap_walk_fn fn, + xbitmap_walk_fn fn, void *priv) { - struct xbitmap_range *bex; + struct xbitmap_node *bn; int error = 0; - for_each_xbitmap_extent(bex, bitmap) { - error = fn(bex->start, bex->len, priv); + for_each_xbitmap_extent(bn, bitmap) { + error = fn(bn->bn_start, bn->bn_last - bn->bn_start + 1, priv); if (error) break; } @@ -371,3 +388,11 @@ xbitmap_walk_bits( return xbitmap_walk(bitmap, xbitmap_walk_bits_in_run, &wb); } + +/* Does this bitmap have no bits set at all? 
*/ +bool +xbitmap_empty( + struct xbitmap *bitmap) +{ + return bitmap->xb_root.rb_root.rb_node == NULL; +} diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h index 01e37173dc34..2ec4e1f3f24c 100644 --- a/fs/xfs/scrub/bitmap.h +++ b/fs/xfs/scrub/bitmap.h @@ -6,19 +6,14 @@ #ifndef __XFS_SCRUB_BITMAP_H__ #define __XFS_SCRUB_BITMAP_H__ -struct xbitmap_range { - struct list_head list; - uint64_t start; - uint64_t len; -}; - struct xbitmap { - struct list_head list; + struct rb_root_cached xb_root; }; void xbitmap_init(struct xbitmap *bitmap); void xbitmap_destroy(struct xbitmap *bitmap); +int xbitmap_clear(struct xbitmap *bitmap, uint64_t start, uint64_t len); int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len); int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub); int xbitmap_set_btcur_path(struct xbitmap *bitmap, @@ -42,4 +37,6 @@ typedef int (*xbitmap_walk_bits_fn)(uint64_t bit, void *priv); int xbitmap_walk_bits(struct xbitmap *bitmap, xbitmap_walk_bits_fn fn, void *priv); +bool xbitmap_empty(struct xbitmap *bitmap); + #endif /* __XFS_SCRUB_BITMAP_H__ */ -- cgit v1.2.3 From fed050f3452da070fa90fc1b02c2bc2219d687a7 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 11 Apr 2023 19:00:38 -0700 Subject: xfs: cross-reference rmap records with ag btrees Strengthen the rmap btree record checker a little more by comparing OWN_FS and OWN_LOG reverse mappings against the AG headers and internal logs, respectively. Signed-off-by: Darrick J. Wong Reviewed-by: Dave Chinner --- fs/xfs/Makefile | 2 +- fs/xfs/scrub/bitmap.c | 22 +++++++ fs/xfs/scrub/bitmap.h | 21 +++++++ fs/xfs/scrub/rmap.c | 159 +++++++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 202 insertions(+), 2 deletions(-) (limited to 'fs/xfs/scrub/bitmap.c') diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index ac9d03cd2623..16e4eb431230 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -148,6 +148,7 @@ xfs-y += $(addprefix scrub/, \ agheader.o \ alloc.o \ attr.o \ + bitmap.o \ bmap.o \ btree.o \ common.o \ @@ -172,7 +173,6 @@ xfs-$(CONFIG_XFS_QUOTA) += scrub/quota.o ifeq ($(CONFIG_XFS_ONLINE_REPAIR),y) xfs-y += $(addprefix scrub/, \ agheader_repair.o \ - bitmap.o \ repair.o \ ) endif diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c index dc139f0031dc..85e5beda186f 100644 --- a/fs/xfs/scrub/bitmap.c +++ b/fs/xfs/scrub/bitmap.c @@ -396,3 +396,25 @@ xbitmap_empty( { return bitmap->xb_root.rb_root.rb_node == NULL; } + +/* Is the start of the range set or clear? And for how long? 
*/ +bool +xbitmap_test( + struct xbitmap *bitmap, + uint64_t start, + uint64_t *len) +{ + struct xbitmap_node *bn; + uint64_t last = start + *len - 1; + + bn = xbitmap_tree_iter_first(&bitmap->xb_root, start, last); + if (!bn) + return false; + if (bn->bn_start <= start) { + if (bn->bn_last < last) + *len = bn->bn_last - start + 1; + return true; + } + *len = bn->bn_start - start; + return false; +} diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h index 972d5445cdb6..55441feb039f 100644 --- a/fs/xfs/scrub/bitmap.h +++ b/fs/xfs/scrub/bitmap.h @@ -38,6 +38,7 @@ int xbitmap_walk_bits(struct xbitmap *bitmap, xbitmap_walk_bits_fn fn, void *priv); bool xbitmap_empty(struct xbitmap *bitmap); +bool xbitmap_test(struct xbitmap *bitmap, uint64_t start, uint64_t *len); /* Bitmaps, but for type-checked for xfs_agblock_t */ @@ -66,6 +67,26 @@ static inline int xagb_bitmap_set(struct xagb_bitmap *bitmap, return xbitmap_set(&bitmap->agbitmap, start, len); } +static inline bool +xagb_bitmap_test( + struct xagb_bitmap *bitmap, + xfs_agblock_t start, + xfs_extlen_t *len) +{ + uint64_t biglen = *len; + bool ret; + + ret = xbitmap_test(&bitmap->agbitmap, start, &biglen); + + if (start + biglen >= UINT_MAX) { + ASSERT(0); + biglen = UINT_MAX - start; + } + + *len = biglen; + return ret; +} + static inline int xagb_bitmap_disunion(struct xagb_bitmap *bitmap, struct xagb_bitmap *sub) { diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c index 6d7e294110a2..759349ccca26 100644 --- a/fs/xfs/scrub/rmap.c +++ b/fs/xfs/scrub/rmap.c @@ -12,10 +12,12 @@ #include "xfs_btree.h" #include "xfs_rmap.h" #include "xfs_refcount.h" +#include "xfs_ag.h" +#include "xfs_bit.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/btree.h" -#include "xfs_ag.h" +#include "scrub/bitmap.h" /* * Set us up to scrub reverse mapping btrees. @@ -45,6 +47,13 @@ struct xchk_rmap { * that could be one. */ struct xfs_rmap_irec prev_rec; + + /* Bitmaps containing all blocks for each type of AG metadata. */ + struct xagb_bitmap fs_owned; + struct xagb_bitmap log_owned; + + /* Did we complete the AG space metadata bitmaps? */ + bool bitmaps_complete; }; /* Cross-reference a rmap against the refcount btree. */ @@ -249,6 +258,68 @@ xchk_rmapbt_check_mergeable( memcpy(&cr->prev_rec, irec, sizeof(struct xfs_rmap_irec)); } +/* Compare an rmap for AG metadata against the metadata walk. */ +STATIC int +xchk_rmapbt_mark_bitmap( + struct xchk_btree *bs, + struct xchk_rmap *cr, + const struct xfs_rmap_irec *irec) +{ + struct xfs_scrub *sc = bs->sc; + struct xagb_bitmap *bmp = NULL; + xfs_extlen_t fsbcount = irec->rm_blockcount; + + /* + * Skip corrupt records. It is essential that we detect records in the + * btree that cannot overlap but do, flag those as CORRUPT, and skip + * the bitmap comparison to avoid generating false XCORRUPT reports. + */ + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return 0; + + /* + * If the AG metadata walk didn't complete, there's no point in + * comparing against partial results. + */ + if (!cr->bitmaps_complete) + return 0; + + switch (irec->rm_owner) { + case XFS_RMAP_OWN_FS: + bmp = &cr->fs_owned; + break; + case XFS_RMAP_OWN_LOG: + bmp = &cr->log_owned; + break; + } + + if (!bmp) + return 0; + + if (xagb_bitmap_test(bmp, irec->rm_startblock, &fsbcount)) { + /* + * The start of this reverse mapping corresponds to a set + * region in the bitmap. If the mapping covers more area than + * the set region, then it covers space that wasn't found by + * the AG metadata walk. 
+ */ + if (fsbcount < irec->rm_blockcount) + xchk_btree_xref_set_corrupt(bs->sc, + bs->sc->sa.rmap_cur, 0); + } else { + /* + * The start of this reverse mapping does not correspond to a + * completely set region in the bitmap. The region wasn't + * fully set by walking the AG metadata, so this is a + * cross-referencing corruption. + */ + xchk_btree_xref_set_corrupt(bs->sc, bs->sc->sa.rmap_cur, 0); + } + + /* Unset the region so that we can detect missing rmap records. */ + return xagb_bitmap_clear(bmp, irec->rm_startblock, irec->rm_blockcount); +} + /* Scrub an rmapbt record. */ STATIC int xchk_rmapbt_rec( @@ -268,9 +339,80 @@ xchk_rmapbt_rec( xchk_rmapbt_check_mergeable(bs, cr, &irec); xchk_rmapbt_check_overlapping(bs, cr, &irec); xchk_rmapbt_xref(bs->sc, &irec); + + return xchk_rmapbt_mark_bitmap(bs, cr, &irec); +} + +/* + * Set up bitmaps mapping all the AG metadata to compare with the rmapbt + * records. + */ +STATIC int +xchk_rmapbt_walk_ag_metadata( + struct xfs_scrub *sc, + struct xchk_rmap *cr) +{ + struct xfs_mount *mp = sc->mp; + int error; + + /* OWN_FS: AG headers */ + error = xagb_bitmap_set(&cr->fs_owned, XFS_SB_BLOCK(mp), + XFS_AGFL_BLOCK(mp) - XFS_SB_BLOCK(mp) + 1); + if (error) + goto out; + + /* OWN_LOG: Internal log */ + if (xfs_ag_contains_log(mp, sc->sa.pag->pag_agno)) { + error = xagb_bitmap_set(&cr->log_owned, + XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart), + mp->m_sb.sb_logblocks); + if (error) + goto out; + } + +out: + /* + * If there's an error, set XFAIL and disable the bitmap + * cross-referencing checks, but proceed with the scrub anyway. + */ + if (error) + xchk_btree_xref_process_error(sc, sc->sa.rmap_cur, + sc->sa.rmap_cur->bc_nlevels - 1, &error); + else + cr->bitmaps_complete = true; return 0; } +/* + * Check for set regions in the bitmaps; if there are any, the rmap records do + * not describe all the AG metadata. + */ +STATIC void +xchk_rmapbt_check_bitmaps( + struct xfs_scrub *sc, + struct xchk_rmap *cr) +{ + struct xfs_btree_cur *cur = sc->sa.rmap_cur; + unsigned int level; + + if (sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT | + XFS_SCRUB_OFLAG_XFAIL)) + return; + if (!cur) + return; + level = cur->bc_nlevels - 1; + + /* + * Any bitmap with bits still set indicates that the reverse mapping + * doesn't cover the entire primary structure. + */ + if (xagb_bitmap_hweight(&cr->fs_owned) != 0) + xchk_btree_xref_set_corrupt(sc, cur, level); + + if (xagb_bitmap_hweight(&cr->log_owned) != 0) + xchk_btree_xref_set_corrupt(sc, cur, level); +} + /* Scrub the rmap btree for some AG. */ int xchk_rmapbt( @@ -283,8 +425,23 @@ xchk_rmapbt( if (!cr) return -ENOMEM; + xagb_bitmap_init(&cr->fs_owned); + xagb_bitmap_init(&cr->log_owned); + + error = xchk_rmapbt_walk_ag_metadata(sc, cr); + if (error) + goto out; + error = xchk_btree(sc, sc->sa.rmap_cur, xchk_rmapbt_rec, &XFS_RMAP_OINFO_AG, cr); + if (error) + goto out; + + xchk_rmapbt_check_bitmaps(sc, cr); + +out: + xagb_bitmap_destroy(&cr->log_owned); + xagb_bitmap_destroy(&cr->fs_owned); kfree(cr); return error; } -- cgit v1.2.3 From 3a3108ea8c1d4f33ca49fa9fc80e6a6e450654cf Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 11 Apr 2023 19:00:38 -0700 Subject: xfs: cross-reference rmap records with free space btrees Strengthen the rmap btree record checker a little more by comparing OWN_AG reverse mappings against the free space btrees, the rmap btree, and the AGFL. Signed-off-by: Darrick J. 
Wong Reviewed-by: Dave Chinner --- fs/xfs/scrub/bitmap.c | 33 ++++++++++++++++++++++++++ fs/xfs/scrub/bitmap.h | 3 +++ fs/xfs/scrub/rmap.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 102 insertions(+) (limited to 'fs/xfs/scrub/bitmap.c') diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c index 85e5beda186f..0c959be396ea 100644 --- a/fs/xfs/scrub/bitmap.c +++ b/fs/xfs/scrub/bitmap.c @@ -6,6 +6,7 @@ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" +#include "xfs_bit.h" #include "xfs_format.h" #include "xfs_trans_resv.h" #include "xfs_mount.h" @@ -262,6 +263,38 @@ xbitmap_disunion( * For the 300th record we just exit, with the list being [1, 4, 2, 3]. */ +/* Mark a btree block to the agblock bitmap. */ +STATIC int +xagb_bitmap_visit_btblock( + struct xfs_btree_cur *cur, + int level, + void *priv) +{ + struct xagb_bitmap *bitmap = priv; + struct xfs_buf *bp; + xfs_fsblock_t fsbno; + xfs_agblock_t agbno; + + xfs_btree_get_block(cur, level, &bp); + if (!bp) + return 0; + + fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)); + agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno); + + return xagb_bitmap_set(bitmap, agbno, 1); +} + +/* Mark all (per-AG) btree blocks in the agblock bitmap. */ +int +xagb_bitmap_set_btblocks( + struct xagb_bitmap *bitmap, + struct xfs_btree_cur *cur) +{ + return xfs_btree_visit_blocks(cur, xagb_bitmap_visit_btblock, + XFS_BTREE_VISIT_ALL, bitmap); +} + /* * Record all the buffers pointed to by the btree cursor. Callers already * engaged in a btree walk should call this function to capture the list of diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h index 55441feb039f..84981724ecaf 100644 --- a/fs/xfs/scrub/bitmap.h +++ b/fs/xfs/scrub/bitmap.h @@ -108,4 +108,7 @@ static inline int xagb_bitmap_walk(struct xagb_bitmap *bitmap, return xbitmap_walk(&bitmap->agbitmap, fn, priv); } +int xagb_bitmap_set_btblocks(struct xagb_bitmap *bitmap, + struct xfs_btree_cur *cur); + #endif /* __XFS_SCRUB_BITMAP_H__ */ diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c index 759349ccca26..1febadb269c5 100644 --- a/fs/xfs/scrub/rmap.c +++ b/fs/xfs/scrub/rmap.c @@ -7,13 +7,17 @@ #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" +#include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_mount.h" +#include "xfs_trans.h" #include "xfs_btree.h" #include "xfs_rmap.h" #include "xfs_refcount.h" #include "xfs_ag.h" #include "xfs_bit.h" +#include "xfs_alloc.h" +#include "xfs_alloc_btree.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/btree.h" @@ -51,6 +55,7 @@ struct xchk_rmap { /* Bitmaps containing all blocks for each type of AG metadata. */ struct xagb_bitmap fs_owned; struct xagb_bitmap log_owned; + struct xagb_bitmap ag_owned; /* Did we complete the AG space metadata bitmaps? */ bool bitmaps_complete; @@ -291,6 +296,9 @@ xchk_rmapbt_mark_bitmap( case XFS_RMAP_OWN_LOG: bmp = &cr->log_owned; break; + case XFS_RMAP_OWN_AG: + bmp = &cr->ag_owned; + break; } if (!bmp) @@ -343,9 +351,26 @@ xchk_rmapbt_rec( return xchk_rmapbt_mark_bitmap(bs, cr, &irec); } +/* Add an AGFL block to the rmap list. */ +STATIC int +xchk_rmapbt_walk_agfl( + struct xfs_mount *mp, + xfs_agblock_t agbno, + void *priv) +{ + struct xagb_bitmap *bitmap = priv; + + return xagb_bitmap_set(bitmap, agbno, 1); +} + /* * Set up bitmaps mapping all the AG metadata to compare with the rmapbt * records. 
+ * + * Grab our own btree cursors here if the scrub setup function didn't give us a + * btree cursor due to reports of poor health. We need to find out if the + * rmapbt disagrees with primary metadata btrees to tag the rmapbt as being + * XCORRUPT. */ STATIC int xchk_rmapbt_walk_ag_metadata( @@ -353,6 +378,9 @@ xchk_rmapbt_walk_ag_metadata( struct xchk_rmap *cr) { struct xfs_mount *mp = sc->mp; + struct xfs_buf *agfl_bp; + struct xfs_agf *agf = sc->sa.agf_bp->b_addr; + struct xfs_btree_cur *cur; int error; /* OWN_FS: AG headers */ @@ -370,6 +398,39 @@ xchk_rmapbt_walk_ag_metadata( goto out; } + /* OWN_AG: bnobt, cntbt, rmapbt, and AGFL */ + cur = sc->sa.bno_cur; + if (!cur) + cur = xfs_allocbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, + sc->sa.pag, XFS_BTNUM_BNO); + error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur); + if (cur != sc->sa.bno_cur) + xfs_btree_del_cursor(cur, error); + if (error) + goto out; + + cur = sc->sa.cnt_cur; + if (!cur) + cur = xfs_allocbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, + sc->sa.pag, XFS_BTNUM_CNT); + error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur); + if (cur != sc->sa.cnt_cur) + xfs_btree_del_cursor(cur, error); + if (error) + goto out; + + error = xagb_bitmap_set_btblocks(&cr->ag_owned, sc->sa.rmap_cur); + if (error) + goto out; + + error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp); + if (error) + goto out; + + error = xfs_agfl_walk(sc->mp, agf, agfl_bp, xchk_rmapbt_walk_agfl, + &cr->ag_owned); + xfs_trans_brelse(sc->tp, agfl_bp); + out: /* * If there's an error, set XFAIL and disable the bitmap @@ -411,6 +472,9 @@ xchk_rmapbt_check_bitmaps( if (xagb_bitmap_hweight(&cr->log_owned) != 0) xchk_btree_xref_set_corrupt(sc, cur, level); + + if (xagb_bitmap_hweight(&cr->ag_owned) != 0) + xchk_btree_xref_set_corrupt(sc, cur, level); } /* Scrub the rmap btree for some AG. */ @@ -427,6 +491,7 @@ xchk_rmapbt( xagb_bitmap_init(&cr->fs_owned); xagb_bitmap_init(&cr->log_owned); + xagb_bitmap_init(&cr->ag_owned); error = xchk_rmapbt_walk_ag_metadata(sc, cr); if (error) @@ -440,6 +505,7 @@ xchk_rmapbt( xchk_rmapbt_check_bitmaps(sc, cr); out: + xagb_bitmap_destroy(&cr->ag_owned); xagb_bitmap_destroy(&cr->log_owned); xagb_bitmap_destroy(&cr->fs_owned); kfree(cr); -- cgit v1.2.3
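Illustrative sketch (not part of the series above): the patches convert struct xbitmap to an interval tree and replace the for_each_xbitmap_ macros with xbitmap_walk()/xbitmap_walk_bits() iterators that take a callback plus a priv pointer, and that treat a callback return of -ECANCELED as "stop iterating" rather than as an error. The fragment below shows how a caller might use that API. It is a minimal sketch assuming the fs/xfs/scrub build environment; the xexample_* names, the extent values, and the standalone helper are hypothetical and do not appear in the patches.

/*
 * Hypothetical caller of the xbitmap API added above.  A minimal sketch,
 * assuming the kernel fs/xfs/scrub context; not part of the patch series.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "scrub/scrub.h"
#include "scrub/bitmap.h"

/* Called once per contiguous run of set bits (an xbitmap_walk_fn). */
static int
xexample_count_extent(
	uint64_t		start,
	uint64_t		len,
	void			*priv)
{
	unsigned int		*nr = priv;

	(*nr)++;

	/*
	 * Returning -ECANCELED stops the walk early; the iterators never
	 * generate that code on their own, so callers may use it freely.
	 */
	if (*nr >= 8)
		return -ECANCELED;
	return 0;
}

/* Collect extents, drop the ones already used, then walk what is left. */
static int
xexample_walk_leftovers(void)
{
	struct xbitmap		all;
	struct xbitmap		used;
	unsigned int		nr = 0;
	int			error;

	xbitmap_init(&all);
	xbitmap_init(&used);

	error = xbitmap_set(&all, 10, 20);	/* set bits 10..29 */
	if (error)
		goto out;
	error = xbitmap_set(&used, 15, 5);	/* set bits 15..19 */
	if (error)
		goto out;

	/* all &= ~used, leaving bits 10..14 and 20..29 (15 bits) set. */
	error = xbitmap_disunion(&all, &used);
	if (error)
		goto out;
	ASSERT(xbitmap_hweight(&all) == 15);

	/* The bitmap must not be modified while it is being walked. */
	error = xbitmap_walk(&all, xexample_count_extent, &nr);
	if (error == -ECANCELED)
		error = 0;
out:
	xbitmap_destroy(&used);
	xbitmap_destroy(&all);
	return error;
}

This mirrors how the series itself uses the iterators: xrep_agfl_init_header() collects candidate extents, subtracts the consumed ones with xbitmap_disunion(), and xrep_reap_extents()/xrep_invalidate_blocks() hand each remaining block to a callback via xbitmap_walk_bits().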