Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/Kconfig                        |   26
-rw-r--r--  fs/btrfs/Makefile                       |    3
-rw-r--r--  fs/btrfs/async-thread.c                 |    6
-rw-r--r--  fs/btrfs/backref.c                      |  175
-rw-r--r--  fs/btrfs/backref.h                      |   16
-rw-r--r--  fs/btrfs/bio.c                          |   83
-rw-r--r--  fs/btrfs/bio.h                          |    3
-rw-r--r--  fs/btrfs/block-group.c                  |   68
-rw-r--r--  fs/btrfs/block-rsv.c                    |   10
-rw-r--r--  fs/btrfs/btrfs_inode.h                  |   17
-rw-r--r--  fs/btrfs/compression.c                  |   14
-rw-r--r--  fs/btrfs/compression.h                  |    2
-rw-r--r--  fs/btrfs/ctree.c                        |  217
-rw-r--r--  fs/btrfs/ctree.h                        |   53
-rw-r--r--  fs/btrfs/defrag.c                       |   10
-rw-r--r--  fs/btrfs/delalloc-space.c               |    2
-rw-r--r--  fs/btrfs/delayed-inode.c                |   49
-rw-r--r--  fs/btrfs/delayed-inode.h                |    2
-rw-r--r--  fs/btrfs/delayed-ref.c                  |  439
-rw-r--r--  fs/btrfs/delayed-ref.h                  |   71
-rw-r--r--  fs/btrfs/dev-replace.c                  |    7
-rw-r--r--  fs/btrfs/dir-item.c                     |   17
-rw-r--r--  fs/btrfs/dir-item.h                     |    3
-rw-r--r--  fs/btrfs/direct-io.c                    |    5
-rw-r--r--  fs/btrfs/disk-io.c                      |  179
-rw-r--r--  fs/btrfs/disk-io.h                      |    9
-rw-r--r--  fs/btrfs/extent-tree.c                  |  340
-rw-r--r--  fs/btrfs/extent-tree.h                  |    7
-rw-r--r--  fs/btrfs/extent_io.c                    |  395
-rw-r--r--  fs/btrfs/extent_io.h                    |   16
-rw-r--r--  fs/btrfs/extent_map.c                   |  160
-rw-r--r--  fs/btrfs/extent_map.h                   |    3
-rw-r--r--  fs/btrfs/fiemap.c                       |    6
-rw-r--r--  fs/btrfs/file-item.c                    |    3
-rw-r--r--  fs/btrfs/file.c                         |  461
-rw-r--r--  fs/btrfs/file.h                         |    7
-rw-r--r--  fs/btrfs/free-space-cache.c             |   33
-rw-r--r--  fs/btrfs/free-space-cache.h             |    6
-rw-r--r--  fs/btrfs/free-space-tree.c              |   11
-rw-r--r--  fs/btrfs/fs.c                           |  130
-rw-r--r--  fs/btrfs/fs.h                           |   47
-rw-r--r--  fs/btrfs/inode-item.c                   |    5
-rw-r--r--  fs/btrfs/inode.c                        |  974
-rw-r--r--  fs/btrfs/ioctl.c                        |  723
-rw-r--r--  fs/btrfs/ioctl.h                        |    3
-rw-r--r--  fs/btrfs/locking.c                      |   15
-rw-r--r--  fs/btrfs/locking.h                      |   16
-rw-r--r--  fs/btrfs/lzo.c                          |    2
-rw-r--r--  fs/btrfs/ordered-data.c                 |   14
-rw-r--r--  fs/btrfs/qgroup.c                       |  147
-rw-r--r--  fs/btrfs/qgroup.h                       |   19
-rw-r--r--  fs/btrfs/raid-stripe-tree.c             |  218
-rw-r--r--  fs/btrfs/raid-stripe-tree.h             |    5
-rw-r--r--  fs/btrfs/raid56.c                       |    3
-rw-r--r--  fs/btrfs/ref-verify.c                   |    1
-rw-r--r--  fs/btrfs/relocation.c                   |  371
-rw-r--r--  fs/btrfs/root-tree.c                    |    2
-rw-r--r--  fs/btrfs/scrub.c                        |   41
-rw-r--r--  fs/btrfs/send.c                         |   74
-rw-r--r--  fs/btrfs/send.h                         |    2
-rw-r--r--  fs/btrfs/space-info.c                   |   81
-rw-r--r--  fs/btrfs/space-info.h                   |   15
-rw-r--r--  fs/btrfs/subpage.c                      |  251
-rw-r--r--  fs/btrfs/subpage.h                      |   52
-rw-r--r--  fs/btrfs/super.c                        |  157
-rw-r--r--  fs/btrfs/sysfs.c                        |  188
-rw-r--r--  fs/btrfs/sysfs.h                        |    6
-rw-r--r--  fs/btrfs/tests/btrfs-tests.c            |   22
-rw-r--r--  fs/btrfs/tests/btrfs-tests.h            |    8
-rw-r--r--  fs/btrfs/tests/delayed-refs-tests.c     | 1015
-rw-r--r--  fs/btrfs/tests/raid-stripe-tree-tests.c | 1161
-rw-r--r--  fs/btrfs/transaction.c                  |   11
-rw-r--r--  fs/btrfs/transaction.h                  |   20
-rw-r--r--  fs/btrfs/tree-checker.c                 |  139
-rw-r--r--  fs/btrfs/tree-checker.h                 |   11
-rw-r--r--  fs/btrfs/tree-log.c                     |   13
-rw-r--r--  fs/btrfs/tree-mod-log.c                 |    1
-rw-r--r--  fs/btrfs/tree-mod-log.h                 |    1
-rw-r--r--  fs/btrfs/uuid-tree.c                    |    2
-rw-r--r--  fs/btrfs/volumes.c                      |  396
-rw-r--r--  fs/btrfs/volumes.h                      |   38
-rw-r--r--  fs/btrfs/xattr.c                        |    6
-rw-r--r--  fs/btrfs/zlib.c                         |    6
-rw-r--r--  fs/btrfs/zoned.c                        |  148
-rw-r--r--  fs/btrfs/zoned.h                        |    7
-rw-r--r--  fs/btrfs/zstd.c                         |    4
86 files changed, 6557 insertions, 2948 deletions
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 4fb925e8c981..fa8515598341 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -78,6 +78,32 @@ config BTRFS_ASSERT
If unsure, say N.
+config BTRFS_EXPERIMENTAL
+ bool "Btrfs experimental features"
+ depends on BTRFS_FS
+ default n
+ help
+ Enable experimental features. These features may not be stable enough
+ for end users. This is meant for btrfs developers or users who wish
+ to test the functionality and report problems.
+
+ Current list:
+
+ - extent map shrinker - performance problems with too frequent shrinks
+
+ - send stream protocol v3 - fs-verity support
+
+ - checksum offload mode - sysfs knob to affect when checksums are
+ calculated (at IO time, or in a thread)
+
+ - raid-stripe-tree - additional mapping of extents to devices to
+ support RAID1* profiles on zoned devices,
+ RAID56 not yet supported
+
+ - extent tree v2 - complex rework of extent tracking
+
+ If unsure, say N.
+
config BTRFS_FS_REF_VERIFY
bool "Btrfs with the ref verify tool compiled in"
depends on BTRFS_FS
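
The new symbol takes over from CONFIG_BTRFS_DEBUG as the gate for in-development code paths; for instance, the checksum-offload sysfs knob in bio.c below moves from #ifdef CONFIG_BTRFS_DEBUG to #ifdef CONFIG_BTRFS_EXPERIMENTAL. A minimal sketch of the gating pattern, using the idiomatic IS_ENABLED() form (the helper name is illustrative, not part of this series):

#include <linux/kconfig.h>
#include <linux/types.h>

/* Illustrative helper: compile-time check of the new Kconfig symbol. */
static inline bool btrfs_experimental_build(void)
{
	return IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL);
}
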
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 87617f2968bc..2d5f0482678b 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -43,4 +43,5 @@ btrfs-$(CONFIG_FS_VERITY) += verity.o
btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
tests/extent-buffer-tests.o tests/btrfs-tests.o \
tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o \
- tests/free-space-tree-tests.o tests/extent-map-tests.o
+ tests/free-space-tree-tests.o tests/extent-map-tests.o \
+ tests/raid-stripe-tree-tests.o tests/delayed-refs-tests.o
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 361a866c1995..a4c51600a408 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -18,7 +18,7 @@ enum {
};
#define NO_THRESHOLD (-1)
-#define DFT_THRESHOLD (32)
+#define DEFAULT_THRESHOLD (32)
struct btrfs_workqueue {
struct workqueue_struct *normal_wq;
@@ -94,9 +94,9 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
ret->limit_active = limit_active;
if (thresh == 0)
- thresh = DFT_THRESHOLD;
+ thresh = DEFAULT_THRESHOLD;
/* For low threshold, disabling threshold is a better choice */
- if (thresh < DFT_THRESHOLD) {
+ if (thresh < DEFAULT_THRESHOLD) {
ret->current_active = limit_active;
ret->thresh = NO_THRESHOLD;
} else {
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index f8e1d5b2c512..3d3923cfc357 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -250,6 +250,21 @@ static int prelim_ref_compare(const struct prelim_ref *ref1,
return 0;
}
+static int prelim_ref_rb_add_cmp(const struct rb_node *new,
+ const struct rb_node *exist)
+{
+ const struct prelim_ref *ref_new =
+ rb_entry(new, struct prelim_ref, rbnode);
+ const struct prelim_ref *ref_exist =
+ rb_entry(exist, struct prelim_ref, rbnode);
+
+ /*
+ * prelim_ref_compare() expects the existing entry as its first parameter,
+ * which is the opposite of the rb_find_add_cached() argument order.
+ */
+ return prelim_ref_compare(ref_exist, ref_new);
+}
+
static void update_share_count(struct share_check *sc, int oldcount,
int newcount, const struct prelim_ref *newref)
{
@@ -278,55 +293,39 @@ static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
struct share_check *sc)
{
struct rb_root_cached *root;
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct prelim_ref *ref;
- int result;
- bool leftmost = true;
+ struct rb_node *exist;
root = &preftree->root;
- p = &root->rb_root.rb_node;
+ exist = rb_find_add_cached(&newref->rbnode, root, prelim_ref_rb_add_cmp);
+ if (exist) {
+ struct prelim_ref *ref = rb_entry(exist, struct prelim_ref, rbnode);
+ /* Identical refs, merge them and free @newref */
+ struct extent_inode_elem *eie = ref->inode_list;
- while (*p) {
- parent = *p;
- ref = rb_entry(parent, struct prelim_ref, rbnode);
- result = prelim_ref_compare(ref, newref);
- if (result < 0) {
- p = &(*p)->rb_left;
- } else if (result > 0) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else {
- /* Identical refs, merge them and free @newref */
- struct extent_inode_elem *eie = ref->inode_list;
-
- while (eie && eie->next)
- eie = eie->next;
+ while (eie && eie->next)
+ eie = eie->next;
- if (!eie)
- ref->inode_list = newref->inode_list;
- else
- eie->next = newref->inode_list;
- trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
- preftree->count);
- /*
- * A delayed ref can have newref->count < 0.
- * The ref->count is updated to follow any
- * BTRFS_[ADD|DROP]_DELAYED_REF actions.
- */
- update_share_count(sc, ref->count,
- ref->count + newref->count, newref);
- ref->count += newref->count;
- free_pref(newref);
- return;
- }
+ if (!eie)
+ ref->inode_list = newref->inode_list;
+ else
+ eie->next = newref->inode_list;
+ trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
+ preftree->count);
+ /*
+ * A delayed ref can have newref->count < 0.
+ * The ref->count is updated to follow any
+ * BTRFS_[ADD|DROP]_DELAYED_REF actions.
+ */
+ update_share_count(sc, ref->count,
+ ref->count + newref->count, newref);
+ ref->count += newref->count;
+ free_pref(newref);
+ return;
}
update_share_count(sc, 0, newref->count, newref);
preftree->count++;
trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
- rb_link_node(&newref->rbnode, parent, p);
- rb_insert_color_cached(&newref->rbnode, root, leftmost);
}
/*
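
Several conversions in this series share the shape shown above: the open-coded descent with rb_link_node() plus rb_insert_color_cached() is replaced by a comparator and a single rb_find_add_cached() call, which inserts the new node or hands back the already-present one. A condensed sketch of the pattern, with illustrative names:

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
	struct rb_node node;
	u64 key;
};

static int item_cmp(const struct rb_node *new, const struct rb_node *exist)
{
	const struct item *a = rb_entry(new, struct item, node);
	const struct item *b = rb_entry(exist, struct item, node);

	if (a->key < b->key)
		return -1;
	if (a->key > b->key)
		return 1;
	return 0;
}

/* Insert @new; returns the existing item on collision, NULL on success. */
static struct item *item_insert(struct rb_root_cached *root, struct item *new)
{
	struct rb_node *exist = rb_find_add_cached(&new->node, root, item_cmp);

	return exist ? rb_entry(exist, struct item, node) : NULL;
}
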
@@ -1442,7 +1441,8 @@ again:
*/
delayed_refs = &ctx->trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
- head = btrfs_find_delayed_ref_head(delayed_refs, ctx->bytenr);
+ head = btrfs_find_delayed_ref_head(ctx->fs_info, delayed_refs,
+ ctx->bytenr);
if (head) {
if (!mutex_trylock(&head->mutex)) {
refcount_inc(&head->refs);
@@ -3021,9 +3021,6 @@ void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
cache->rb_root = RB_ROOT;
for (i = 0; i < BTRFS_MAX_LEVEL; i++)
INIT_LIST_HEAD(&cache->pending[i]);
- INIT_LIST_HEAD(&cache->changed);
- INIT_LIST_HEAD(&cache->detached);
- INIT_LIST_HEAD(&cache->leaves);
INIT_LIST_HEAD(&cache->pending_edge);
INIT_LIST_HEAD(&cache->useless_node);
cache->fs_info = fs_info;
@@ -3131,29 +3128,17 @@ void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
struct btrfs_backref_node *node)
{
- struct btrfs_backref_node *upper;
struct btrfs_backref_edge *edge;
if (!node)
return;
- BUG_ON(!node->lowest && !node->detached);
while (!list_empty(&node->upper)) {
edge = list_entry(node->upper.next, struct btrfs_backref_edge,
list[LOWER]);
- upper = edge->node[UPPER];
list_del(&edge->list[LOWER]);
list_del(&edge->list[UPPER]);
btrfs_backref_free_edge(cache, edge);
-
- /*
- * Add the node to leaf node list if no other child block
- * cached.
- */
- if (list_empty(&upper->lower)) {
- list_add_tail(&upper->lower, &cache->leaves);
- upper->lowest = 1;
- }
}
btrfs_backref_drop_node(cache, node);
@@ -3165,33 +3150,13 @@ void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
{
struct btrfs_backref_node *node;
- int i;
-
- while (!list_empty(&cache->detached)) {
- node = list_entry(cache->detached.next,
- struct btrfs_backref_node, list);
- btrfs_backref_cleanup_node(cache, node);
- }
- while (!list_empty(&cache->leaves)) {
- node = list_entry(cache->leaves.next,
- struct btrfs_backref_node, lower);
+ while ((node = rb_entry_safe(rb_first(&cache->rb_root),
+ struct btrfs_backref_node, rb_node)))
btrfs_backref_cleanup_node(cache, node);
- }
- for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
- while (!list_empty(&cache->pending[i])) {
- node = list_first_entry(&cache->pending[i],
- struct btrfs_backref_node,
- list);
- btrfs_backref_cleanup_node(cache, node);
- }
- }
ASSERT(list_empty(&cache->pending_edge));
ASSERT(list_empty(&cache->useless_node));
- ASSERT(list_empty(&cache->changed));
- ASSERT(list_empty(&cache->detached));
- ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
ASSERT(!cache->nr_nodes);
ASSERT(!cache->nr_edges);
}
@@ -3315,8 +3280,12 @@ static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
if (IS_ERR(root))
return PTR_ERR(root);
- if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
- cur->cowonly = 1;
+
+ /* We shouldn't be using backref cache for non-shareable roots. */
+ if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) {
+ btrfs_put_root(root);
+ return -EUCLEAN;
+ }
if (btrfs_root_level(&root->root_item) == cur->level) {
/* Tree root */
@@ -3402,8 +3371,15 @@ static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
goto out;
}
upper->owner = btrfs_header_owner(eb);
- if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
- upper->cowonly = 1;
+
+ /* We shouldn't be using backref cache for non-shareable roots. */
+ if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) {
+ btrfs_put_root(root);
+ btrfs_backref_free_edge(cache, edge);
+ btrfs_backref_free_node(cache, upper);
+ ret = -EUCLEAN;
+ goto out;
+ }
/*
* If we know the block isn't shared we can avoid
@@ -3594,15 +3570,9 @@ int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
ASSERT(start->checked);
- /* Insert this node to cache if it's not COW-only */
- if (!start->cowonly) {
- rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
- &start->rb_node);
- if (rb_node)
- btrfs_backref_panic(cache->fs_info, start->bytenr,
- -EEXIST);
- list_add_tail(&start->lower, &cache->leaves);
- }
+ rb_node = rb_simple_insert(&cache->rb_root, start->bytenr, &start->rb_node);
+ if (rb_node)
+ btrfs_backref_panic(cache->fs_info, start->bytenr, -EEXIST);
/*
* Use breadth first search to iterate all related edges.
@@ -3641,11 +3611,6 @@ int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
* parents have already been linked.
*/
if (!RB_EMPTY_NODE(&upper->rb_node)) {
- if (upper->lowest) {
- list_del_init(&upper->lower);
- upper->lowest = 0;
- }
-
list_add_tail(&edge->list[UPPER], &upper->lower);
continue;
}
@@ -3656,23 +3621,13 @@ int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
return -EUCLEAN;
}
- /* Sanity check, COW-only node has non-COW-only parent */
- if (start->cowonly != upper->cowonly) {
- ASSERT(0);
+ rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
+ &upper->rb_node);
+ if (unlikely(rb_node)) {
+ btrfs_backref_panic(cache->fs_info, upper->bytenr, -EEXIST);
return -EUCLEAN;
}
- /* Only cache non-COW-only (subvolume trees) tree blocks */
- if (!upper->cowonly) {
- rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
- &upper->rb_node);
- if (rb_node) {
- btrfs_backref_panic(cache->fs_info,
- upper->bytenr, -EEXIST);
- return -EUCLEAN;
- }
- }
-
list_add_tail(&edge->list[UPPER], &upper->lower);
/*
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index e8c22cccb5c1..74e614031274 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -318,6 +318,12 @@ struct btrfs_backref_node {
u64 bytenr;
}; /* Use rb_simple_node for search/insert */
+ /*
+ * This is a sanity check: whenever we COW a block we will update
+ * new_bytenr with its current location, and we will check this in
+ * various places to validate that the cache makes sense; it shouldn't
+ * be used for anything else.
+ */
u64 new_bytenr;
/* Objectid of tree block owner, can be not uptodate */
u64 owner;
@@ -335,10 +341,6 @@ struct btrfs_backref_node {
struct extent_buffer *eb;
/* Level of the tree block */
unsigned int level:8;
- /* Is the block in a non-shareable tree */
- unsigned int cowonly:1;
- /* 1 if no child node is in the cache */
- unsigned int lowest:1;
/* Is the extent buffer locked */
unsigned int locked:1;
/* Has the block been processed */
@@ -391,12 +393,6 @@ struct btrfs_backref_cache {
* level blocks may not reflect the new location
*/
struct list_head pending[BTRFS_MAX_LEVEL];
- /* List of backref nodes with no child node */
- struct list_head leaves;
- /* List of blocks that have been COWed in current transaction */
- struct list_head changed;
- /* List of detached backref node. */
- struct list_head detached;
u64 last_trans;
diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index fec5c6cde0a7..bc2555c44a12 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -49,6 +49,7 @@ void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
bbio->end_io = end_io;
bbio->private = private;
atomic_set(&bbio->pending_ios, 1);
+ WRITE_ONCE(bbio->status, BLK_STS_OK);
}
/*
@@ -80,6 +81,9 @@ static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT, GFP_NOFS,
&btrfs_clone_bioset);
+ if (IS_ERR(bio))
+ return ERR_CAST(bio);
+
bbio = btrfs_bio(bio);
btrfs_bio_init(bbio, fs_info, NULL, orig_bbio);
bbio->inode = orig_bbio->inode;
@@ -113,41 +117,29 @@ static void __btrfs_bio_end_io(struct btrfs_bio *bbio)
}
}
-static void btrfs_orig_write_end_io(struct bio *bio);
-
-static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
- struct btrfs_bio *orig_bbio)
-{
- /*
- * For writes we tolerate nr_mirrors - 1 write failures, so we can't
- * just blindly propagate a write failure here. Instead increment the
- * error count in the original I/O context so that it is guaranteed to
- * be larger than the error tolerance.
- */
- if (bbio->bio.bi_end_io == &btrfs_orig_write_end_io) {
- struct btrfs_io_stripe *orig_stripe = orig_bbio->bio.bi_private;
- struct btrfs_io_context *orig_bioc = orig_stripe->bioc;
-
- atomic_add(orig_bioc->max_errors, &orig_bioc->error);
- } else {
- orig_bbio->bio.bi_status = bbio->bio.bi_status;
- }
-}
-
void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
{
bbio->bio.bi_status = status;
if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
struct btrfs_bio *orig_bbio = bbio->private;
- if (bbio->bio.bi_status)
- btrfs_bbio_propagate_error(bbio, orig_bbio);
btrfs_cleanup_bio(bbio);
bbio = orig_bbio;
}
- if (atomic_dec_and_test(&bbio->pending_ios))
+ /*
+ * At this point, bbio always points to the original btrfs_bio. Save
+ * the first error in it.
+ */
+ if (status != BLK_STS_OK)
+ cmpxchg(&bbio->status, BLK_STS_OK, status);
+
+ if (atomic_dec_and_test(&bbio->pending_ios)) {
+ /* Load the split bio's error, which might have been set above. */
+ if (status == BLK_STS_OK)
+ bbio->bio.bi_status = READ_ONCE(bbio->status);
__btrfs_bio_end_io(bbio);
+ }
}
static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
@@ -366,7 +358,7 @@ static void btrfs_simple_end_io(struct bio *bio)
INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
} else {
- if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
+ if (bio_is_zone_append(bio) && !bio->bi_status)
btrfs_record_physical_zoned(bbio);
btrfs_bio_end_io(bbio, bbio->bio.bi_status);
}
@@ -409,7 +401,7 @@ static void btrfs_orig_write_end_io(struct bio *bio)
else
bio->bi_status = BLK_STS_OK;
- if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
+ if (bio_is_zone_append(bio) && !bio->bi_status)
stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
btrfs_bio_end_io(bbio, bbio->bio.bi_status);
@@ -423,7 +415,7 @@ static void btrfs_clone_write_end_io(struct bio *bio)
if (bio->bi_status) {
atomic_inc(&stripe->bioc->error);
btrfs_log_dev_io_error(bio, stripe->dev);
- } else if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ } else if (bio_is_zone_append(bio)) {
stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
}
@@ -461,6 +453,14 @@ static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
(unsigned long)dev->bdev->bd_dev, btrfs_dev_name(dev),
dev->devid, bio->bi_iter.bi_size);
+ /*
+ * Track reads if tracking is enabled; ignore I/O operations before the
+ * filesystem is fully initialized.
+ */
+ if (dev->fs_devices->collect_fs_stats && bio_op(bio) == REQ_OP_READ && dev->fs_info)
+ percpu_counter_add(&dev->fs_info->stats_read_blocks,
+ bio->bi_iter.bi_size >> dev->fs_info->sectorsize_bits);
+
if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT)
blkcg_punt_bio_submit(bio);
else
@@ -598,7 +598,7 @@ static bool should_async_write(struct btrfs_bio *bbio)
{
bool auto_csum_mode = true;
-#ifdef CONFIG_BTRFS_DEBUG
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
struct btrfs_fs_devices *fs_devices = bbio->fs_info->fs_devices;
enum btrfs_offload_csum_mode csum_mode = READ_ONCE(fs_devices->offload_csum_mode);
@@ -660,8 +660,14 @@ static u64 btrfs_append_map_length(struct btrfs_bio *bbio, u64 map_length)
map_length = min(map_length, bbio->fs_info->max_zone_append_size);
sector_offset = bio_split_rw_at(&bbio->bio, &bbio->fs_info->limits,
&nr_segs, map_length);
- if (sector_offset)
- return sector_offset << SECTOR_SHIFT;
+ if (sector_offset) {
+ /*
+ * bio_split_rw_at() could split at a size smaller than our
+ * sectorsize and thus cause unaligned I/Os. Fix that by
+ * always rounding down to the nearest boundary.
+ */
+ return ALIGN_DOWN(sector_offset << SECTOR_SHIFT, bbio->fs_info->sectorsize);
+ }
return map_length;
}
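
The rounding matters because bio_split_rw_at() works in 512-byte sectors while btrfs requires splits at filesystem sectorsize granularity; rounding down keeps the front half aligned and defers the remainder to the next submission. A worked example of the arithmetic, assuming a 4K sectorsize (values illustrative):

#include <linux/align.h>
#include <linux/types.h>

#define EXAMPLE_SECTORSIZE	4096u

static u64 example_split_len(void)
{
	/* Suppose bio_split_rw_at() picked 70144 bytes (68 KiB + 512). */
	u64 split_bytes = 70144;

	/* ALIGN_DOWN(70144, 4096) == 69632 (68 KiB), a sectorsize multiple. */
	return ALIGN_DOWN(split_bytes, EXAMPLE_SECTORSIZE);
}
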
@@ -689,7 +695,8 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
&bioc, &smap, &mirror_num);
if (error) {
ret = errno_to_blk_status(error);
- goto fail;
+ btrfs_bio_counter_dec(fs_info);
+ goto end_bbio;
}
map_length = min(map_length, length);
@@ -697,7 +704,15 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
map_length = btrfs_append_map_length(bbio, map_length);
if (map_length < length) {
- bbio = btrfs_split_bio(fs_info, bbio, map_length);
+ struct btrfs_bio *split;
+
+ split = btrfs_split_bio(fs_info, bbio, map_length);
+ if (IS_ERR(split)) {
+ ret = errno_to_blk_status(PTR_ERR(split));
+ btrfs_bio_counter_dec(fs_info);
+ goto end_bbio;
+ }
+ bbio = split;
bio = &bbio->bio;
}
@@ -718,8 +733,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
bio->bi_opf |= REQ_OP_ZONE_APPEND;
}
- if (is_data_bbio(bbio) && bioc &&
- btrfs_need_stripe_tree_update(bioc->fs_info, bioc->map_type)) {
+ if (is_data_bbio(bbio) && bioc && bioc->use_rst) {
/*
* No locking for the list update, as we only add to
* the list in the I/O submission path, and list
@@ -771,6 +785,7 @@ fail:
btrfs_bio_end_io(remaining, ret);
}
+end_bbio:
btrfs_bio_end_io(bbio, ret);
/* Do not submit another chunk */
return true;
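
With btrfs_bbio_propagate_error() gone, error handling for split bios reduces to the scheme visible above: each completion records its status into the original bbio with a cmpxchg() so only the first failure sticks, and the last completion to drop pending_ios publishes it. A condensed sketch of that first-error-wins pattern (structure names illustrative):

#include <linux/atomic.h>
#include <linux/blk_types.h>

struct parent_io {
	atomic_t pending;	/* One reference per outstanding child bio. */
	blk_status_t status;	/* BLK_STS_OK until the first failure. */
};

static void child_end_io(struct parent_io *parent, blk_status_t status)
{
	/* Record only the first error; later failures lose the race. */
	if (status != BLK_STS_OK)
		cmpxchg(&parent->status, BLK_STS_OK, status);

	if (atomic_dec_and_test(&parent->pending)) {
		/* The final completion sees BLK_STS_OK or the first error. */
		blk_status_t final = READ_ONCE(parent->status);

		/* ... complete the parent I/O with 'final' ... */
		(void)final;
	}
}
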
diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h
index e48612340745..e2fe16074ad6 100644
--- a/fs/btrfs/bio.h
+++ b/fs/btrfs/bio.h
@@ -79,6 +79,9 @@ struct btrfs_bio {
/* File system that this I/O operates on. */
struct btrfs_fs_info *fs_info;
+ /* Save the first error status of split bio. */
+ blk_status_t status;
+
/*
* This member must come last, bio_alloc_bioset will allocate enough
* bytes for entire btrfs_bio but relies on bio being last.
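
The read accounting added to btrfs_submit_dev_bio() above uses a percpu counter (fs_info->stats_read_blocks) so the per-bio hot path stays a cheap CPU-local add; the expensive summation is deferred to whoever reads the statistic. A minimal sketch of that pattern (names illustrative):

#include <linux/gfp.h>
#include <linux/percpu_counter.h>

struct read_stats {
	struct percpu_counter read_blocks;
};

static int read_stats_init(struct read_stats *s)
{
	return percpu_counter_init(&s->read_blocks, 0, GFP_KERNEL);
}

/* Submission path: a mostly lock-free, CPU-local increment. */
static void read_stats_account(struct read_stats *s, u32 bytes, u32 sectorsize_bits)
{
	percpu_counter_add(&s->read_blocks, bytes >> sectorsize_bits);
}

/* Reporting path: fold the per-CPU deltas into one value. */
static s64 read_stats_sum(struct read_stats *s)
{
	return percpu_counter_sum(&s->read_blocks);
}
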
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 7980b2e33a92..c0a8f7d92acc 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -173,43 +173,41 @@ void btrfs_put_block_group(struct btrfs_block_group *cache)
}
}
+static int btrfs_bg_start_cmp(const struct rb_node *new,
+ const struct rb_node *exist)
+{
+ const struct btrfs_block_group *new_bg =
+ rb_entry(new, struct btrfs_block_group, cache_node);
+ const struct btrfs_block_group *exist_bg =
+ rb_entry(exist, struct btrfs_block_group, cache_node);
+
+ if (new_bg->start < exist_bg->start)
+ return -1;
+ if (new_bg->start > exist_bg->start)
+ return 1;
+ return 0;
+}
+
/*
* This adds the block group to the fs_info rb tree for the block group cache
*/
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
struct btrfs_block_group *block_group)
{
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct btrfs_block_group *cache;
- bool leftmost = true;
+ struct rb_node *exist;
+ int ret = 0;
ASSERT(block_group->length != 0);
write_lock(&info->block_group_cache_lock);
- p = &info->block_group_cache_tree.rb_root.rb_node;
-
- while (*p) {
- parent = *p;
- cache = rb_entry(parent, struct btrfs_block_group, cache_node);
- if (block_group->start < cache->start) {
- p = &(*p)->rb_left;
- } else if (block_group->start > cache->start) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else {
- write_unlock(&info->block_group_cache_lock);
- return -EEXIST;
- }
- }
-
- rb_link_node(&block_group->cache_node, parent, p);
- rb_insert_color_cached(&block_group->cache_node,
- &info->block_group_cache_tree, leftmost);
+ exist = rb_find_add_cached(&block_group->cache_node,
+ &info->block_group_cache_tree, btrfs_bg_start_cmp);
+ if (exist)
+ ret = -EEXIST;
write_unlock(&info->block_group_cache_lock);
- return 0;
+ return ret;
}
/*
@@ -1223,7 +1221,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
block_group->space_info->total_bytes -= block_group->length;
block_group->space_info->bytes_readonly -=
(block_group->length - block_group->zone_unusable);
- btrfs_space_info_update_bytes_zone_unusable(fs_info, block_group->space_info,
+ btrfs_space_info_update_bytes_zone_unusable(block_group->space_info,
-block_group->zone_unusable);
block_group->space_info->disk_total -= block_group->length * factor;
@@ -1396,8 +1394,7 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
if (btrfs_is_zoned(cache->fs_info)) {
/* Migrate zone_unusable bytes to readonly */
sinfo->bytes_readonly += cache->zone_unusable;
- btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
- -cache->zone_unusable);
+ btrfs_space_info_update_bytes_zone_unusable(sinfo, -cache->zone_unusable);
cache->zone_unusable = 0;
}
cache->ro++;
@@ -1645,8 +1642,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
spin_lock(&space_info->lock);
spin_lock(&block_group->lock);
- btrfs_space_info_update_bytes_pinned(fs_info, space_info,
- -block_group->pinned);
+ btrfs_space_info_update_bytes_pinned(space_info, -block_group->pinned);
space_info->bytes_readonly += block_group->pinned;
block_group->pinned = 0;
@@ -2672,7 +2668,6 @@ static int insert_dev_extent(struct btrfs_trans_handle *trans,
btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
btrfs_set_dev_extent_length(leaf, extent, num_bytes);
- btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
return ret;
@@ -2797,7 +2792,7 @@ next:
* uncompressed data size, because the compression is only done
* when writeback triggered and we don't know how much space we
* are actually going to need, so we reserve the uncompressed
- * size because the data may be uncompressible in the worst case.
+ * size because the data may be incompressible in the worst case.
*/
if (ret == 0) {
bool used;
@@ -3060,8 +3055,7 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
(cache->alloc_offset - cache->used - cache->pinned -
cache->reserved) +
(cache->length - cache->zone_capacity);
- btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
- cache->zone_unusable);
+ btrfs_space_info_update_bytes_zone_unusable(sinfo, cache->zone_unusable);
sinfo->bytes_readonly -= cache->zone_unusable;
}
num_bytes = cache->length - cache->reserved -
@@ -3123,7 +3117,6 @@ static int update_block_group_item(struct btrfs_trans_handle *trans,
cache->global_root_id);
btrfs_set_stack_block_group_flags(&bgi, cache->flags);
write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
- btrfs_mark_buffer_dirty(trans, leaf);
fail:
btrfs_release_path(path);
/*
@@ -3699,7 +3692,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
old_val -= num_bytes;
cache->used = old_val;
cache->pinned += num_bytes;
- btrfs_space_info_update_bytes_pinned(info, space_info, num_bytes);
+ btrfs_space_info_update_bytes_pinned(space_info, num_bytes);
space_info->bytes_used -= num_bytes;
space_info->disk_used -= num_bytes * factor;
if (READ_ONCE(space_info->periodic_reclaim))
@@ -3781,8 +3774,7 @@ int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
space_info->bytes_reserved += num_bytes;
trace_btrfs_space_reservation(cache->fs_info, "space_info",
space_info->flags, num_bytes, 1);
- btrfs_space_info_update_bytes_may_use(cache->fs_info,
- space_info, -ram_bytes);
+ btrfs_space_info_update_bytes_may_use(space_info, -ram_bytes);
if (delalloc)
cache->delalloc_bytes += num_bytes;
@@ -3819,6 +3811,8 @@ void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
spin_lock(&cache->lock);
if (cache->ro)
space_info->bytes_readonly += num_bytes;
+ else if (btrfs_is_zoned(cache->fs_info))
+ space_info->bytes_zone_unusable += num_bytes;
cache->reserved -= num_bytes;
space_info->bytes_reserved -= num_bytes;
space_info->max_extent_size = 0;
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
index a07b9594dc70..3f3608299c0b 100644
--- a/fs/btrfs/block-rsv.c
+++ b/fs/btrfs/block-rsv.c
@@ -150,9 +150,7 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
spin_unlock(&dest->lock);
}
if (num_bytes)
- btrfs_space_info_free_bytes_may_use(fs_info,
- space_info,
- num_bytes);
+ btrfs_space_info_free_bytes_may_use(space_info, num_bytes);
}
if (qgroup_to_release_ret)
*qgroup_to_release_ret = qgroup_to_release;
@@ -383,13 +381,11 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
if (block_rsv->reserved < block_rsv->size) {
num_bytes = block_rsv->size - block_rsv->reserved;
- btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
- num_bytes);
+ btrfs_space_info_update_bytes_may_use(sinfo, num_bytes);
block_rsv->reserved = block_rsv->size;
} else if (block_rsv->reserved > block_rsv->size) {
num_bytes = block_rsv->reserved - block_rsv->size;
- btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
- -num_bytes);
+ btrfs_space_info_update_bytes_may_use(sinfo, -num_bytes);
block_rsv->reserved = block_rsv->size;
btrfs_try_granting_tickets(fs_info, sinfo);
}
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index e152fde888fc..b2fa33911c28 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -526,7 +526,7 @@ bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
u32 bio_offset, struct bio_vec *bv);
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
struct btrfs_file_extent *file_extent,
- bool nowait, bool strict);
+ bool nowait);
void btrfs_del_delalloc_inode(struct btrfs_inode *inode);
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
@@ -577,7 +577,6 @@ void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state
struct extent_state *other);
void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
struct extent_state *orig, u64 split);
-void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end);
void btrfs_evict_inode(struct inode *inode);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
@@ -613,11 +612,17 @@ int btrfs_writepage_cow_fixup(struct folio *folio);
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
int compress_type);
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
- u64 file_offset, u64 disk_bytenr,
- u64 disk_io_size,
- struct page **pages);
+ u64 disk_bytenr, u64 disk_io_size,
+ struct page **pages, void *uring_ctx);
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
- struct btrfs_ioctl_encoded_io_args *encoded);
+ struct btrfs_ioctl_encoded_io_args *encoded,
+ struct extent_state **cached_state,
+ u64 *disk_bytenr, u64 *disk_io_size);
+ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
+ u64 start, u64 lockend,
+ struct extent_state **cached_state,
+ u64 disk_bytenr, u64 disk_io_size,
+ size_t count, bool compressed, bool *unlocked);
ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
const struct btrfs_ioctl_encoded_io_args *encoded);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 90aef2627ca2..0c4d486c3048 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -453,7 +453,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
if (pg_index > end_index)
break;
- folio = __filemap_get_folio(mapping, pg_index, 0, 0);
+ folio = filemap_get_folio(mapping, pg_index);
if (!IS_ERR(folio)) {
u64 folio_sz = folio_size(folio);
u64 offset = offset_in_folio(folio, cur);
@@ -545,8 +545,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
* subpage::readers and to unlock the page.
*/
if (fs_info->sectorsize < PAGE_SIZE)
- btrfs_subpage_start_reader(fs_info, folio, cur,
- add_size);
+ btrfs_folio_set_lock(fs_info, folio, cur, add_size);
folio_put(folio);
cur += add_size;
}
@@ -702,7 +701,7 @@ static void free_heuristic_ws(struct list_head *ws)
kfree(workspace);
}
-static struct list_head *alloc_heuristic_ws(unsigned int level)
+static struct list_head *alloc_heuristic_ws(void)
{
struct heuristic_ws *ws;
@@ -744,9 +743,9 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
static struct list_head *alloc_workspace(int type, unsigned int level)
{
switch (type) {
- case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
+ case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws();
case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
- case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
+ case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace();
case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
default:
/*
@@ -1030,6 +1029,7 @@ int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping
{
int type = btrfs_compress_type(type_level);
int level = btrfs_compress_level(type_level);
+ const unsigned long orig_len = *total_out;
struct list_head *workspace;
int ret;
@@ -1037,6 +1037,8 @@ int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping
workspace = get_workspace(type, level);
ret = compression_compress_pages(type, workspace, mapping, start, folios,
out_folios, total_in, total_out);
+ /* The total read-in bytes should be no larger than the input. */
+ ASSERT(*total_in <= orig_len);
put_workspace(type, workspace);
return ret;
}
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index b6563b6a333e..954034086d0d 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -175,7 +175,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress(struct list_head *ws, const u8 *data_in,
struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
-struct list_head *lzo_alloc_workspace(unsigned int level);
+struct list_head *lzo_alloc_workspace(void);
void lzo_free_workspace(struct list_head *ws);
int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0cc919d15b14..92071ca0655f 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -37,19 +37,6 @@ static int push_node_left(struct btrfs_trans_handle *trans,
static int balance_node_right(struct btrfs_trans_handle *trans,
struct extent_buffer *dst_buf,
struct extent_buffer *src_buf);
-
-static const struct btrfs_csums {
- u16 size;
- const char name[10];
- const char driver[12];
-} btrfs_csums[] = {
- [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
- [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
- [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
- [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
- .driver = "blake2b-256" },
-};
-
/*
* The leaf data grows from end-to-front in the node. this returns the address
* of the start of the last item, which is the stop of the leaf data stack.
@@ -148,44 +135,6 @@ static inline void copy_leaf_items(const struct extent_buffer *dst,
nr_items * sizeof(struct btrfs_item));
}
-/* This exists for btrfs-progs usages. */
-u16 btrfs_csum_type_size(u16 type)
-{
- return btrfs_csums[type].size;
-}
-
-int btrfs_super_csum_size(const struct btrfs_super_block *s)
-{
- u16 t = btrfs_super_csum_type(s);
- /*
- * csum type is validated at mount time
- */
- return btrfs_csum_type_size(t);
-}
-
-const char *btrfs_super_csum_name(u16 csum_type)
-{
- /* csum type is validated at mount time */
- return btrfs_csums[csum_type].name;
-}
-
-/*
- * Return driver name if defined, otherwise the name that's also a valid driver
- * name
- */
-const char *btrfs_super_csum_driver(u16 csum_type)
-{
- /* csum type is validated at mount time */
- return btrfs_csums[csum_type].driver[0] ?
- btrfs_csums[csum_type].driver :
- btrfs_csums[csum_type].name;
-}
-
-size_t __attribute_const__ btrfs_get_num_csums(void)
-{
- return ARRAY_SIZE(btrfs_csums);
-}
-
struct btrfs_path *btrfs_alloc_path(void)
{
might_sleep();
@@ -226,22 +175,6 @@ noinline void btrfs_release_path(struct btrfs_path *p)
}
/*
- * We want the transaction abort to print stack trace only for errors where the
- * cause could be a bug, eg. due to ENOSPC, and not for common errors that are
- * caused by external factors.
- */
-bool __cold abort_should_print_stack(int error)
-{
- switch (error) {
- case -EIO:
- case -EROFS:
- case -ENOMEM:
- return false;
- }
- return true;
-}
-
-/*
* safely gets a reference on the root node of a tree. A lock
* is not taken, so a concurrent writer may put a different node
* at the root of the tree. See btrfs_lock_root_node for the
@@ -654,6 +587,8 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
goto error_unlock_cow;
}
}
+
+ trace_btrfs_cow_block(root, buf, cow);
if (unlock_orig)
btrfs_tree_unlock(buf);
free_extent_buffer_stale(buf);
@@ -710,7 +645,6 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 search_start;
- int ret;
if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
btrfs_abort_transaction(trans, -EUCLEAN);
@@ -751,12 +685,8 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
* Also We don't care about the error, as it's handled internally.
*/
btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
- ret = btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
- cow_ret, search_start, 0, nest);
-
- trace_btrfs_cow_block(root, buf, *cow_ret);
-
- return ret;
+ return btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
+ cow_ret, search_start, 0, nest);
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
@@ -1508,26 +1438,26 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
*/
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
- struct extent_buffer **eb_ret, int level, int slot,
+ struct extent_buffer **eb_ret, int slot,
const struct btrfs_key *key)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_tree_parent_check check = { 0 };
u64 blocknr;
- u64 gen;
- struct extent_buffer *tmp;
- int ret;
+ struct extent_buffer *tmp = NULL;
+ int ret = 0;
int parent_level;
- bool unlock_up;
+ int err;
+ bool read_tmp = false;
+ bool tmp_locked = false;
+ bool path_released = false;
- unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]);
blocknr = btrfs_node_blockptr(*eb_ret, slot);
- gen = btrfs_node_ptr_generation(*eb_ret, slot);
parent_level = btrfs_header_level(*eb_ret);
btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
check.has_first_key = true;
check.level = parent_level - 1;
- check.transid = gen;
+ check.transid = btrfs_node_ptr_generation(*eb_ret, slot);
check.owner_root = btrfs_root_id(root);
/*
@@ -1540,79 +1470,115 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
tmp = find_extent_buffer(fs_info, blocknr);
if (tmp) {
if (p->reada == READA_FORWARD_ALWAYS)
- reada_for_search(fs_info, p, level, slot, key->objectid);
+ reada_for_search(fs_info, p, parent_level, slot, key->objectid);
/* first we do an atomic uptodate check */
- if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
+ if (btrfs_buffer_uptodate(tmp, check.transid, 1) > 0) {
/*
* Do extra check for first_key, eb can be stale due to
* being cached, read from scrub, or have multiple
* parents (shared tree blocks).
*/
- if (btrfs_verify_level_key(tmp,
- parent_level - 1, &check.first_key, gen)) {
- free_extent_buffer(tmp);
- return -EUCLEAN;
+ if (btrfs_verify_level_key(tmp, &check)) {
+ ret = -EUCLEAN;
+ goto out;
}
*eb_ret = tmp;
- return 0;
+ tmp = NULL;
+ ret = 0;
+ goto out;
}
if (p->nowait) {
- free_extent_buffer(tmp);
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto out;
}
- if (unlock_up)
- btrfs_unlock_up_safe(p, level + 1);
-
- /* now we're allowed to do a blocking uptodate check */
- ret = btrfs_read_extent_buffer(tmp, &check);
- if (ret) {
- free_extent_buffer(tmp);
+ if (!p->skip_locking) {
+ btrfs_unlock_up_safe(p, parent_level + 1);
+ tmp_locked = true;
+ btrfs_tree_read_lock(tmp);
btrfs_release_path(p);
- return ret;
+ ret = -EAGAIN;
+ path_released = true;
}
- if (unlock_up)
- ret = -EAGAIN;
+ /* Now we're allowed to do a blocking uptodate check. */
+ err = btrfs_read_extent_buffer(tmp, &check);
+ if (err) {
+ ret = err;
+ goto out;
+ }
+ if (ret == 0) {
+ ASSERT(!tmp_locked);
+ *eb_ret = tmp;
+ tmp = NULL;
+ }
goto out;
} else if (p->nowait) {
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto out;
}
- if (unlock_up) {
- btrfs_unlock_up_safe(p, level + 1);
+ if (!p->skip_locking) {
+ btrfs_unlock_up_safe(p, parent_level + 1);
ret = -EAGAIN;
- } else {
- ret = 0;
}
if (p->reada != READA_NONE)
- reada_for_search(fs_info, p, level, slot, key->objectid);
+ reada_for_search(fs_info, p, parent_level, slot, key->objectid);
- tmp = read_tree_block(fs_info, blocknr, &check);
+ tmp = btrfs_find_create_tree_block(fs_info, blocknr, check.owner_root, check.level);
if (IS_ERR(tmp)) {
+ ret = PTR_ERR(tmp);
+ tmp = NULL;
+ goto out;
+ }
+ read_tmp = true;
+
+ if (!p->skip_locking) {
+ ASSERT(ret == -EAGAIN);
+ tmp_locked = true;
+ btrfs_tree_read_lock(tmp);
btrfs_release_path(p);
- return PTR_ERR(tmp);
+ path_released = true;
+ }
+
+ /* Now we're allowed to do a blocking uptodate check. */
+ err = btrfs_read_extent_buffer(tmp, &check);
+ if (err) {
+ ret = err;
+ goto out;
}
+
/*
* If the read above didn't mark this buffer up to date,
* it will never end up being up to date. Set ret to EIO now
* and give up so that our caller doesn't loop forever
* on our EAGAINs.
*/
- if (!extent_buffer_uptodate(tmp))
+ if (!extent_buffer_uptodate(tmp)) {
ret = -EIO;
+ goto out;
+ }
-out:
if (ret == 0) {
+ ASSERT(!tmp_locked);
*eb_ret = tmp;
- } else {
- free_extent_buffer(tmp);
- btrfs_release_path(p);
+ tmp = NULL;
}
+out:
+ if (tmp) {
+ if (tmp_locked)
+ btrfs_tree_read_unlock(tmp);
+ if (read_tmp && ret && ret != -EAGAIN)
+ free_extent_buffer_stale(tmp);
+ else
+ free_extent_buffer(tmp);
+ }
+ if (ret && !path_released)
+ btrfs_release_path(p);
return ret;
}
@@ -2010,7 +1976,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
const struct btrfs_key *key, struct btrfs_path *p,
int ins_len, int cow)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_fs_info *fs_info;
struct extent_buffer *b;
int slot;
int ret;
@@ -2023,6 +1989,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int min_write_lock_level;
int prev_cmp;
+ if (!root)
+ return -EINVAL;
+
+ fs_info = root->fs_info;
might_sleep();
lowest_level = p->lowest_level;
@@ -2197,8 +2167,8 @@ cow_done:
goto done;
}
- err = read_block_for_search(root, p, &b, level, slot, key);
- if (err == -EAGAIN)
+ err = read_block_for_search(root, p, &b, slot, key);
+ if (err == -EAGAIN && !p->nowait)
goto again;
if (err) {
ret = err;
@@ -2324,8 +2294,8 @@ again:
goto done;
}
- err = read_block_for_search(root, p, &b, level, slot, key);
- if (err == -EAGAIN)
+ err = read_block_for_search(root, p, &b, slot, key);
+ if (err == -EAGAIN && !p->nowait)
goto again;
if (err) {
ret = err;
@@ -2334,7 +2304,7 @@ again:
level = btrfs_header_level(b);
btrfs_tree_read_lock(b);
- b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq);
+ b = btrfs_tree_mod_log_rewind(fs_info, b, time_seq);
if (!b) {
ret = -ENOMEM;
goto done;
@@ -3863,6 +3833,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
+ key.type != BTRFS_RAID_STRIPE_KEY &&
key.type != BTRFS_EXTENT_CSUM_KEY);
if (btrfs_leaf_free_space(leaf) >= ins_len)
@@ -4930,8 +4901,7 @@ again:
}
next = c;
- ret = read_block_for_search(root, path, &next, level,
- slot, &key);
+ ret = read_block_for_search(root, path, &next, slot, &key);
if (ret == -EAGAIN && !path->nowait)
goto again;
@@ -4974,8 +4944,7 @@ again:
if (!level)
break;
- ret = read_block_for_search(root, path, &next, level,
- 0, &key);
+ ret = read_block_for_search(root, path, &next, 0, &key);
if (ret == -EAGAIN && !path->nowait)
goto again;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 317a3712270f..1096a80a64e7 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -7,7 +7,6 @@
#define BTRFS_CTREE_H
#include "linux/cleanup.h"
-#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
@@ -371,6 +370,25 @@ static inline void btrfs_set_root_last_trans(struct btrfs_root *root, u64 transi
}
/*
+ * Return the generation this root started with.
+ *
+ * Every normal root is created with root->root_key.offset set to its
+ * originating generation. If it is a snapshot it is the generation when
+ * snapshot was created.
+ *
+ * However, for TREE_RELOC roots, root_key.offset is the objectid of the owning
+ * tree root. Thankfully we copy the root item of the owning tree root, which
+ * has its last_snapshot set to what we would have root_key.offset set to, so
+ * return that if this is a TREE_RELOC root.
+ */
+static inline u64 btrfs_root_origin_generation(const struct btrfs_root *root)
+{
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
+ return btrfs_root_last_snapshot(&root->root_item);
+ return root->root_key.offset;
+}
+
+/*
* Structure that conveys information about an extent that is going to replace
* all the extents in a file range.
*/
@@ -487,20 +505,6 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
}
-#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
- ((bytes) >> (fs_info)->sectorsize_bits)
-
-static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
-{
- return mapping_gfp_constraint(mapping, ~__GFP_FS);
-}
-
-void btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end);
-int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 num_bytes, u64 *actual_bytes);
-int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
-
-/* ctree.c */
int __init btrfs_ctree_init(void);
void __cold btrfs_ctree_exit(void);
@@ -737,23 +741,4 @@ static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
}
-u16 btrfs_csum_type_size(u16 type);
-int btrfs_super_csum_size(const struct btrfs_super_block *s);
-const char *btrfs_super_csum_name(u16 csum_type);
-const char *btrfs_super_csum_driver(u16 csum_type);
-size_t __attribute_const__ btrfs_get_num_csums(void);
-
-/*
- * We use page status Private2 to indicate there is an ordered extent with
- * unfinished IO.
- *
- * Rename the Private2 accessors to Ordered, to improve readability.
- */
-#define PageOrdered(page) PagePrivate2(page)
-#define SetPageOrdered(page) SetPagePrivate2(page)
-#define ClearPageOrdered(page) ClearPagePrivate2(page)
-#define folio_test_ordered(folio) folio_test_private_2(folio)
-#define folio_set_ordered(folio) folio_set_private_2(folio)
-#define folio_clear_ordered(folio) folio_clear_private_2(folio)
-
#endif
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index b95ef44c326b..968dae953948 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -763,12 +763,12 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
* We can get a merged extent, in that case, we need to re-search
* tree to get the original em for defrag.
*
- * If @newer_than is 0 or em::generation < newer_than, we can trust
- * this em, as either we don't care about the generation, or the
- * merged extent map will be rejected anyway.
+ * This is because even if we have adjacent extents that are contiguous
+ * and compatible (same type and flags), we still want to defrag them
+ * so that we use less metadata (extent items in the extent tree and
+ * file extent items in the inode's subvolume tree).
*/
- if (em && (em->flags & EXTENT_FLAG_MERGED) &&
- newer_than && em->generation >= newer_than) {
+ if (em && (em->flags & EXTENT_FLAG_MERGED)) {
free_extent_map(em);
em = NULL;
}
diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
index 7aa8a395d838..88e900e5a43d 100644
--- a/fs/btrfs/delalloc-space.c
+++ b/fs/btrfs/delalloc-space.c
@@ -176,7 +176,7 @@ void btrfs_free_reserved_data_space_noquota(struct btrfs_fs_info *fs_info,
ASSERT(IS_ALIGNED(len, fs_info->sectorsize));
data_sinfo = fs_info->data_sinfo;
- btrfs_space_info_free_bytes_may_use(fs_info, data_sinfo, len);
+ btrfs_space_info_free_bytes_may_use(data_sinfo, len);
}
/*
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 508bdbae29a0..0b4933c6a889 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -366,40 +366,35 @@ static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
return NULL;
}
+static int btrfs_delayed_item_cmp(const struct rb_node *new,
+ const struct rb_node *exist)
+{
+ const struct btrfs_delayed_item *new_item =
+ rb_entry(new, struct btrfs_delayed_item, rb_node);
+ const struct btrfs_delayed_item *exist_item =
+ rb_entry(exist, struct btrfs_delayed_item, rb_node);
+
+ if (new_item->index < exist_item->index)
+ return -1;
+ if (new_item->index > exist_item->index)
+ return 1;
+ return 0;
+}
+
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
struct btrfs_delayed_item *ins)
{
- struct rb_node **p, *node;
- struct rb_node *parent_node = NULL;
struct rb_root_cached *root;
- struct btrfs_delayed_item *item;
- bool leftmost = true;
+ struct rb_node *exist;
if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
root = &delayed_node->ins_root;
else
root = &delayed_node->del_root;
- p = &root->rb_root.rb_node;
- node = &ins->rb_node;
-
- while (*p) {
- parent_node = *p;
- item = rb_entry(parent_node, struct btrfs_delayed_item,
- rb_node);
-
- if (item->index < ins->index) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else if (item->index > ins->index) {
- p = &(*p)->rb_left;
- } else {
- return -EEXIST;
- }
- }
-
- rb_link_node(node, parent_node, p);
- rb_insert_color_cached(node, root, leftmost);
+ exist = rb_find_add_cached(&ins->rb_node, root, btrfs_delayed_item_cmp);
+ if (exist)
+ return -EEXIST;
if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
ins->index >= delayed_node->index_cnt)
@@ -1038,7 +1033,6 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
struct btrfs_inode_item);
write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
sizeof(struct btrfs_inode_item));
- btrfs_mark_buffer_dirty(trans, leaf);
if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
goto out;
@@ -1561,8 +1555,7 @@ release_node:
return ret;
}
-static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_node *node,
+static int btrfs_delete_delayed_insertion_item(struct btrfs_delayed_node *node,
u64 index)
{
struct btrfs_delayed_item *item;
@@ -1620,7 +1613,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
if (IS_ERR(node))
return PTR_ERR(node);
- ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node, index);
+ ret = btrfs_delete_delayed_insertion_item(node, index);
if (!ret)
goto end;
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 7cfefdfe54ea..f4d9feac0d0e 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -64,9 +64,9 @@ struct btrfs_delayed_node {
struct mutex mutex;
struct btrfs_inode_item inode_item;
refcount_t refs;
+ int count;
u64 index_cnt;
unsigned long flags;
- int count;
/*
* The size of the next batch of dir index items to insert (if this
* node is from a directory inode). Protected by @mutex.
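
Moving 'int count' up next to the 4-byte refcount_t is presumably a struct-packing fix: it fills the hole that alignment of the following u64 would otherwise leave. A sketch of the before/after layout on a 64-bit build (my reading, not stated in the patch):

#include <linux/refcount.h>
#include <linux/types.h>

/* Before: a 4-byte hole after 'refs' to align 'index_cnt'. */
struct layout_before {
	refcount_t refs;	/* 4 bytes */
				/* 4-byte hole */
	u64 index_cnt;		/* 8 bytes */
	unsigned long flags;	/* 8 bytes */
	int count;		/* 4 bytes, plus tail padding */
};

/* After: 'count' packs into the former hole; no padding needed. */
struct layout_after {
	refcount_t refs;	/* 4 bytes */
	int count;		/* 4 bytes */
	u64 index_cnt;		/* 8 bytes */
	unsigned long flags;	/* 8 bytes */
};
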
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index ad9ef8312e41..98c5b61dabe8 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -9,6 +9,7 @@
#include "messages.h"
#include "ctree.h"
#include "delayed-ref.h"
+#include "extent-tree.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
@@ -92,6 +93,9 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
u64 num_bytes;
u64 reserved_bytes;
+ if (btrfs_is_testing(fs_info))
+ return;
+
num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates);
num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info,
trans->delayed_ref_csum_deletions);
@@ -253,7 +257,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
spin_unlock(&block_rsv->lock);
if (to_free > 0)
- btrfs_space_info_free_bytes_may_use(fs_info, space_info, to_free);
+ btrfs_space_info_free_bytes_may_use(space_info, to_free);
if (refilled_bytes > 0)
trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
@@ -264,8 +268,8 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
/*
* compare two delayed data backrefs with same bytenr and type
*/
-static int comp_data_refs(struct btrfs_delayed_ref_node *ref1,
- struct btrfs_delayed_ref_node *ref2)
+static int comp_data_refs(const struct btrfs_delayed_ref_node *ref1,
+ const struct btrfs_delayed_ref_node *ref2)
{
if (ref1->data_ref.objectid < ref2->data_ref.objectid)
return -1;
@@ -278,8 +282,8 @@ static int comp_data_refs(struct btrfs_delayed_ref_node *ref1,
return 0;
}
-static int comp_refs(struct btrfs_delayed_ref_node *ref1,
- struct btrfs_delayed_ref_node *ref2,
+static int comp_refs(const struct btrfs_delayed_ref_node *ref1,
+ const struct btrfs_delayed_ref_node *ref2,
bool check_seq)
{
int ret = 0;
@@ -298,7 +302,7 @@ static int comp_refs(struct btrfs_delayed_ref_node *ref1,
if (ref1->ref_root < ref2->ref_root)
return -1;
if (ref1->ref_root > ref2->ref_root)
- return -1;
+ return 1;
if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY)
ret = comp_data_refs(ref1, ref2);
}
@@ -313,142 +317,57 @@ static int comp_refs(struct btrfs_delayed_ref_node *ref1,
return 0;
}
-/* insert a new ref to head ref rbtree */
-static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
- struct rb_node *node)
+static int cmp_refs_node(const struct rb_node *new, const struct rb_node *exist)
{
- struct rb_node **p = &root->rb_root.rb_node;
- struct rb_node *parent_node = NULL;
- struct btrfs_delayed_ref_head *entry;
- struct btrfs_delayed_ref_head *ins;
- u64 bytenr;
- bool leftmost = true;
-
- ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
- bytenr = ins->bytenr;
- while (*p) {
- parent_node = *p;
- entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
- href_node);
-
- if (bytenr < entry->bytenr) {
- p = &(*p)->rb_left;
- } else if (bytenr > entry->bytenr) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else {
- return entry;
- }
- }
+ const struct btrfs_delayed_ref_node *new_node =
+ rb_entry(new, struct btrfs_delayed_ref_node, ref_node);
+ const struct btrfs_delayed_ref_node *exist_node =
+ rb_entry(exist, struct btrfs_delayed_ref_node, ref_node);
- rb_link_node(node, parent_node, p);
- rb_insert_color_cached(node, root, leftmost);
- return NULL;
+ return comp_refs(new_node, exist_node, true);
}
static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
struct btrfs_delayed_ref_node *ins)
{
- struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *node = &ins->ref_node;
- struct rb_node *parent_node = NULL;
- struct btrfs_delayed_ref_node *entry;
- bool leftmost = true;
-
- while (*p) {
- int comp;
-
- parent_node = *p;
- entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
- ref_node);
- comp = comp_refs(ins, entry, true);
- if (comp < 0) {
- p = &(*p)->rb_left;
- } else if (comp > 0) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else {
- return entry;
- }
- }
+ struct rb_node *exist;
- rb_link_node(node, parent_node, p);
- rb_insert_color_cached(node, root, leftmost);
+ exist = rb_find_add_cached(node, root, cmp_refs_node);
+ if (exist)
+ return rb_entry(exist, struct btrfs_delayed_ref_node, ref_node);
return NULL;
}
static struct btrfs_delayed_ref_head *find_first_ref_head(
struct btrfs_delayed_ref_root *dr)
{
- struct rb_node *n;
- struct btrfs_delayed_ref_head *entry;
+ unsigned long from = 0;
- n = rb_first_cached(&dr->href_root);
- if (!n)
- return NULL;
-
- entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
-
- return entry;
-}
+ lockdep_assert_held(&dr->lock);
-/*
- * Find a head entry based on bytenr. This returns the delayed ref head if it
- * was able to find one, or NULL if nothing was in that spot. If return_bigger
- * is given, the next bigger entry is returned if no exact match is found.
- */
-static struct btrfs_delayed_ref_head *find_ref_head(
- struct btrfs_delayed_ref_root *dr, u64 bytenr,
- bool return_bigger)
-{
- struct rb_root *root = &dr->href_root.rb_root;
- struct rb_node *n;
- struct btrfs_delayed_ref_head *entry;
-
- n = root->rb_node;
- entry = NULL;
- while (n) {
- entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
-
- if (bytenr < entry->bytenr)
- n = n->rb_left;
- else if (bytenr > entry->bytenr)
- n = n->rb_right;
- else
- return entry;
- }
- if (entry && return_bigger) {
- if (bytenr > entry->bytenr) {
- n = rb_next(&entry->href_node);
- if (!n)
- return NULL;
- entry = rb_entry(n, struct btrfs_delayed_ref_head,
- href_node);
- }
- return entry;
- }
- return NULL;
+ return xa_find(&dr->head_refs, &from, ULONG_MAX, XA_PRESENT);
}
-int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_delayed_ref_head *head)
+static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head)
{
lockdep_assert_held(&delayed_refs->lock);
if (mutex_trylock(&head->mutex))
- return 0;
+ return true;
refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
mutex_lock(&head->mutex);
spin_lock(&delayed_refs->lock);
- if (RB_EMPTY_NODE(&head->href_node)) {
+ if (!head->tracked) {
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref_head(head);
- return -EAGAIN;
+ return false;
}
btrfs_put_delayed_ref_head(head);
- return 0;
+ return true;
}
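Editor's note: the trylock-or-pin dance in btrfs_delayed_ref_lock() is a generic lock-ordering pattern. A hedged sketch with hypothetical names (struct object, get_object(), put_object(), the still_tracked flag) of the same rule: never sleep on the mutex while holding the spinlock, and always recheck membership after reacquiring it.

    /* Sketch only; the object type and its helpers are hypothetical. */
    #include <linux/spinlock.h>
    #include <linux/mutex.h>

    static bool lock_pinned(spinlock_t *outer, struct mutex *inner,
                            struct object *obj)
    {
            lockdep_assert_held(outer);

            if (mutex_trylock(inner))
                    return true;

            get_object(obj);            /* keep it alive across the unlock */
            spin_unlock(outer);
            mutex_lock(inner);          /* may sleep, hence the unlock above */
            spin_lock(outer);
            if (!obj->still_tracked) {  /* it was removed while we slept */
                    mutex_unlock(inner);
                    put_object(obj);
                    return false;
            }
            put_object(obj);
            return true;
    }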
static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
@@ -462,7 +381,6 @@ static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
if (!list_empty(&ref->add_list))
list_del(&ref->add_list);
btrfs_put_delayed_ref(ref);
- atomic_dec(&delayed_refs->num_entries);
btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
}
@@ -558,33 +476,31 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
}
struct btrfs_delayed_ref_head *btrfs_select_ref_head(
+ const struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs)
{
struct btrfs_delayed_ref_head *head;
+ unsigned long start_index;
+ unsigned long found_index;
+ bool found_head = false;
+ bool locked;
- lockdep_assert_held(&delayed_refs->lock);
+ spin_lock(&delayed_refs->lock);
again:
- head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
- true);
- if (!head && delayed_refs->run_delayed_start != 0) {
- delayed_refs->run_delayed_start = 0;
- head = find_first_ref_head(delayed_refs);
+ start_index = (delayed_refs->run_delayed_start >> fs_info->sectorsize_bits);
+ xa_for_each_start(&delayed_refs->head_refs, found_index, head, start_index) {
+ if (!head->processing) {
+ found_head = true;
+ break;
+ }
}
- if (!head)
- return NULL;
-
- while (head->processing) {
- struct rb_node *node;
-
- node = rb_next(&head->href_node);
- if (!node) {
- if (delayed_refs->run_delayed_start == 0)
- return NULL;
- delayed_refs->run_delayed_start = 0;
- goto again;
+ if (!found_head) {
+ if (delayed_refs->run_delayed_start == 0) {
+ spin_unlock(&delayed_refs->lock);
+ return NULL;
}
- head = rb_entry(node, struct btrfs_delayed_ref_head,
- href_node);
+ delayed_refs->run_delayed_start = 0;
+ goto again;
}
head->processing = true;
@@ -592,23 +508,73 @@ again:
delayed_refs->num_heads_ready--;
delayed_refs->run_delayed_start = head->bytenr +
head->num_bytes;
+
+ locked = btrfs_delayed_ref_lock(delayed_refs, head);
+ spin_unlock(&delayed_refs->lock);
+
+ /*
+ * We may have dropped the spin lock to get the head mutex lock, and
+ * that might have given someone else time to free the head. If that's
+ * true, it has been removed from our list and we can move on.
+ */
+ if (!locked)
+ return ERR_PTR(-EAGAIN);
+
return head;
}
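Editor's note: the scan in btrfs_select_ref_head() is a wrap-around ("rotor") search over the xarray, starting from run_delayed_start and restarting once from index 0 if nothing usable was found. A reduced sketch of just that pattern (pick_head() is a made-up name; the caller is assumed to hold dr->lock):

    /* Sketch only; not part of the patch. */
    static struct btrfs_delayed_ref_head *pick_head(struct btrfs_delayed_ref_root *dr,
                                                    unsigned long start)
    {
            struct btrfs_delayed_ref_head *head;
            unsigned long index;

            lockdep_assert_held(&dr->lock);
    again:
            xa_for_each_start(&dr->head_refs, index, head, start) {
                    if (!head->processing)
                            return head;
            }
            if (start != 0) {
                    start = 0;          /* wrap around once and rescan */
                    goto again;
            }
            return NULL;
    }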
-void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head)
+{
+ spin_lock(&delayed_refs->lock);
+ head->processing = false;
+ delayed_refs->num_heads_ready++;
+ spin_unlock(&delayed_refs->lock);
+ btrfs_delayed_ref_unlock(head);
+}
+
+void btrfs_delete_ref_head(const struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head)
{
+ const unsigned long index = (head->bytenr >> fs_info->sectorsize_bits);
+
lockdep_assert_held(&delayed_refs->lock);
lockdep_assert_held(&head->lock);
- rb_erase_cached(&head->href_node, &delayed_refs->href_root);
- RB_CLEAR_NODE(&head->href_node);
- atomic_dec(&delayed_refs->num_entries);
+ xa_erase(&delayed_refs->head_refs, index);
+ head->tracked = false;
delayed_refs->num_heads--;
if (!head->processing)
delayed_refs->num_heads_ready--;
}
+struct btrfs_delayed_ref_node *btrfs_select_delayed_ref(struct btrfs_delayed_ref_head *head)
+{
+ struct btrfs_delayed_ref_node *ref;
+
+ lockdep_assert_held(&head->mutex);
+ lockdep_assert_held(&head->lock);
+
+ if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
+ return NULL;
+
+ /*
+ * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
+ * This is to prevent a ref count from going down to zero, which deletes
+ * the extent item from the extent tree, when there still are references
+ * to add, which would fail because they would not find the extent item.
+ */
+ if (!list_empty(&head->ref_add_list))
+ return list_first_entry(&head->ref_add_list,
+ struct btrfs_delayed_ref_node, add_list);
+
+ ref = rb_entry(rb_first_cached(&head->ref_tree),
+ struct btrfs_delayed_ref_node, ref_node);
+ ASSERT(list_empty(&ref->add_list));
+ return ref;
+}
+
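Editor's note, as a concrete illustration of the comment in btrfs_select_delayed_ref(): if an extent currently has one reference and the head holds both a drop and an add, running the drop first would bring the count to zero and delete the extent item, so the later add would fail to find it; running the add first takes the count to two and then back to one, which is the intended final state.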
/*
* Helper to insert the ref_node to the tail or merge with tail.
*
@@ -629,7 +595,6 @@ static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
if (!exist) {
if (ref->action == BTRFS_ADD_DELAYED_REF)
list_add_tail(&ref->add_list, &href->ref_add_list);
- atomic_inc(&root->num_entries);
spin_unlock(&href->lock);
trans->delayed_ref_updates++;
return false;
@@ -649,7 +614,7 @@ static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
&href->ref_add_list);
else if (ref->action == BTRFS_DROP_DELAYED_REF) {
ASSERT(!list_empty(&exist->add_list));
- list_del(&exist->add_list);
+ list_del_init(&exist->add_list);
} else {
ASSERT(0);
}
@@ -813,7 +778,7 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
head_ref->is_system = (generic_ref->ref_root == BTRFS_CHUNK_TREE_OBJECTID);
head_ref->ref_tree = RB_ROOT_CACHED;
INIT_LIST_HEAD(&head_ref->ref_add_list);
- RB_CLEAR_NODE(&head_ref->href_node);
+ head_ref->tracked = false;
head_ref->processing = false;
head_ref->total_ref_mod = count_mod;
spin_lock_init(&head_ref->lock);
@@ -830,7 +795,6 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
qrecord->data_rsv = reserved;
qrecord->data_rsv_refroot = generic_ref->ref_root;
}
- qrecord->bytenr = generic_ref->bytenr;
qrecord->num_bytes = generic_ref->num_bytes;
qrecord->old_roots = NULL;
}
@@ -840,6 +804,8 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
* helper function to actually insert a head node into the rbtree.
* this does all the dirty work in terms of maintaining the correct
* overall modification count.
+ *
+ * Returns an error pointer in case of an error.
*/
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
@@ -847,31 +813,48 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
struct btrfs_qgroup_extent_record *qrecord,
int action, bool *qrecord_inserted_ret)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_head *existing;
struct btrfs_delayed_ref_root *delayed_refs;
+ const unsigned long index = (head_ref->bytenr >> fs_info->sectorsize_bits);
bool qrecord_inserted = false;
delayed_refs = &trans->transaction->delayed_refs;
+ lockdep_assert_held(&delayed_refs->lock);
+
+#if BITS_PER_LONG == 32
+ if (head_ref->bytenr >= MAX_LFS_FILESIZE) {
+ if (qrecord)
+ xa_release(&delayed_refs->dirty_extents, index);
+ btrfs_err_rl(fs_info,
+"delayed ref head %llu is beyond 32bit page cache and xarray index limit",
+ head_ref->bytenr);
+ btrfs_err_32bit_limit(fs_info);
+ return ERR_PTR(-EOVERFLOW);
+ }
+#endif
/* Record qgroup extent info if provided */
if (qrecord) {
int ret;
- ret = btrfs_qgroup_trace_extent_nolock(trans->fs_info,
- delayed_refs, qrecord);
+ ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord,
+ head_ref->bytenr);
if (ret) {
/* Clean up if insertion fails or item exists. */
- xa_release(&delayed_refs->dirty_extents, qrecord->bytenr);
+ xa_release(&delayed_refs->dirty_extents, index);
+ /* Caller responsible for freeing qrecord on error. */
+ if (ret < 0)
+ return ERR_PTR(ret);
kfree(qrecord);
} else {
qrecord_inserted = true;
}
}
- trace_add_delayed_ref_head(trans->fs_info, head_ref, action);
+ trace_add_delayed_ref_head(fs_info, head_ref, action);
- existing = htree_insert(&delayed_refs->href_root,
- &head_ref->href_node);
+ existing = xa_load(&delayed_refs->head_refs, index);
if (existing) {
update_existing_head_ref(trans, existing, head_ref);
/*
@@ -881,6 +864,19 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
head_ref = existing;
} else {
+ existing = xa_store(&delayed_refs->head_refs, index, head_ref, GFP_ATOMIC);
+ if (xa_is_err(existing)) {
+ /* Memory was preallocated by the caller. */
+ ASSERT(xa_err(existing) != -ENOMEM);
+ return ERR_PTR(xa_err(existing));
+ } else if (WARN_ON(existing)) {
+ /*
+			 * Shouldn't happen, we just did a lookup before under
+ * delayed_refs->lock.
+ */
+ return ERR_PTR(-EEXIST);
+ }
+ head_ref->tracked = true;
/*
* We reserve the amount of bytes needed to delete csums when
* adding the ref head and not when adding individual drop refs
@@ -890,12 +886,10 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
if (head_ref->is_data && head_ref->ref_mod < 0) {
delayed_refs->pending_csums += head_ref->num_bytes;
trans->delayed_ref_csum_deletions +=
- btrfs_csum_bytes_to_leaves(trans->fs_info,
- head_ref->num_bytes);
+ btrfs_csum_bytes_to_leaves(fs_info, head_ref->num_bytes);
}
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
- atomic_inc(&delayed_refs->num_entries);
}
if (qrecord_inserted_ret)
*qrecord_inserted_ret = qrecord_inserted;
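Editor's note: to make the index arithmetic concrete, here is a standalone userspace C illustration (not kernel code; the 4K sector size and the example bytenr are assumptions) of why the xarray key is bytenr >> sectorsize_bits and what range a 32-bit unsigned long index can then cover.

    /* Standalone demo; compile with any C compiler. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            const unsigned int sectorsize_bits = 12;          /* assuming 4K sectors */
            const uint64_t bytenr = 6ULL << 30;               /* hypothetical extent at 6G */
            const uint64_t index = bytenr >> sectorsize_bits; /* key used by the patch */

            printf("bytenr %llu -> index %llu (fits a 32-bit key: %s)\n",
                   (unsigned long long)bytenr, (unsigned long long)index,
                   index <= 0xffffffffULL ? "yes" : "no");
            /* Range a 32-bit unsigned long index can cover with this shift: */
            printf("max bytenr ~ %llu bytes (about 16T with 4K sectors)\n",
                   ((0xffffffffULL + 1) << sectorsize_bits) - 1);
            return 0;
    }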
@@ -1000,42 +994,67 @@ static int add_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_node *node;
struct btrfs_delayed_ref_head *head_ref;
+ struct btrfs_delayed_ref_head *new_head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
+ const unsigned long index = (generic_ref->bytenr >> fs_info->sectorsize_bits);
+ bool qrecord_reserved = false;
bool qrecord_inserted;
int action = generic_ref->action;
bool merged;
+ int ret;
node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
if (!node)
return -ENOMEM;
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
- if (!head_ref)
+ if (!head_ref) {
+ ret = -ENOMEM;
goto free_node;
+ }
+
+ delayed_refs = &trans->transaction->delayed_refs;
if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
record = kzalloc(sizeof(*record), GFP_NOFS);
- if (!record)
+ if (!record) {
+ ret = -ENOMEM;
goto free_head_ref;
- if (xa_reserve(&trans->transaction->delayed_refs.dirty_extents,
- generic_ref->bytenr, GFP_NOFS))
+ }
+ if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
+ ret = -ENOMEM;
goto free_record;
+ }
+ qrecord_reserved = true;
+ }
+
+ ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
+ if (ret) {
+ if (qrecord_reserved)
+ xa_release(&delayed_refs->dirty_extents, index);
+ goto free_record;
}
init_delayed_ref_common(fs_info, node, generic_ref);
init_delayed_ref_head(head_ref, generic_ref, record, reserved);
head_ref->extent_op = extent_op;
- delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
/*
* insert both the head node and the new ref without dropping
* the spin lock
*/
- head_ref = add_delayed_ref_head(trans, head_ref, record,
- action, &qrecord_inserted);
+ new_head_ref = add_delayed_ref_head(trans, head_ref, record,
+ action, &qrecord_inserted);
+ if (IS_ERR(new_head_ref)) {
+ xa_release(&delayed_refs->head_refs, index);
+ spin_unlock(&delayed_refs->lock);
+ ret = PTR_ERR(new_head_ref);
+ goto free_record;
+ }
+ head_ref = new_head_ref;
merged = insert_delayed_ref(trans, head_ref, node);
spin_unlock(&delayed_refs->lock);
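Editor's note: the allocation strategy above follows a common xarray idiom: reserve the slot with a sleeping allocation (GFP_NOFS) before taking the spinlock, then store under the lock with GFP_ATOMIC so the store cannot fail with -ENOMEM, and drop unused reservations with xa_release(). A minimal sketch with a hypothetical insert_tracked() helper (not from the patch):

    /* Sketch only; error handling reduced to the essentials. */
    #include <linux/xarray.h>
    #include <linux/spinlock.h>

    static int insert_tracked(struct xarray *xa, unsigned long index, void *ptr,
                              spinlock_t *lock)
    {
            void *old;
            int ret;

            ret = xa_reserve(xa, index, GFP_NOFS);  /* may sleep, lock not held */
            if (ret)
                    return ret;

            spin_lock(lock);
            old = xa_load(xa, index);
            if (old) {
                    /* Someone beat us to it: drop the reservation. */
                    xa_release(xa, index);
                    ret = -EEXIST;
            } else {
                    old = xa_store(xa, index, ptr, GFP_ATOMIC);
                    if (xa_is_err(old))
                            ret = xa_err(old);
            }
            spin_unlock(lock);
            return ret;
    }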
@@ -1054,7 +1073,7 @@ static int add_delayed_ref(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
if (qrecord_inserted)
- return btrfs_qgroup_trace_extent_post(trans, record);
+ return btrfs_qgroup_trace_extent_post(trans, record, generic_ref->bytenr);
return 0;
free_record:
@@ -1063,7 +1082,7 @@ free_head_ref:
kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_node:
kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
- return -ENOMEM;
+ return ret;
}
/*
@@ -1093,7 +1112,9 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u8 level,
struct btrfs_delayed_extent_op *extent_op)
{
+ const unsigned long index = (bytenr >> trans->fs_info->sectorsize_bits);
struct btrfs_delayed_ref_head *head_ref;
+ struct btrfs_delayed_ref_head *head_ref_ret;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_ref generic_ref = {
.type = BTRFS_REF_METADATA,
@@ -1102,6 +1123,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
.num_bytes = num_bytes,
.tree_ref.level = level,
};
+ int ret;
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref)
@@ -1111,11 +1133,22 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
head_ref->extent_op = extent_op;
delayed_refs = &trans->transaction->delayed_refs;
- spin_lock(&delayed_refs->lock);
- add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
- NULL);
+ ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
+ if (ret) {
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+ return ret;
+ }
+ spin_lock(&delayed_refs->lock);
+ head_ref_ret = add_delayed_ref_head(trans, head_ref, NULL,
+ BTRFS_UPDATE_DELAYED_HEAD, NULL);
+ if (IS_ERR(head_ref_ret)) {
+ xa_release(&delayed_refs->head_refs, index);
+ spin_unlock(&delayed_refs->lock);
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+ return PTR_ERR(head_ref_ret);
+ }
spin_unlock(&delayed_refs->lock);
/*
@@ -1139,11 +1172,15 @@ void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
* head node if found, or NULL if not.
*/
struct btrfs_delayed_ref_head *
-btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
+btrfs_find_delayed_ref_head(const struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ u64 bytenr)
{
+ const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
+
lockdep_assert_held(&delayed_refs->lock);
- return find_ref_head(delayed_refs, bytenr, false);
+ return xa_load(&delayed_refs->head_refs, index);
}
static int find_comp(struct btrfs_delayed_ref_node *entry, u64 root, u64 parent)
@@ -1213,6 +1250,84 @@ bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
return found;
}
+void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
+{
+ struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ bool testing = btrfs_is_testing(fs_info);
+
+ spin_lock(&delayed_refs->lock);
+ while (true) {
+ struct btrfs_delayed_ref_head *head;
+ struct rb_node *n;
+ bool pin_bytes = false;
+
+ head = find_first_ref_head(delayed_refs);
+ if (!head)
+ break;
+
+ if (!btrfs_delayed_ref_lock(delayed_refs, head))
+ continue;
+
+ spin_lock(&head->lock);
+ while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
+ struct btrfs_delayed_ref_node *ref;
+
+ ref = rb_entry(n, struct btrfs_delayed_ref_node, ref_node);
+ drop_delayed_ref(fs_info, delayed_refs, head, ref);
+ }
+ if (head->must_insert_reserved)
+ pin_bytes = true;
+ btrfs_free_delayed_extent_op(head->extent_op);
+ btrfs_delete_ref_head(fs_info, delayed_refs, head);
+ spin_unlock(&head->lock);
+ spin_unlock(&delayed_refs->lock);
+ mutex_unlock(&head->mutex);
+
+ if (!testing && pin_bytes) {
+ struct btrfs_block_group *bg;
+
+ bg = btrfs_lookup_block_group(fs_info, head->bytenr);
+ if (WARN_ON_ONCE(bg == NULL)) {
+ /*
+ * Unexpected and there's nothing we can do here
+ * because we are in a transaction abort path,
+ * so any errors can only be ignored or reported
+				 * while attempting to clean up all resources.
+ */
+ btrfs_err(fs_info,
+"block group for delayed ref at %llu was not found while destroying ref head",
+ head->bytenr);
+ } else {
+ spin_lock(&bg->space_info->lock);
+ spin_lock(&bg->lock);
+ bg->pinned += head->num_bytes;
+ btrfs_space_info_update_bytes_pinned(bg->space_info,
+ head->num_bytes);
+ bg->reserved -= head->num_bytes;
+ bg->space_info->bytes_reserved -= head->num_bytes;
+ spin_unlock(&bg->lock);
+ spin_unlock(&bg->space_info->lock);
+
+ btrfs_put_block_group(bg);
+ }
+
+ btrfs_error_unpin_extent_range(fs_info, head->bytenr,
+ head->bytenr + head->num_bytes - 1);
+ }
+ if (!testing)
+ btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
+ btrfs_put_delayed_ref_head(head);
+ cond_resched();
+ spin_lock(&delayed_refs->lock);
+ }
+
+ if (!testing)
+ btrfs_qgroup_destroy_extent_records(trans);
+
+ spin_unlock(&delayed_refs->lock);
+}
+
void __cold btrfs_delayed_ref_exit(void)
{
kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 085f30968aba..a35067cebb97 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -61,7 +61,8 @@ struct btrfs_delayed_ref_node {
/*
* If action is BTRFS_ADD_DELAYED_REF, also link this node to
* ref_head->ref_add_list, then we do not need to iterate the
- * whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
+ * refs rbtree in the corresponding delayed ref head
+ * (struct btrfs_delayed_ref_head::ref_tree).
*/
struct list_head add_list;
@@ -123,12 +124,6 @@ struct btrfs_delayed_ref_head {
u64 bytenr;
u64 num_bytes;
/*
- * For insertion into struct btrfs_delayed_ref_root::href_root.
- * Keep it in the same cache line as 'bytenr' for more efficient
- * searches in the rbtree.
- */
- struct rb_node href_node;
- /*
* the mutex is held while running the refs, and it is also
* held when checking the sum of reference modifications.
*/
@@ -191,6 +186,11 @@ struct btrfs_delayed_ref_head {
bool is_data;
bool is_system;
bool processing;
+ /*
+ * Indicate if it's currently in the data structure that tracks head
+ * refs (struct btrfs_delayed_ref_root::head_refs).
+ */
+ bool tracked;
};
enum btrfs_delayed_ref_flags {
@@ -199,30 +199,52 @@ enum btrfs_delayed_ref_flags {
};
struct btrfs_delayed_ref_root {
- /* head ref rbtree */
- struct rb_root_cached href_root;
+ /*
+ * Track head references.
+ * The keys correspond to the logical address of the extent ("bytenr")
+ * right shifted by fs_info->sectorsize_bits. This is both to get a more
+ * dense index space (optimizes xarray structure) and because indexes in
+ * xarrays are of "unsigned long" type, meaning they are 32 bits wide on
+ * 32 bits platforms, limiting the extent range to 4G which is too low
+ * and makes it unusable (truncated index values) on 32 bits platforms.
+ * Protected by the spinlock 'lock' defined below.
+ */
+ struct xarray head_refs;
- /* Track dirty extent records. */
+ /*
+ * Track dirty extent records.
+ * The keys correspond to the logical address of the extent ("bytenr")
+ * right shifted by fs_info->sectorsize_bits, for same reasons as above.
+ */
struct xarray dirty_extents;
- /* this spin lock protects the rbtree and the entries inside */
- spinlock_t lock;
-
- /* how many delayed ref updates we've queued, used by the
- * throttling code
+ /*
+ * Protects the xarray head_refs, its entries and the following fields:
+ * num_heads, num_heads_ready, pending_csums and run_delayed_start.
*/
- atomic_t num_entries;
+ spinlock_t lock;
- /* total number of head nodes in tree */
+ /* Total number of head refs, protected by the spinlock 'lock'. */
unsigned long num_heads;
- /* total number of head nodes ready for processing */
+ /*
+ * Total number of head refs ready for processing, protected by the
+ * spinlock 'lock'.
+ */
unsigned long num_heads_ready;
+ /*
+ * Track space reserved for deleting csums of data extents.
+ * Protected by the spinlock 'lock'.
+ */
u64 pending_csums;
unsigned long flags;
+ /*
+ * Track from which bytenr to start searching ref heads.
+ * Protected by the spinlock 'lock'.
+ */
u64 run_delayed_start;
/*
@@ -364,19 +386,23 @@ void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *head);
struct btrfs_delayed_ref_head *
-btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+btrfs_find_delayed_ref_head(const struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
u64 bytenr);
-int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
mutex_unlock(&head->mutex);
}
-void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+void btrfs_delete_ref_head(const struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head);
struct btrfs_delayed_ref_head *btrfs_select_ref_head(
+ const struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs);
+void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head);
+struct btrfs_delayed_ref_node *btrfs_select_delayed_ref(struct btrfs_delayed_ref_head *head);
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);
@@ -391,6 +417,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
u64 root, u64 parent);
+void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans);
static inline u64 btrfs_delayed_ref_owner(struct btrfs_delayed_ref_node *node)
{
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 83d5cdd77f29..f86fbea0b3de 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -45,7 +45,7 @@
*
* - Copy existing extents
*
- * This happens by re-using scrub facility, as scrub also iterates through
+ * This happens by reusing the scrub facility, as scrub also iterates through
* existing extents from commit root.
*
* Location: scrub_write_block_to_dev_replace() from
@@ -440,9 +440,6 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
dev_replace->cursor_right);
dev_replace->item_needs_writeback = 0;
up_write(&dev_replace->rwsem);
-
- btrfs_mark_buffer_dirty(trans, eb);
-
out:
btrfs_free_path(path);
@@ -641,6 +638,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
return ret;
down_write(&dev_replace->rwsem);
+ dev_replace->replace_task = current;
switch (dev_replace->replace_state) {
case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
@@ -994,6 +992,7 @@ error:
list_add(&tgt_device->dev_alloc_list, &fs_devices->alloc_list);
fs_devices->rw_devices++;
+ dev_replace->replace_task = NULL;
up_write(&dev_replace->rwsem);
btrfs_rm_dev_replace_blocked(fs_info);
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 001c0c2f872c..ccf91de29f80 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -27,7 +27,6 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
const char *name,
int name_len)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
char *ptr;
struct extent_buffer *leaf;
@@ -35,7 +34,7 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
if (ret == -EEXIST) {
struct btrfs_dir_item *di;
- di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
+ di = btrfs_match_dir_item_name(path, name, name_len);
if (di)
return ERR_PTR(-EEXIST);
btrfs_extend_item(trans, path, data_size);
@@ -93,7 +92,6 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
write_extent_buffer(leaf, name, name_ptr, name_len);
write_extent_buffer(leaf, data, data_ptr, data_len);
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
return ret;
}
@@ -153,7 +151,6 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
name_ptr = (unsigned long)(dir_item + 1);
write_extent_buffer(leaf, name->name, name_ptr, name->len);
- btrfs_mark_buffer_dirty(trans, leaf);
second_insert:
/* FIXME, use some real flag for selecting the extra index */
@@ -190,7 +187,7 @@ static struct btrfs_dir_item *btrfs_lookup_match_dir(
if (ret > 0)
return ERR_PTR(-ENOENT);
- return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
+ return btrfs_match_dir_item_name(path, name, name_len);
}
/*
@@ -341,14 +338,13 @@ btrfs_search_dir_index_item(struct btrfs_root *root, struct btrfs_path *path,
if (key.objectid != dirid || key.type != BTRFS_DIR_INDEX_KEY)
break;
- di = btrfs_match_dir_item_name(root->fs_info, path,
- name->name, name->len);
+ di = btrfs_match_dir_item_name(path, name->name, name->len);
if (di)
return di;
}
/* Adjust return code if the key was not found in the next leaf. */
- if (ret > 0)
- ret = 0;
+ if (ret >= 0)
+ ret = -ENOENT;
return ERR_PTR(ret);
}
@@ -378,8 +374,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
* this walks through all the entries in a dir item and finds one
* for a specific name.
*/
-struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
- const struct btrfs_path *path,
+struct btrfs_dir_item *btrfs_match_dir_item_name(const struct btrfs_path *path,
const char *name, int name_len)
{
struct btrfs_dir_item *dir_item;
diff --git a/fs/btrfs/dir-item.h b/fs/btrfs/dir-item.h
index 5f6dfafc91f1..28d69970bc70 100644
--- a/fs/btrfs/dir-item.h
+++ b/fs/btrfs/dir-item.h
@@ -44,8 +44,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_path *path, u64 dir,
const char *name, u16 name_len,
int mod);
-struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
- const struct btrfs_path *path,
+struct btrfs_dir_item *btrfs_match_dir_item_name(const struct btrfs_path *path,
const char *name,
int name_len);
diff --git a/fs/btrfs/direct-io.c b/fs/btrfs/direct-io.c
index bd38df5647e3..8567af46e16f 100644
--- a/fs/btrfs/direct-io.c
+++ b/fs/btrfs/direct-io.c
@@ -248,8 +248,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
len = min(len, em->len - (start - em->start));
block_start = extent_map_block_start(em) + (start - em->start);
- if (can_nocow_extent(inode, start, &len,
- &file_extent, false, false) == 1) {
+ if (can_nocow_extent(inode, start, &len, &file_extent, false) == 1) {
bg = btrfs_inc_nocow_writers(fs_info, block_start);
if (bg)
can_nocow = true;
@@ -834,7 +833,7 @@ relock:
return ret;
}
- ret = btrfs_write_check(iocb, from, ret);
+ ret = btrfs_write_check(iocb, ret);
if (ret < 0) {
btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
goto out;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4ad5db619b00..f09db62e61a1 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -226,7 +226,7 @@ int btrfs_read_extent_buffer(struct extent_buffer *eb,
while (1) {
clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
- ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, check);
+ ret = read_extent_buffer_pages(eb, mirror_num, check);
if (!ret)
break;
@@ -917,8 +917,7 @@ fail:
return ERR_PTR(ret);
}
-static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info)
+static struct btrfs_root *alloc_log_tree(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root;
@@ -966,7 +965,7 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
{
struct btrfs_root *log_root;
- log_root = alloc_log_tree(trans, fs_info);
+ log_root = alloc_log_tree(fs_info);
if (IS_ERR(log_root))
return PTR_ERR(log_root);
@@ -992,7 +991,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_inode_item *inode_item;
int ret;
- log_root = alloc_log_tree(trans, fs_info);
+ log_root = alloc_log_tree(fs_info);
if (IS_ERR(log_root))
return PTR_ERR(log_root);
@@ -1259,6 +1258,7 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
{
struct percpu_counter *em_counter = &fs_info->evictable_extent_maps;
+ percpu_counter_destroy(&fs_info->stats_read_blocks);
percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
percpu_counter_destroy(&fs_info->delalloc_bytes);
percpu_counter_destroy(&fs_info->ordered_bytes);
@@ -1959,7 +1959,7 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
fs_info->qgroup_seq = 1;
fs_info->qgroup_ulist = NULL;
fs_info->qgroup_rescan_running = false;
- fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
+ fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
mutex_init(&fs_info->qgroup_rescan_lock);
}
@@ -2328,6 +2328,71 @@ out:
return ret;
}
+static int validate_sys_chunk_array(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_super_block *sb)
+{
+ unsigned int cur = 0; /* Offset inside the sys chunk array */
+ /*
+ * At sb read time, fs_info is not fully initialized. Thus we have
+ * to use super block sectorsize, which should have been validated.
+ */
+ const u32 sectorsize = btrfs_super_sectorsize(sb);
+ u32 sys_array_size = btrfs_super_sys_array_size(sb);
+
+ if (sys_array_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
+ btrfs_err(fs_info, "system chunk array too big %u > %u",
+ sys_array_size, BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
+ return -EUCLEAN;
+ }
+
+ while (cur < sys_array_size) {
+ struct btrfs_disk_key *disk_key;
+ struct btrfs_chunk *chunk;
+ struct btrfs_key key;
+ u64 type;
+ u16 num_stripes;
+ u32 len;
+ int ret;
+
+ disk_key = (struct btrfs_disk_key *)(sb->sys_chunk_array + cur);
+ len = sizeof(*disk_key);
+
+ if (cur + len > sys_array_size)
+ goto short_read;
+ cur += len;
+
+ btrfs_disk_key_to_cpu(&key, disk_key);
+ if (key.type != BTRFS_CHUNK_ITEM_KEY) {
+ btrfs_err(fs_info,
+ "unexpected item type %u in sys_array at offset %u",
+ key.type, cur);
+ return -EUCLEAN;
+ }
+ chunk = (struct btrfs_chunk *)(sb->sys_chunk_array + cur);
+ num_stripes = btrfs_stack_chunk_num_stripes(chunk);
+ if (cur + btrfs_chunk_item_size(num_stripes) > sys_array_size)
+ goto short_read;
+ type = btrfs_stack_chunk_type(chunk);
+ if (!(type & BTRFS_BLOCK_GROUP_SYSTEM)) {
+ btrfs_err(fs_info,
+ "invalid chunk type %llu in sys_array at offset %u",
+ type, cur);
+ return -EUCLEAN;
+ }
+ ret = btrfs_check_chunk_valid(fs_info, NULL, chunk, key.offset,
+ sectorsize);
+ if (ret < 0)
+ return ret;
+ cur += btrfs_chunk_item_size(num_stripes);
+ }
+ return 0;
+short_read:
+ btrfs_err(fs_info,
+ "super block sys chunk array short read, cur=%u sys_array_size=%u",
+ cur, sys_array_size);
+ return -EUCLEAN;
+}
+
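Editor's note: for reference, validate_sys_chunk_array() walks entries that pack a disk key immediately followed by a variable-sized chunk item. A hedged sketch of the per-entry size (entry_size() is a made-up helper; it assumes, as btrfs_chunk_item_size() does, that struct btrfs_chunk embeds its first stripe and any extra stripes follow it):

    /* Sketch only: layout of one sys_chunk_array entry.
     *
     *   [btrfs_disk_key][btrfs_chunk (embeds 1 stripe)][num_stripes - 1 extra stripes]
     */
    static size_t entry_size(u16 num_stripes)
    {
            return sizeof(struct btrfs_disk_key) +
                   sizeof(struct btrfs_chunk) +
                   (size_t)(num_stripes - 1) * sizeof(struct btrfs_stripe);
    }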
/*
* Real super block validation
* NOTE: super csum type and incompat features will not be checked here.
@@ -2496,6 +2561,8 @@ int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
ret = -EINVAL;
}
+ ret = validate_sys_chunk_array(fs_info, sb);
+
/*
* Obvious sys_chunk_array corruptions, it must hold at least one key
* and one chunk
@@ -2786,6 +2853,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
btrfs_init_scrub(fs_info);
btrfs_init_balance(fs_info);
btrfs_init_async_reclaim_work(fs_info);
+ btrfs_init_extent_map_shrinker_work(fs_info);
rwlock_init(&fs_info->block_group_cache_lock);
fs_info->block_group_cache_tree = RB_ROOT_CACHED;
@@ -2852,12 +2920,14 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
if (ret)
return ret;
- spin_lock_init(&fs_info->extent_map_shrinker_lock);
-
ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
if (ret)
return ret;
+ ret = percpu_counter_init(&fs_info->stats_read_blocks, 0, GFP_KERNEL);
+ if (ret)
+ return ret;
+
fs_info->dirty_metadata_batch = PAGE_SIZE *
(1 + ilog2(nr_cpu_ids));
@@ -3202,8 +3272,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
return 0;
}
-int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
- const char *options)
+int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices)
{
u32 sectorsize;
u32 nodesize;
@@ -3324,6 +3393,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
fs_info->sectors_per_page = (PAGE_SIZE >> fs_info->sectorsize_bits);
fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
fs_info->stripesize = stripesize;
+ fs_info->fs_devices->fs_info = fs_info;
/*
* Handle the space caching options appropriately now that we have the
@@ -4186,7 +4256,7 @@ static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
btrfs_warn(fs_info,
"transaction %llu (with %llu dirty metadata bytes) is not committed",
trans->transid, dirty_bytes);
- btrfs_cleanup_one_transaction(trans, fs_info);
+ btrfs_cleanup_one_transaction(trans);
if (trans == fs_info->running_transaction)
fs_info->running_transaction = NULL;
@@ -4265,6 +4335,15 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
* already the cleaner, but below we run all pending delayed iputs.
*/
btrfs_flush_workqueue(fs_info->fixup_workers);
+ /*
+ * Similar case here, we have to wait for delalloc workers before we
+ * proceed below and stop the cleaner kthread, otherwise we trigger a
+	 * use-after-free on the cleaner kthread task_struct when a delalloc
+ * worker running submit_compressed_extents() adds a delayed iput, which
+ * does a wake up on the cleaner kthread, which was already freed below
+ * when we call kthread_stop().
+ */
+ btrfs_flush_workqueue(fs_info->delalloc_workers);
/*
* After we parked the cleaner kthread, ordered extents may have
@@ -4294,6 +4373,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
cancel_work_sync(&fs_info->async_reclaim_work);
cancel_work_sync(&fs_info->async_data_reclaim_work);
cancel_work_sync(&fs_info->preempt_reclaim_work);
+ cancel_work_sync(&fs_info->em_shrinker_work);
/* Cancel or finish ongoing discard work */
btrfs_discard_cleanup(fs_info);
@@ -4531,75 +4611,6 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
}
-static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
- struct btrfs_fs_info *fs_info)
-{
- struct rb_node *node;
- struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
- struct btrfs_delayed_ref_node *ref;
-
- spin_lock(&delayed_refs->lock);
- while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
- struct btrfs_delayed_ref_head *head;
- struct rb_node *n;
- bool pin_bytes = false;
-
- head = rb_entry(node, struct btrfs_delayed_ref_head,
- href_node);
- if (btrfs_delayed_ref_lock(delayed_refs, head))
- continue;
-
- spin_lock(&head->lock);
- while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
- ref = rb_entry(n, struct btrfs_delayed_ref_node,
- ref_node);
- rb_erase_cached(&ref->ref_node, &head->ref_tree);
- RB_CLEAR_NODE(&ref->ref_node);
- if (!list_empty(&ref->add_list))
- list_del(&ref->add_list);
- atomic_dec(&delayed_refs->num_entries);
- btrfs_put_delayed_ref(ref);
- btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
- }
- if (head->must_insert_reserved)
- pin_bytes = true;
- btrfs_free_delayed_extent_op(head->extent_op);
- btrfs_delete_ref_head(delayed_refs, head);
- spin_unlock(&head->lock);
- spin_unlock(&delayed_refs->lock);
- mutex_unlock(&head->mutex);
-
- if (pin_bytes) {
- struct btrfs_block_group *cache;
-
- cache = btrfs_lookup_block_group(fs_info, head->bytenr);
- BUG_ON(!cache);
-
- spin_lock(&cache->space_info->lock);
- spin_lock(&cache->lock);
- cache->pinned += head->num_bytes;
- btrfs_space_info_update_bytes_pinned(fs_info,
- cache->space_info, head->num_bytes);
- cache->reserved -= head->num_bytes;
- cache->space_info->bytes_reserved -= head->num_bytes;
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
-
- btrfs_put_block_group(cache);
-
- btrfs_error_unpin_extent_range(fs_info, head->bytenr,
- head->bytenr + head->num_bytes - 1);
- }
- btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
- btrfs_put_delayed_ref_head(head);
- cond_resched();
- spin_lock(&delayed_refs->lock);
- }
- btrfs_qgroup_destroy_extent_records(trans);
-
- spin_unlock(&delayed_refs->lock);
-}
-
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
struct btrfs_inode *btrfs_inode;
@@ -4805,9 +4816,9 @@ static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
spin_unlock(&fs_info->fs_roots_radix_lock);
}
-void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
- struct btrfs_fs_info *fs_info)
+void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans)
{
+ struct btrfs_fs_info *fs_info = cur_trans->fs_info;
struct btrfs_device *dev, *tmp;
btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
@@ -4819,7 +4830,7 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
list_del_init(&dev->post_commit_list);
}
- btrfs_destroy_delayed_refs(cur_trans, fs_info);
+ btrfs_destroy_delayed_refs(cur_trans);
cur_trans->state = TRANS_STATE_COMMIT_START;
wake_up(&fs_info->transaction_blocked_wait);
@@ -4865,7 +4876,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
} else {
spin_unlock(&fs_info->trans_lock);
}
- btrfs_cleanup_one_transaction(t, fs_info);
+ btrfs_cleanup_one_transaction(t);
spin_lock(&fs_info->trans_lock);
if (t == fs_info->running_transaction)
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 99af64d3f277..587842991b24 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -52,8 +52,7 @@ struct extent_buffer *btrfs_find_create_tree_block(
int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info);
int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
const struct btrfs_super_block *disk_sb);
-int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
- const char *options);
+int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices);
void __cold close_ctree(struct btrfs_fs_info *fs_info);
int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
const struct btrfs_super_block *sb, int mirror_num);
@@ -97,9 +96,6 @@ struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info);
/*
* This function is used to grab the root, and avoid it is freed when we
* access it. But it doesn't ensure that the tree is not dropped.
- *
- * If you want to ensure the whole tree is safe, you should use
- * fs_info->subvol_srcu
*/
static inline struct btrfs_root *btrfs_grab_root(struct btrfs_root *root)
{
@@ -127,8 +123,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *trans,
struct btrfs_fs_info *fs_info);
-void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
- struct btrfs_fs_info *fs_info);
+void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans);
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
u64 objectid);
int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a5966324607d..3014a1a23efd 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -182,7 +182,7 @@ search_again:
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
- head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
+ head = btrfs_find_delayed_ref_head(fs_info, delayed_refs, bytenr);
if (head) {
if (!mutex_trylock(&head->mutex)) {
refcount_inc(&head->refs);
@@ -570,7 +570,6 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
}
}
- btrfs_mark_buffer_dirty(trans, leaf);
ret = 0;
fail:
btrfs_release_path(path);
@@ -618,7 +617,6 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
- btrfs_mark_buffer_dirty(trans, leaf);
}
return ret;
}
@@ -795,7 +793,6 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
if (insert) {
extra_size = btrfs_extent_inline_ref_size(want);
path->search_for_extension = 1;
- path->keep_locks = 1;
} else
extra_size = -1;
@@ -946,6 +943,25 @@ again:
ret = -EAGAIN;
goto out;
}
+
+ if (path->slots[0] + 1 < btrfs_header_nritems(path->nodes[0])) {
+ struct btrfs_key tmp_key;
+
+ btrfs_item_key_to_cpu(path->nodes[0], &tmp_key, path->slots[0] + 1);
+ if (tmp_key.objectid == bytenr &&
+ tmp_key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ goto out_no_entry;
+ }
+
+ if (!path->keep_locks) {
+ btrfs_release_path(path);
+ path->keep_locks = 1;
+ goto again;
+ }
+
/*
* To add new inline back ref, we have to make sure
* there is no corresponding back ref item.
@@ -959,13 +975,15 @@ again:
goto out;
}
}
+out_no_entry:
*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
- if (insert) {
+ if (path->keep_locks) {
path->keep_locks = 0;
- path->search_for_extension = 0;
btrfs_unlock_up_safe(path, 1);
}
+ if (insert)
+ path->search_for_extension = 0;
return ret;
}
@@ -1030,7 +1048,6 @@ void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
} else {
btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
}
- btrfs_mark_buffer_dirty(trans, leaf);
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
@@ -1175,7 +1192,6 @@ static noinline_for_stack int update_inline_extent_backref(
item_size -= size;
btrfs_truncate_item(trans, path, item_size, 1);
}
- btrfs_mark_buffer_dirty(trans, leaf);
return 0;
}
@@ -1240,12 +1256,12 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
{
int j, ret = 0;
u64 bytes_left, end;
- u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT);
+ u64 aligned_start = ALIGN(start, SECTOR_SIZE);
/* Adjust the range to be aligned to 512B sectors if necessary. */
if (start != aligned_start) {
len -= aligned_start - start;
- len = round_down(len, 1 << SECTOR_SHIFT);
+ len = round_down(len, SECTOR_SIZE);
start = aligned_start;
}
@@ -1300,13 +1316,29 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
bytes_left = end - start;
}
- if (bytes_left) {
+ while (bytes_left) {
+ u64 bytes_to_discard = min(BTRFS_MAX_DISCARD_CHUNK_SIZE, bytes_left);
+
ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
- bytes_left >> SECTOR_SHIFT,
+ bytes_to_discard >> SECTOR_SHIFT,
GFP_NOFS);
- if (!ret)
- *discarded_bytes += bytes_left;
+
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ break;
+ continue;
+ }
+
+ start += bytes_to_discard;
+ bytes_left -= bytes_to_discard;
+ *discarded_bytes += bytes_to_discard;
+
+ if (btrfs_trim_interrupted()) {
+ ret = -ERESTARTSYS;
+ break;
+ }
}
+
return ret;
}
@@ -1491,7 +1523,6 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
if (extent_op)
__run_delayed_extent_op(extent_op, leaf, item);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
/* now insert the actual backref */
@@ -1675,8 +1706,6 @@ again:
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
__run_delayed_extent_op(extent_op, leaf, ei);
-
- btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
return ret;
@@ -1767,40 +1796,6 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
return ret;
}
-static inline struct btrfs_delayed_ref_node *
-select_delayed_ref(struct btrfs_delayed_ref_head *head)
-{
- struct btrfs_delayed_ref_node *ref;
-
- if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
- return NULL;
-
- /*
- * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
- * This is to prevent a ref count from going down to zero, which deletes
- * the extent item from the extent tree, when there still are references
- * to add, which would fail because they would not find the extent item.
- */
- if (!list_empty(&head->ref_add_list))
- return list_first_entry(&head->ref_add_list,
- struct btrfs_delayed_ref_node, add_list);
-
- ref = rb_entry(rb_first_cached(&head->ref_tree),
- struct btrfs_delayed_ref_node, ref_node);
- ASSERT(list_empty(&ref->add_list));
- return ref;
-}
-
-static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_delayed_ref_head *head)
-{
- spin_lock(&delayed_refs->lock);
- head->processing = false;
- delayed_refs->num_heads_ready++;
- spin_unlock(&delayed_refs->lock);
- btrfs_delayed_ref_unlock(head);
-}
-
static struct btrfs_delayed_extent_op *cleanup_extent_op(
struct btrfs_delayed_ref_head *head)
{
@@ -1875,7 +1870,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
ret = run_and_cleanup_extent_op(trans, head);
if (ret < 0) {
- unselect_delayed_ref_head(delayed_refs, head);
+ btrfs_unselect_ref_head(delayed_refs, head);
btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
return ret;
} else if (ret) {
@@ -1894,7 +1889,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
spin_unlock(&delayed_refs->lock);
return 1;
}
- btrfs_delete_ref_head(delayed_refs, head);
+ btrfs_delete_ref_head(fs_info, delayed_refs, head);
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
@@ -1917,39 +1912,6 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
return ret;
}
-static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
- struct btrfs_trans_handle *trans)
-{
- struct btrfs_delayed_ref_root *delayed_refs =
- &trans->transaction->delayed_refs;
- struct btrfs_delayed_ref_head *head = NULL;
- int ret;
-
- spin_lock(&delayed_refs->lock);
- head = btrfs_select_ref_head(delayed_refs);
- if (!head) {
- spin_unlock(&delayed_refs->lock);
- return head;
- }
-
- /*
- * Grab the lock that says we are going to process all the refs for
- * this head
- */
- ret = btrfs_delayed_ref_lock(delayed_refs, head);
- spin_unlock(&delayed_refs->lock);
-
- /*
- * We may have dropped the spin lock to get the head mutex lock, and
- * that might have given someone else time to free the head. If that's
- * true, it has been removed from our list and we can move on.
- */
- if (ret == -EAGAIN)
- head = ERR_PTR(-EAGAIN);
-
- return head;
-}
-
static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *locked_ref,
u64 *bytes_released)
@@ -1966,11 +1928,11 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
lockdep_assert_held(&locked_ref->mutex);
lockdep_assert_held(&locked_ref->lock);
- while ((ref = select_delayed_ref(locked_ref))) {
+ while ((ref = btrfs_select_delayed_ref(locked_ref))) {
if (ref->seq &&
btrfs_check_delayed_seq(fs_info, ref->seq)) {
spin_unlock(&locked_ref->lock);
- unselect_delayed_ref_head(delayed_refs, locked_ref);
+ btrfs_unselect_ref_head(delayed_refs, locked_ref);
return -EAGAIN;
}
@@ -1993,7 +1955,6 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
default:
WARN_ON(1);
}
- atomic_dec(&delayed_refs->num_entries);
/*
* Record the must_insert_reserved flag before we drop the
@@ -2019,7 +1980,7 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
btrfs_free_delayed_extent_op(extent_op);
if (ret) {
- unselect_delayed_ref_head(delayed_refs, locked_ref);
+ btrfs_unselect_ref_head(delayed_refs, locked_ref);
btrfs_put_delayed_ref(ref);
return ret;
}
@@ -2057,7 +2018,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
do {
if (!locked_ref) {
- locked_ref = btrfs_obtain_ref_head(trans);
+ locked_ref = btrfs_select_ref_head(fs_info, delayed_refs);
if (IS_ERR_OR_NULL(locked_ref)) {
if (PTR_ERR(locked_ref) == -EAGAIN) {
continue;
@@ -2204,7 +2165,7 @@ again:
btrfs_create_pending_block_groups(trans);
spin_lock(&delayed_refs->lock);
- if (RB_EMPTY_ROOT(&delayed_refs->href_root.rb_root)) {
+ if (xa_empty(&delayed_refs->head_refs)) {
spin_unlock(&delayed_refs->lock);
return 0;
}
@@ -2238,10 +2199,11 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
return ret;
}
-static noinline int check_delayed_ref(struct btrfs_root *root,
+static noinline int check_delayed_ref(struct btrfs_inode *inode,
struct btrfs_path *path,
- u64 objectid, u64 offset, u64 bytenr)
+ u64 offset, u64 bytenr)
{
+ struct btrfs_root *root = inode->root;
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_node *ref;
struct btrfs_delayed_ref_root *delayed_refs;
@@ -2259,7 +2221,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
delayed_refs = &cur_trans->delayed_refs;
spin_lock(&delayed_refs->lock);
- head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
+ head = btrfs_find_delayed_ref_head(root->fs_info, delayed_refs, bytenr);
if (!head) {
spin_unlock(&delayed_refs->lock);
btrfs_put_transaction(cur_trans);
@@ -2315,7 +2277,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
* then we have a cross reference.
*/
if (ref->ref_root != btrfs_root_id(root) ||
- ref_owner != objectid || ref_offset != offset) {
+ ref_owner != btrfs_ino(inode) || ref_offset != offset) {
ret = 1;
break;
}
@@ -2326,11 +2288,53 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
return ret;
}
-static noinline int check_committed_ref(struct btrfs_root *root,
+/*
+ * Check if there are references for a data extent other than the one belonging
+ * to the given inode and offset.
+ *
+ * @inode: The only inode we expect to find associated with the data extent.
+ * @path: A path to use for searching the extent tree.
+ * @offset: The only offset we expect to find associated with the data extent.
+ * @bytenr: The logical address of the data extent.
+ *
+ * When the extent does not have any references other than the one we
+ * expect to find, we always return a value of 0 with the path having a locked
+ * leaf that contains the extent's extent item - this is necessary to ensure
+ * we don't race with a task running delayed references, and our caller must
+ * have such a path when calling check_delayed_ref() - it must lock a delayed
+ * ref head while holding the leaf locked. In case the extent item is not found
+ * in the extent tree, we return -ENOENT with the path having the leaf (locked)
+ * where the extent item should be, in order to prevent races with another task
+ * running delayed references, so that we don't miss any reference when calling
+ * check_delayed_ref().
+ *
+ * Note: this may return false positives, and this is because we want to be
+ * quick here as we're called in write paths (when flushing delalloc and
+ * in the direct IO write path). For example we can have an extent with
+ * a single reference but that reference is not inlined, or we may have
+ * many references in the extent tree but we also have delayed references
+ * that cancel all the reference except the one for our inode and offset,
+ * but it would be expensive to do such checks and complex due to all
+ * locking to avoid races between the checks and flushing delayed refs,
+ * plus non-inline references may be located on leaves other than the one
+ * that contains the extent item in the extent tree. The important thing
+ * here is to not return false negatives and that the false positives are
+ * not very common.
+ *
+ * Returns: 0 if there are no cross references and with the path having a locked
+ * leaf from the extent tree that contains the extent's extent item.
+ *
+ * 1 if there are cross references (false positives can happen).
+ *
+ * < 0 in case of an error. In case of -ENOENT the leaf in the extent
+ * tree where the extent item should be located is read locked and
+ * accessible in the given path.
+ */
+static noinline int check_committed_ref(struct btrfs_inode *inode,
struct btrfs_path *path,
- u64 objectid, u64 offset, u64 bytenr,
- bool strict)
+ u64 offset, u64 bytenr)
{
+ struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
struct extent_buffer *leaf;
@@ -2349,35 +2353,32 @@ static noinline int check_committed_ref(struct btrfs_root *root,
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (ret == 0) {
/*
* Key with offset -1 found, there would have to exist an extent
* item with such offset, but this is out of the valid range.
*/
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
- ret = -ENOENT;
if (path->slots[0] == 0)
- goto out;
+ return -ENOENT;
path->slots[0]--;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
- goto out;
+ return -ENOENT;
- ret = 1;
item_size = btrfs_item_size(leaf, path->slots[0]);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
expected_size = sizeof(*ei) + btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY);
/* No inline refs; we need to bail before checking for owner ref. */
if (item_size == sizeof(*ei))
- goto out;
+ return 1;
/* Check for an owner ref; skip over it to the real inline refs. */
iref = (struct btrfs_extent_inline_ref *)(ei + 1);
@@ -2385,56 +2386,69 @@ static noinline int check_committed_ref(struct btrfs_root *root,
if (btrfs_fs_incompat(fs_info, SIMPLE_QUOTA) && type == BTRFS_EXTENT_OWNER_REF_KEY) {
expected_size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY);
iref = (struct btrfs_extent_inline_ref *)(iref + 1);
+ type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
}
/* If extent item has more than 1 inline ref then it's shared */
if (item_size != expected_size)
- goto out;
-
- /*
- * If extent created before last snapshot => it's shared unless the
- * snapshot has been deleted. Use the heuristic if strict is false.
- */
- if (!strict &&
- (btrfs_extent_generation(leaf, ei) <=
- btrfs_root_last_snapshot(&root->root_item)))
- goto out;
+ return 1;
/* If this extent has SHARED_DATA_REF then it's shared */
- type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
if (type != BTRFS_EXTENT_DATA_REF_KEY)
- goto out;
+ return 1;
ref = (struct btrfs_extent_data_ref *)(&iref->offset);
if (btrfs_extent_refs(leaf, ei) !=
btrfs_extent_data_ref_count(leaf, ref) ||
btrfs_extent_data_ref_root(leaf, ref) != btrfs_root_id(root) ||
- btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
+ btrfs_extent_data_ref_objectid(leaf, ref) != btrfs_ino(inode) ||
btrfs_extent_data_ref_offset(leaf, ref) != offset)
- goto out;
+ return 1;
- ret = 0;
-out:
- return ret;
+ return 0;
}
-int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
- u64 bytenr, bool strict, struct btrfs_path *path)
+int btrfs_cross_ref_exist(struct btrfs_inode *inode, u64 offset,
+ u64 bytenr, struct btrfs_path *path)
{
int ret;
do {
- ret = check_committed_ref(root, path, objectid,
- offset, bytenr, strict);
+ ret = check_committed_ref(inode, path, offset, bytenr);
if (ret && ret != -ENOENT)
goto out;
- ret = check_delayed_ref(root, path, objectid, offset, bytenr);
- } while (ret == -EAGAIN);
+ /*
+ * The path must have a locked leaf from the extent tree where
+ * the extent item for our extent is located, in case it exists,
+ * or where it should be located in case it doesn't exist yet
+ * because it's new and its delayed ref was not yet flushed.
+ * We need to lock the delayed ref head at check_delayed_ref(),
+ * if one exists, while holding the leaf locked in order to not
+ * race with delayed ref flushing, missing references and
+ * incorrectly reporting that the extent is not shared.
+ */
+ if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) {
+ struct extent_buffer *leaf = path->nodes[0];
+
+ ASSERT(leaf != NULL);
+ btrfs_assert_tree_read_locked(leaf);
+
+ if (ret != -ENOENT) {
+ struct btrfs_key key;
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ ASSERT(key.objectid == bytenr);
+ ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY);
+ }
+ }
+
+ ret = check_delayed_ref(inode, path, offset, bytenr);
+ } while (ret == -EAGAIN && !path->nowait);
out:
btrfs_release_path(path);
- if (btrfs_is_data_reloc_root(root))
+ if (btrfs_is_data_reloc_root(inode->root))
WARN_ON(ret > 0);
return ret;
}
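Editor's note: a hypothetical caller sketch (not from the patch; may_overwrite_in_place() is a made-up name) showing how a NOCOW-style write path typically interprets the return value documented above. Real callers such as can_nocow_extent() may handle specific errors differently.

    /* Sketch only; conservative on any error. */
    static bool may_overwrite_in_place(struct btrfs_inode *inode, u64 offset,
                                       u64 bytenr, struct btrfs_path *path)
    {
            int ret = btrfs_cross_ref_exist(inode, offset, bytenr, path);

            if (ret == 0)
                    return true;   /* only our reference exists: NOCOW is safe */
            if (ret > 0)
                    return false;  /* possibly shared (false positives allowed): COW */
            return false;          /* error: fall back to COW */
    }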
@@ -2579,13 +2593,10 @@ static int pin_down_extent(struct btrfs_trans_handle *trans,
struct btrfs_block_group *cache,
u64 bytenr, u64 num_bytes, int reserved)
{
- struct btrfs_fs_info *fs_info = cache->fs_info;
-
spin_lock(&cache->space_info->lock);
spin_lock(&cache->lock);
cache->pinned += num_bytes;
- btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info,
- num_bytes);
+ btrfs_space_info_update_bytes_pinned(cache->space_info, num_bytes);
if (reserved) {
cache->reserved -= num_bytes;
cache->space_info->bytes_reserved -= num_bytes;
@@ -2732,15 +2743,15 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
{
struct btrfs_block_group *cache = NULL;
struct btrfs_space_info *space_info;
- struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
struct btrfs_free_cluster *cluster = NULL;
- u64 len;
u64 total_unpinned = 0;
u64 empty_cluster = 0;
bool readonly;
int ret = 0;
while (start <= end) {
+ u64 len;
+
readonly = false;
if (!cache ||
start >= cache->start + cache->length) {
@@ -2786,37 +2797,19 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
cache->pinned -= len;
- btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
+ btrfs_space_info_update_bytes_pinned(space_info, -len);
space_info->max_extent_size = 0;
if (cache->ro) {
space_info->bytes_readonly += len;
readonly = true;
} else if (btrfs_is_zoned(fs_info)) {
/* Need reset before reusing in a zoned block group */
- btrfs_space_info_update_bytes_zone_unusable(fs_info, space_info,
- len);
+ btrfs_space_info_update_bytes_zone_unusable(space_info, len);
readonly = true;
}
spin_unlock(&cache->lock);
- if (!readonly && return_free_space &&
- global_rsv->space_info == space_info) {
- spin_lock(&global_rsv->lock);
- if (!global_rsv->full) {
- u64 to_add = min(len, global_rsv->size -
- global_rsv->reserved);
-
- global_rsv->reserved += to_add;
- btrfs_space_info_update_bytes_may_use(fs_info,
- space_info, to_add);
- if (global_rsv->reserved >= global_rsv->size)
- global_rsv->full = 1;
- len -= to_add;
- }
- spin_unlock(&global_rsv->lock);
- }
- /* Add to any tickets we may have */
- if (!readonly && return_free_space && len)
- btrfs_try_granting_tickets(fs_info, space_info);
+ if (!readonly && return_free_space)
+ btrfs_return_free_space(space_info, len);
spin_unlock(&space_info->lock);
}
@@ -3128,7 +3121,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
break;
}
- /* Quick path didn't find the EXTEMT/METADATA_ITEM */
+ /* Quick path didn't find the EXTENT/METADATA_ITEM */
if (path->slots[0] - extent_slot > 5)
break;
extent_slot--;
@@ -3267,7 +3260,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
}
} else {
btrfs_set_extent_refs(leaf, ei, refs);
- btrfs_mark_buffer_dirty(trans, leaf);
}
if (found_extent) {
ret = remove_extent_backref(trans, extent_root, path,
@@ -3361,13 +3353,14 @@ out:
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
u64 bytenr)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_root *delayed_refs;
int ret = 0;
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
- head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
+ head = btrfs_find_delayed_ref_head(fs_info, delayed_refs, bytenr);
if (!head)
goto out_delayed_unlock;
@@ -3385,7 +3378,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
if (!mutex_trylock(&head->mutex))
goto out;
- btrfs_delete_ref_head(delayed_refs, head);
+ btrfs_delete_ref_head(fs_info, delayed_refs, head);
head->processing = false;
spin_unlock(&head->lock);
@@ -3395,7 +3388,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
if (head->must_insert_reserved)
ret = 1;
- btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head);
+ btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref_head(head);
return ret;
@@ -4834,7 +4827,6 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
}
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_free_path(path);
return alloc_reserved_extent(trans, ins->objectid, ins->offset);
@@ -4909,7 +4901,6 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
btrfs_set_extent_inline_ref_offset(leaf, iref, node->ref_root);
}
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_free_path(path);
return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
@@ -5254,7 +5245,7 @@ struct walk_control {
* corrupted file systems must have been caught before calling this function.
*/
static bool visit_node_for_delete(struct btrfs_root *root, struct walk_control *wc,
- struct extent_buffer *eb, u64 refs, u64 flags, int slot)
+ struct extent_buffer *eb, u64 flags, int slot)
{
struct btrfs_key key;
u64 generation;
@@ -5292,7 +5283,7 @@ static bool visit_node_for_delete(struct btrfs_root *root, struct walk_control *
* reference to it.
*/
generation = btrfs_node_ptr_generation(eb, slot);
- if (!wc->update_ref || generation <= root->root_key.offset)
+ if (!wc->update_ref || generation <= btrfs_root_origin_generation(root))
return false;
/*
@@ -5347,7 +5338,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
goto reada;
if (wc->stage == UPDATE_BACKREF &&
- generation <= root->root_key.offset)
+ generation <= btrfs_root_origin_generation(root))
continue;
/* We don't lock the tree block, it's OK to be racy here */
@@ -5368,7 +5359,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
continue;
/* If we don't need to visit this node don't reada. */
- if (!visit_node_for_delete(root, wc, eb, refs, flags, slot))
+ if (!visit_node_for_delete(root, wc, eb, flags, slot))
continue;
reada:
btrfs_readahead_node_child(eb, slot);
@@ -5502,7 +5493,7 @@ again:
*/
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
- head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
+ head = btrfs_find_delayed_ref_head(root->fs_info, delayed_refs, bytenr);
if (!head)
goto out;
if (!mutex_trylock(&head->mutex)) {
@@ -5690,7 +5681,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
* for the subtree
*/
if (wc->stage == UPDATE_BACKREF &&
- generation <= root->root_key.offset) {
+ generation <= btrfs_root_origin_generation(root)) {
wc->lookup_info = 1;
return 1;
}
@@ -5721,8 +5712,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
/* If we don't have to walk into this node skip it. */
if (!visit_node_for_delete(root, wc, path->nodes[level],
- wc->refs[level - 1], wc->flags[level - 1],
- path->slots[level]))
+ wc->flags[level - 1], path->slots[level]))
goto skip;
/*
@@ -6459,7 +6449,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
start += len;
*trimmed += bytes;
- if (fatal_signal_pending(current)) {
+ if (btrfs_trim_interrupted()) {
ret = -ERESTARTSYS;
break;
}
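
The unpin path above now hands freed space to a single btrfs_return_free_space() call instead of open-coding the global reserve refill. The sketch below is a hedged reconstruction of what that helper presumably consolidates, pieced together from the inline code removed from unpin_extent_range(); the helper names and signatures come from the removed lines, while the body itself is an assumption, not the actual implementation.

/*
 * Hedged reconstruction of btrfs_return_free_space(), based on the inline
 * code removed from unpin_extent_range() above.  Assumed to be called with
 * space_info->lock held, as the removed code was.
 */
static void return_free_space_sketch(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info, u64 len)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;

	/* Top up the global block reserve if it draws from this space_info. */
	if (global_rsv->space_info == space_info) {
		spin_lock(&global_rsv->lock);
		if (!global_rsv->full) {
			u64 to_add = min(len, global_rsv->size - global_rsv->reserved);

			global_rsv->reserved += to_add;
			btrfs_space_info_update_bytes_may_use(fs_info, space_info, to_add);
			if (global_rsv->reserved >= global_rsv->size)
				global_rsv->full = 1;
			len -= to_add;
		}
		spin_unlock(&global_rsv->lock);
	}

	/* Whatever remains can satisfy queued reservation tickets. */
	if (len)
		btrfs_try_granting_tickets(fs_info, space_info);
}

Doing all of this under space_info->lock, as the removed code did, keeps the pinned-byte accounting and the ticket granting consistent with each other.
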
diff --git a/fs/btrfs/extent-tree.h b/fs/btrfs/extent-tree.h
index 2ad51130c037..cfa52264f678 100644
--- a/fs/btrfs/extent-tree.h
+++ b/fs/btrfs/extent-tree.h
@@ -116,8 +116,7 @@ int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num,
int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
const struct extent_buffer *eb);
int btrfs_exclude_logged_extents(struct extent_buffer *eb);
-int btrfs_cross_ref_exist(struct btrfs_root *root,
- u64 objectid, u64 offset, u64 bytenr, bool strict,
+int btrfs_cross_ref_exist(struct btrfs_inode *inode, u64 offset, u64 bytenr,
struct btrfs_path *path);
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -163,5 +162,9 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *node,
struct extent_buffer *parent);
+void btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end);
+int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
+ u64 num_bytes, u64 *actual_bytes);
+int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
#endif
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 39c9677c47d5..d9f856358704 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -190,7 +190,7 @@ static void process_one_folio(struct btrfs_fs_info *fs_info,
btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
- btrfs_folio_end_writer_lock(fs_info, folio, start, len);
+ btrfs_folio_end_lock(fs_info, folio, start, len);
}
static void __process_folios_contig(struct address_space *mapping,
@@ -198,9 +198,8 @@ static void __process_folios_contig(struct address_space *mapping,
u64 end, unsigned long page_ops)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
- pgoff_t start_index = start >> PAGE_SHIFT;
+ pgoff_t index = start >> PAGE_SHIFT;
pgoff_t end_index = end >> PAGE_SHIFT;
- pgoff_t index = start_index;
struct folio_batch fbatch;
int i;
@@ -221,7 +220,7 @@ static void __process_folios_contig(struct address_space *mapping,
}
}
-static noinline void __unlock_for_delalloc(const struct inode *inode,
+static noinline void unlock_delalloc_folio(const struct inode *inode,
const struct folio *locked_folio,
u64 start, u64 end)
{
@@ -242,9 +241,8 @@ static noinline int lock_delalloc_folios(struct inode *inode,
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct address_space *mapping = inode->i_mapping;
- pgoff_t start_index = start >> PAGE_SHIFT;
+ pgoff_t index = start >> PAGE_SHIFT;
pgoff_t end_index = end >> PAGE_SHIFT;
- pgoff_t index = start_index;
u64 processed_end = start;
struct folio_batch fbatch;
@@ -262,22 +260,23 @@ static noinline int lock_delalloc_folios(struct inode *inode,
for (i = 0; i < found_folios; i++) {
struct folio *folio = fbatch.folios[i];
- u32 len = end + 1 - start;
+ u64 range_start;
+ u32 range_len;
if (folio == locked_folio)
continue;
- if (btrfs_folio_start_writer_lock(fs_info, folio, start,
- len))
- goto out;
-
+ folio_lock(folio);
if (!folio_test_dirty(folio) || folio->mapping != mapping) {
- btrfs_folio_end_writer_lock(fs_info, folio, start,
- len);
+ folio_unlock(folio);
goto out;
}
+ range_start = max_t(u64, folio_pos(folio), start);
+ range_len = min_t(u64, folio_pos(folio) + folio_size(folio),
+ end + 1) - range_start;
+ btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
- processed_end = folio_pos(folio) + folio_size(folio) - 1;
+ processed_end = range_start + range_len - 1;
}
folio_batch_release(&fbatch);
cond_resched();
@@ -287,8 +286,7 @@ static noinline int lock_delalloc_folios(struct inode *inode,
out:
folio_batch_release(&fbatch);
if (processed_end > start)
- __unlock_for_delalloc(inode, locked_folio, start,
- processed_end);
+ unlock_delalloc_folio(inode, locked_folio, start, processed_end);
return -EAGAIN;
}
@@ -389,7 +387,7 @@ again:
unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
if (!ret) {
- __unlock_for_delalloc(inode, locked_folio, delalloc_start,
+ unlock_delalloc_folio(inode, locked_folio, delalloc_start,
delalloc_end);
cond_resched();
goto again;
@@ -437,7 +435,7 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 le
if (!btrfs_is_subpage(fs_info, folio->mapping))
folio_unlock(folio);
else
- btrfs_subpage_end_reader(fs_info, folio, start, len);
+ btrfs_folio_end_lock(fs_info, folio, start, len);
}
/*
@@ -494,7 +492,7 @@ static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
return;
ASSERT(folio_test_private(folio));
- btrfs_subpage_start_reader(fs_info, folio, folio_pos(folio), PAGE_SIZE);
+ btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE);
}
/*
@@ -709,6 +707,7 @@ static void alloc_new_bio(struct btrfs_inode *inode,
bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
bio_ctrl->end_io_func, NULL);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
+ bbio->bio.bi_write_hint = inode->vfs_inode.i_write_hint;
bbio->inode = inode;
bbio->file_offset = file_offset;
bio_ctrl->bbio = bbio;
@@ -785,7 +784,7 @@ static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
}
if (bio_ctrl->wbc)
- wbc_account_cgroup_owner(bio_ctrl->wbc, &folio->page,
+ wbc_account_cgroup_owner(bio_ctrl->wbc, folio,
len);
size -= len;
@@ -861,11 +860,6 @@ static int attach_extent_buffer_folio(struct extent_buffer *eb,
return ret;
}
-int set_page_extent_mapped(struct page *page)
-{
- return set_folio_extent_mapped(page_folio(page));
-}
-
int set_folio_extent_mapped(struct folio *folio)
{
struct btrfs_fs_info *fs_info;
@@ -900,9 +894,9 @@ void clear_folio_extent_mapped(struct folio *folio)
folio_detach_private(folio);
}
-static struct extent_map *__get_extent_map(struct inode *inode,
- struct folio *folio, u64 start,
- u64 len, struct extent_map **em_cached)
+static struct extent_map *get_extent_map(struct btrfs_inode *inode,
+ struct folio *folio, u64 start,
+ u64 len, struct extent_map **em_cached)
{
struct extent_map *em;
struct extent_state *cached_state = NULL;
@@ -921,14 +915,14 @@ static struct extent_map *__get_extent_map(struct inode *inode,
*em_cached = NULL;
}
- btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), start, start + len - 1, &cached_state);
- em = btrfs_get_extent(BTRFS_I(inode), folio, start, len);
+ btrfs_lock_and_flush_ordered_range(inode, start, start + len - 1, &cached_state);
+ em = btrfs_get_extent(inode, folio, start, len);
if (!IS_ERR(em)) {
BUG_ON(*em_cached);
refcount_inc(&em->refs);
*em_cached = em;
}
- unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len - 1, &cached_state);
+ unlock_extent(&inode->io_tree, start, start + len - 1, &cached_state);
return em;
}
@@ -984,8 +978,7 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
end_folio_read(folio, true, cur, iosize);
break;
}
- em = __get_extent_map(inode, folio, cur, end - cur + 1,
- em_cached);
+ em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
if (IS_ERR(em)) {
end_folio_read(folio, false, cur, end + 1 - cur);
return PTR_ERR(em);
@@ -1101,15 +1094,59 @@ int btrfs_read_folio(struct file *file, struct folio *folio)
return ret;
}
+static void set_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bitmap,
+ u64 start, u32 len)
+{
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
+ const u64 folio_start = folio_pos(folio);
+ unsigned int start_bit;
+ unsigned int nbits;
+
+ ASSERT(start >= folio_start && start + len <= folio_start + PAGE_SIZE);
+ start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
+ nbits = len >> fs_info->sectorsize_bits;
+ ASSERT(bitmap_test_range_all_zero(delalloc_bitmap, start_bit, nbits));
+ bitmap_set(delalloc_bitmap, start_bit, nbits);
+}
+
+static bool find_next_delalloc_bitmap(struct folio *folio,
+ unsigned long *delalloc_bitmap, u64 start,
+ u64 *found_start, u32 *found_len)
+{
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
+ const u64 folio_start = folio_pos(folio);
+ const unsigned int bitmap_size = fs_info->sectors_per_page;
+ unsigned int start_bit;
+ unsigned int first_zero;
+ unsigned int first_set;
+
+ ASSERT(start >= folio_start && start < folio_start + PAGE_SIZE);
+
+ start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
+ first_set = find_next_bit(delalloc_bitmap, bitmap_size, start_bit);
+ if (first_set >= bitmap_size)
+ return false;
+
+ *found_start = folio_start + (first_set << fs_info->sectorsize_bits);
+ first_zero = find_next_zero_bit(delalloc_bitmap, bitmap_size, first_set);
+ *found_len = (first_zero - first_set) << fs_info->sectorsize_bits;
+ return true;
+}
+
/*
- * helper for extent_writepage(), doing all of the delayed allocation setup.
+ * Do all of the delayed allocation setup.
+ *
+ * Return >0 if all the dirty blocks are submitted async (compression) or inlined.
+ * The @folio should no longer be touched (treat it as already unlocked).
*
- * This returns 1 if btrfs_run_delalloc_range function did all the work required
- * to write the page (copy into inline extent). In this case the IO has
- * been started and the page is already unlocked.
+ * Return 0 if there are still dirty blocks that need to be submitted through
+ * extent_writepage_io().
+ * bio_ctrl->submit_bitmap will indicate which blocks of the folio should be
+ * submitted, and @folio is still kept locked.
*
- * This returns 0 if all went well (page still locked)
- * This returns < 0 if there were errors (page still locked)
+ * Return <0 if any error is hit.
+ * Any allocated ordered extent range covering this folio will be marked
+ * finished (IOERR), and @folio is still kept locked.
*/
static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
struct folio *folio,
@@ -1120,16 +1157,28 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
const bool is_subpage = btrfs_is_subpage(fs_info, folio->mapping);
const u64 page_start = folio_pos(folio);
const u64 page_end = page_start + folio_size(folio) - 1;
+ unsigned long delalloc_bitmap = 0;
/*
	 * Save the last found delalloc end. Since the delalloc end can go beyond
	 * the page boundary, we cannot rely on the subpage bitmap to locate the
* last delalloc end.
*/
u64 last_delalloc_end = 0;
+ /*
+ * The range end (exclusive) of the last successfully finished delalloc
+ * range.
+ * Any range covered by ordered extent must either be manually marked
+ * finished (error handling), or has IO submitted (and finish the
+ * ordered extent normally).
+ *
+ * This records the end of ordered extent cleanup if we hit an error.
+ */
+ u64 last_finished_delalloc_end = page_start;
u64 delalloc_start = page_start;
u64 delalloc_end = page_end;
u64 delalloc_to_write = 0;
int ret = 0;
+ int bit;
/* Save the dirty bitmap as our submission bitmap will be a subset of it. */
if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
@@ -1139,6 +1188,12 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
bio_ctrl->submit_bitmap = 1;
}
+ for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
+ u64 start = page_start + (bit << fs_info->sectorsize_bits);
+
+ btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize);
+ }
+
/* Lock all (subpage) delalloc ranges inside the folio first. */
while (delalloc_start < page_end) {
delalloc_end = page_end;
@@ -1147,9 +1202,8 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
delalloc_start = delalloc_end + 1;
continue;
}
- btrfs_folio_set_writer_lock(fs_info, folio, delalloc_start,
- min(delalloc_end, page_end) + 1 -
- delalloc_start);
+ set_delalloc_bitmap(folio, &delalloc_bitmap, delalloc_start,
+ min(delalloc_end, page_end) + 1 - delalloc_start);
last_delalloc_end = delalloc_end;
delalloc_start = delalloc_end + 1;
}
@@ -1174,7 +1228,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
found_len = last_delalloc_end + 1 - found_start;
found = true;
} else {
- found = btrfs_subpage_find_writer_locked(fs_info, folio,
+ found = find_next_delalloc_bitmap(folio, &delalloc_bitmap,
delalloc_start, &found_start, &found_len);
}
if (!found)
@@ -1188,11 +1242,28 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
found_len = last_delalloc_end + 1 - found_start;
if (ret >= 0) {
+ /*
+ * Some delalloc range may be created by previous folios.
+ * Thus we still need to clean up this range during error
+ * handling.
+ */
+ last_finished_delalloc_end = found_start;
/* No errors hit so far, run the current delalloc range. */
ret = btrfs_run_delalloc_range(inode, folio,
found_start,
found_start + found_len - 1,
wbc);
+ if (ret >= 0)
+ last_finished_delalloc_end = found_start + found_len;
+ if (unlikely(ret < 0))
+ btrfs_err_rl(fs_info,
+"failed to run delalloc range, root=%lld ino=%llu folio=%llu submit_bitmap=%*pbl start=%llu len=%u: %d",
+ btrfs_root_id(inode->root),
+ btrfs_ino(inode),
+ folio_pos(folio),
+ fs_info->sectors_per_page,
+ &bio_ctrl->submit_bitmap,
+ found_start, found_len, ret);
} else {
/*
* We've hit an error during previous delalloc range,
@@ -1200,7 +1271,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
*/
unlock_extent(&inode->io_tree, found_start,
found_start + found_len - 1, NULL);
- __unlock_for_delalloc(&inode->vfs_inode, folio,
+ unlock_delalloc_folio(&inode->vfs_inode, folio,
found_start,
found_start + found_len - 1);
}
@@ -1227,8 +1298,22 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
delalloc_start = found_start + found_len;
}
- if (ret < 0)
+ /*
+ * It's possible we had some ordered extents created before we hit
+	 * an error; clean up the non-async successfully created delalloc ranges.
+ */
+ if (unlikely(ret < 0)) {
+ unsigned int bitmap_size = min(
+ (last_finished_delalloc_end - page_start) >>
+ fs_info->sectorsize_bits,
+ fs_info->sectors_per_page);
+
+ for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size)
+ btrfs_mark_ordered_io_finished(inode, folio,
+ page_start + (bit << fs_info->sectorsize_bits),
+ fs_info->sectorsize, false);
return ret;
+ }
out:
if (last_delalloc_end)
delalloc_end = last_delalloc_end;
@@ -1288,7 +1373,7 @@ static int submit_one_sector(struct btrfs_inode *inode,
em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
if (IS_ERR(em))
- return PTR_ERR_OR_ZERO(em);
+ return PTR_ERR(em);
extent_offset = filepos - em->start;
em_end = extent_map_end(em);
@@ -1306,7 +1391,14 @@ static int submit_one_sector(struct btrfs_inode *inode,
free_extent_map(em);
em = NULL;
- btrfs_set_range_writeback(inode, filepos, filepos + sectorsize - 1);
+ /*
+ * Although the PageDirty bit is cleared before entering this
+ * function, subpage dirty bit is not cleared.
+ * So clear subpage dirty bit here so next time we won't submit
+ * a folio for a range already written to disk.
+ */
+ btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
+ btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);
/*
* Above call should set the whole folio with writeback flag, even
* just for a single subpage sector.
@@ -1315,13 +1407,6 @@ static int submit_one_sector(struct btrfs_inode *inode,
*/
ASSERT(folio_test_writeback(folio));
- /*
- * Although the PageDirty bit is cleared before entering this
- * function, subpage dirty bit is not cleared.
- * So clear subpage dirty bit here so next time we won't submit
- * folio for range already written to disk.
- */
- btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
submit_extent_folio(bio_ctrl, disk_bytenr, folio,
sectorsize, filepos - folio_pos(folio));
return 0;
@@ -1344,6 +1429,7 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned long range_bitmap = 0;
bool submitted_io = false;
+ bool error = false;
const u64 folio_start = folio_pos(folio);
u64 cur;
int bit;
@@ -1386,13 +1472,26 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
break;
}
ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
- if (ret < 0)
- goto out;
+ if (unlikely(ret < 0)) {
+ /*
+ * bio_ctrl may contain a bio crossing several folios.
+ * Submit it immediately so that the bio has a chance
+			 * to finish normally rather than being marked as an error.
+ */
+ submit_one_bio(bio_ctrl);
+ /*
+			 * We failed to grab the extent map, which should be very rare.
+ * Since there is no bio submitted to finish the ordered
+ * extent, we have to manually finish this sector.
+ */
+ btrfs_mark_ordered_io_finished(inode, folio, cur,
+ fs_info->sectorsize, false);
+ error = true;
+ continue;
+ }
submitted_io = true;
}
- btrfs_folio_assert_not_dirty(fs_info, folio, start, len);
-out:
/*
	 * If we didn't submit any sector (>= i_size), the folio dirty flag gets
* cleared but PAGECACHE_TAG_DIRTY is not cleared (only cleared
@@ -1400,8 +1499,11 @@ out:
*
* Here we set writeback and clear for the range. If the full folio
* is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
+ *
+ * If we hit any error, the corresponding sector will still be dirty
+	 * and thus there is no need to clear PAGECACHE_TAG_DIRTY.
*/
- if (!submitted_io) {
+ if (!submitted_io && !error) {
btrfs_folio_set_writeback(fs_info, folio, start, len);
btrfs_folio_clear_writeback(fs_info, folio, start, len);
}
@@ -1419,15 +1521,14 @@ out:
*/
static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
{
- struct inode *inode = folio->mapping->host;
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- const u64 page_start = folio_pos(folio);
+ struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret;
size_t pg_offset;
- loff_t i_size = i_size_read(inode);
+ loff_t i_size = i_size_read(&inode->vfs_inode);
unsigned long end_index = i_size >> PAGE_SHIFT;
- trace_extent_writepage(folio, inode, bio_ctrl->wbc);
+ trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc);
WARN_ON(!folio_test_locked(folio));
@@ -1451,41 +1552,37 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl
if (ret < 0)
goto done;
- ret = writepage_delalloc(BTRFS_I(inode), folio, bio_ctrl);
+ ret = writepage_delalloc(inode, folio, bio_ctrl);
if (ret == 1)
return 0;
if (ret)
goto done;
- ret = extent_writepage_io(BTRFS_I(inode), folio, folio_pos(folio),
+ ret = extent_writepage_io(inode, folio, folio_pos(folio),
PAGE_SIZE, bio_ctrl, i_size);
if (ret == 1)
return 0;
+ if (ret < 0)
+ btrfs_err_rl(fs_info,
+"failed to submit blocks, root=%lld inode=%llu folio=%llu submit_bitmap=%*pbl: %d",
+ btrfs_root_id(inode->root), btrfs_ino(inode),
+ folio_pos(folio), fs_info->sectors_per_page,
+ &bio_ctrl->submit_bitmap, ret);
bio_ctrl->wbc->nr_to_write--;
done:
- if (ret) {
- btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
- page_start, PAGE_SIZE, !ret);
+ if (ret < 0)
mapping_set_error(folio->mapping, ret);
- }
-
/*
* Only unlock ranges that are submitted. As there can be some async
* submitted ranges inside the folio.
*/
- btrfs_folio_end_writer_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
+ btrfs_folio_end_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
ASSERT(ret <= 0);
return ret;
}
-void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
-{
- wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
- TASK_UNINTERRUPTIBLE);
-}
-
/*
* Lock extent buffer status and pages for writeback.
*
@@ -1626,11 +1723,10 @@ static void end_bbio_meta_write(struct btrfs_bio *bbio)
{
struct extent_buffer *eb = bbio->private;
struct btrfs_fs_info *fs_info = eb->fs_info;
- bool uptodate = !bbio->bio.bi_status;
struct folio_iter fi;
u32 bio_offset = 0;
- if (!uptodate)
+ if (bbio->bio.bi_status != BLK_STS_OK)
set_btree_ioerr(eb);
bio_for_each_folio_all(fi, &bbio->bio) {
@@ -1707,7 +1803,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
ret = bio_add_folio(&bbio->bio, folio, eb->len,
eb->start - folio_pos(folio));
ASSERT(ret);
- wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
+ wbc_account_cgroup_owner(wbc, folio, eb->len);
folio_unlock(folio);
} else {
int num_folios = num_extent_folios(eb);
@@ -1721,8 +1817,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
folio_start_writeback(folio);
ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
ASSERT(ret);
- wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
- eb->folio_size);
+ wbc_account_cgroup_owner(wbc, folio, eb->folio_size);
wbc->nr_to_write -= folio_nr_pages(folio);
folio_unlock(folio);
}
@@ -2115,7 +2210,27 @@ retry:
continue;
}
- if (wbc->sync_mode != WB_SYNC_NONE) {
+ /*
+ * For subpage case, compression can lead to mixed
+ * writeback and dirty flags, e.g:
+ * 0 32K 64K 96K 128K
+ * | |//////||/////| |//|
+ *
+ * In above case, [32K, 96K) is asynchronously submitted
+ * for compression, and [124K, 128K) needs to be written back.
+ *
+		 * If we didn't wait for writeback on page 64K, [124K, 128K)
+ * won't be submitted as the page still has writeback flag
+ * and will be skipped in the next check.
+ *
+ * This mixed writeback and dirty case is only possible for
+ * subpage case.
+ *
+ * TODO: Remove this check after migrating compression to
+ * regular submission.
+ */
+ if (wbc->sync_mode != WB_SYNC_NONE ||
+ btrfs_is_subpage(inode_to_fs_info(inode), mapping)) {
if (folio_test_writeback(folio))
submit_write_bio(bio_ctrl, 0);
folio_wait_writeback(folio);
@@ -2200,7 +2315,7 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
u32 cur_len = cur_end + 1 - cur;
struct folio *folio;
- folio = __filemap_get_folio(mapping, cur >> PAGE_SHIFT, 0, 0);
+ folio = filemap_get_folio(mapping, cur >> PAGE_SHIFT);
/*
* This shouldn't happen, the pages are pinned and locked, this
@@ -2228,12 +2343,9 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
if (ret == 1)
goto next_page;
- if (ret) {
- btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
- cur, cur_len, !ret);
+ if (ret)
mapping_set_error(mapping, ret);
- }
- btrfs_folio_end_writer_lock(fs_info, folio, cur, cur_len);
+ btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
if (ret < 0)
found_error = true;
next_page:
@@ -2317,7 +2429,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
* to drop the page.
*/
static bool try_release_extent_state(struct extent_io_tree *tree,
- struct folio *folio, gfp_t mask)
+ struct folio *folio)
{
u64 start = folio_pos(folio);
u64 end = start + PAGE_SIZE - 1;
@@ -2428,12 +2540,7 @@ next:
cond_resched();
}
}
- return try_release_extent_state(io_tree, folio, mask);
-}
-
-static void __free_extent_buffer(struct extent_buffer *eb)
-{
- kmem_cache_free(extent_buffer_cache, eb);
+ return try_release_extent_state(io_tree, folio);
}
static int extent_buffer_under_io(const struct extent_buffer *eb)
@@ -2442,7 +2549,7 @@ static int extent_buffer_under_io(const struct extent_buffer *eb)
test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
}
-static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *folio)
+static bool folio_range_has_eb(struct folio *folio)
{
struct btrfs_subpage *subpage;
@@ -2452,12 +2559,6 @@ static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *foli
subpage = folio_get_private(folio);
if (atomic_read(&subpage->eb_refs))
return true;
- /*
- * Even there is no eb refs here, we may still have
- * end_folio_read() call relying on page::private.
- */
- if (atomic_read(&subpage->readers))
- return true;
}
return false;
}
@@ -2516,14 +2617,14 @@ static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct fo
* We can only detach the folio private if there are no other ebs in the
* page range and no unfinished IO.
*/
- if (!folio_range_has_eb(fs_info, folio))
+ if (!folio_range_has_eb(folio))
btrfs_detach_subpage(fs_info, folio);
spin_unlock(&folio->mapping->i_private_lock);
}
-/* Release all pages attached to the extent buffer */
-static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
+/* Release all folios attached to the extent buffer */
+static void btrfs_release_extent_buffer_folios(const struct extent_buffer *eb)
{
ASSERT(!extent_buffer_under_io(eb));
@@ -2545,9 +2646,9 @@ static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
*/
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
- btrfs_release_extent_buffer_pages(eb);
+ btrfs_release_extent_buffer_folios(eb);
btrfs_leak_debug_del_eb(eb);
- __free_extent_buffer(eb);
+ kmem_cache_free(extent_buffer_cache, eb);
}
static struct extent_buffer *
@@ -2645,7 +2746,7 @@ err:
folio_put(eb->folios[i]);
}
}
- __free_extent_buffer(eb);
+ kmem_cache_free(extent_buffer_cache, eb);
return NULL;
}
@@ -2772,13 +2873,12 @@ free_eb:
}
#endif
-static struct extent_buffer *grab_extent_buffer(
- struct btrfs_fs_info *fs_info, struct page *page)
+static struct extent_buffer *grab_extent_buffer(struct btrfs_fs_info *fs_info,
+ struct folio *folio)
{
- struct folio *folio = page_folio(page);
struct extent_buffer *exists;
- lockdep_assert_held(&page->mapping->i_private_lock);
+ lockdep_assert_held(&folio->mapping->i_private_lock);
/*
* For subpage case, we completely rely on radix tree to ensure we
@@ -2793,7 +2893,7 @@ static struct extent_buffer *grab_extent_buffer(
return NULL;
/*
- * We could have already allocated an eb for this page and attached one
+ * We could have already allocated an eb for this folio and attached one
* so lets see if we can get a ref on the existing eb, and if we can we
* know it's good and we can just return that one, else we know we can
* just overwrite folio private.
@@ -2802,16 +2902,19 @@ static struct extent_buffer *grab_extent_buffer(
if (atomic_inc_not_zero(&exists->refs))
return exists;
- WARN_ON(PageDirty(page));
+ WARN_ON(folio_test_dirty(folio));
folio_detach_private(folio);
return NULL;
}
-static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
+/*
+ * Validate alignment constraints of eb at logical address @start.
+ */
+static bool check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
{
if (!IS_ALIGNED(start, fs_info->sectorsize)) {
btrfs_err(fs_info, "bad tree block start %llu", start);
- return -EINVAL;
+ return true;
}
if (fs_info->nodesize < PAGE_SIZE &&
@@ -2819,14 +2922,14 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
btrfs_err(fs_info,
"tree block crosses page boundary, start %llu nodesize %u",
start, fs_info->nodesize);
- return -EINVAL;
+ return true;
}
if (fs_info->nodesize >= PAGE_SIZE &&
!PAGE_ALIGNED(start)) {
btrfs_err(fs_info,
"tree block is not page aligned, start %llu nodesize %u",
start, fs_info->nodesize);
- return -EINVAL;
+ return true;
}
if (!IS_ALIGNED(start, fs_info->nodesize) &&
!test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
@@ -2834,10 +2937,9 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
start, fs_info->nodesize);
}
- return 0;
+ return false;
}
-
/*
* Return 0 if eb->folios[i] is attached to btree inode successfully.
* Return >0 if there is already another extent buffer for the range,
@@ -2893,8 +2995,7 @@ finish:
} else if (existing_folio) {
struct extent_buffer *existing_eb;
- existing_eb = grab_extent_buffer(fs_info,
- folio_page(existing_folio, 0));
+ existing_eb = grab_extent_buffer(fs_info, existing_folio);
if (existing_eb) {
/* The extent buffer still exists, we can use it directly. */
*found_eb_ret = existing_eb;
@@ -3091,7 +3192,7 @@ again:
* live buffer and won't free them prematurely.
*/
for (int i = 0; i < num_folios; i++)
- unlock_page(folio_page(eb->folios[i], 0));
+ folio_unlock(eb->folios[i]);
return eb;
out:
@@ -3115,13 +3216,13 @@ out:
for (int i = 0; i < attached; i++) {
ASSERT(eb->folios[i]);
detach_extent_buffer_folio(eb, eb->folios[i]);
- unlock_page(folio_page(eb->folios[i], 0));
+ folio_unlock(eb->folios[i]);
folio_put(eb->folios[i]);
eb->folios[i] = NULL;
}
/*
* Now all pages of that extent buffer is unmapped, set UNMAPPED flag,
- * so it can be cleaned up without utlizing page->mapping.
+ * so it can be cleaned up without utilizing page->mapping.
*/
set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
@@ -3137,7 +3238,7 @@ static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
struct extent_buffer *eb =
container_of(head, struct extent_buffer, rcu_head);
- __free_extent_buffer(eb);
+ kmem_cache_free(extent_buffer_cache, eb);
}
static int release_extent_buffer(struct extent_buffer *eb)
@@ -3161,11 +3262,11 @@ static int release_extent_buffer(struct extent_buffer *eb)
}
btrfs_leak_debug_del_eb(eb);
- /* Should be safe to release our pages at this point */
- btrfs_release_extent_buffer_pages(eb);
+ /* Should be safe to release folios at this point. */
+ btrfs_release_extent_buffer_folios(eb);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
- __free_extent_buffer(eb);
+ kmem_cache_free(extent_buffer_cache, eb);
return 1;
}
#endif
@@ -3324,12 +3425,12 @@ void set_extent_buffer_dirty(struct extent_buffer *eb)
* the above race.
*/
if (subpage)
- lock_page(folio_page(eb->folios[0], 0));
+ folio_lock(eb->folios[0]);
for (int i = 0; i < num_folios; i++)
btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
eb->start, eb->len);
if (subpage)
- unlock_page(folio_page(eb->folios[0], 0));
+ folio_unlock(eb->folios[0]);
percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
eb->len,
eb->fs_info->dirty_metadata_batch);
@@ -3439,8 +3540,8 @@ static void end_bbio_meta_read(struct btrfs_bio *bbio)
bio_put(&bbio->bio);
}
-int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
- const struct btrfs_tree_parent_check *check)
+int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
+ const struct btrfs_tree_parent_check *check)
{
struct btrfs_bio *bbio;
bool ret;
@@ -3458,7 +3559,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
/* Someone else is already reading the buffer, just wait for it. */
if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
- goto done;
+ return 0;
/*
* Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
@@ -3498,14 +3599,21 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
}
}
btrfs_submit_bbio(bbio, mirror_num);
+ return 0;
+}
-done:
- if (wait == WAIT_COMPLETE) {
- wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
- if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
- return -EIO;
- }
+int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
+ const struct btrfs_tree_parent_check *check)
+{
+ int ret;
+ ret = read_extent_buffer_pages_nowait(eb, mirror_num, check);
+ if (ret < 0)
+ return ret;
+
+ wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
+ if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
+ return -EIO;
return 0;
}
@@ -4221,7 +4329,6 @@ void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 owner_root, u64 gen, int level)
{
struct btrfs_tree_parent_check check = {
- .has_first_key = 0,
.level = level,
.transid = gen
};
@@ -4237,7 +4344,7 @@ void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
return;
}
- ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
+ ret = read_extent_buffer_pages_nowait(eb, 0, &check);
if (ret < 0)
free_extent_buffer_stale(eb);
else
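
The new set_delalloc_bitmap() and find_next_delalloc_bitmap() helpers above keep one bit per block of the folio and convert byte offsets to bit indices with the sectorsize shift. The stand-alone snippet below only illustrates that offset-to-bit arithmetic with made-up values (4K sectors in a 64K folio, i.e. a subpage configuration); it is plain user-space C, not btrfs code.

#include <stdio.h>
#include <stdint.h>

#define SECTORSIZE_BITS	12		/* illustrative: 4K sectors */
#define FOLIO_SIZE	(64 * 1024)	/* illustrative: 64K folio */

int main(void)
{
	uint64_t folio_start = 1 << 20;		/* folio begins at file offset 1M */
	uint64_t start = folio_start + 20480;	/* delalloc begins 20K into the folio */
	uint32_t len = 12288;			/* 12K of delalloc */
	unsigned int bits_per_folio = FOLIO_SIZE >> SECTORSIZE_BITS;
	unsigned int start_bit = (start - folio_start) >> SECTORSIZE_BITS;
	unsigned int nbits = len >> SECTORSIZE_BITS;

	/* Prints: 16 bits per folio, start_bit=5 nbits=3 (sectors 5..7). */
	printf("%u bits per folio, start_bit=%u nbits=%u\n",
	       bits_per_folio, start_bit, nbits);
	return 0;
}
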
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 8a36117ed453..6c5328bfabc2 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -248,7 +248,6 @@ int btree_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc);
void btrfs_readahead(struct readahead_control *rac);
int set_folio_extent_mapped(struct folio *folio);
-int set_page_extent_mapped(struct page *page);
void clear_folio_extent_mapped(struct folio *folio);
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
@@ -262,12 +261,17 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
-#define WAIT_NONE 0
-#define WAIT_COMPLETE 1
-#define WAIT_PAGE_LOCK 2
-int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
+int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
const struct btrfs_tree_parent_check *parent_check);
-void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
+int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
+ const struct btrfs_tree_parent_check *parent_check);
+
+static inline void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+{
+ wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
+ TASK_UNINTERRUPTIBLE);
+}
+
void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 owner_root, u64 gen, int level);
void btrfs_readahead_node_child(struct extent_buffer *node, int slot);
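
read_extent_buffer_pages() is now a thin waiting wrapper around read_extent_buffer_pages_nowait(): the EXTENT_BUFFER_READING bit doubles as a claim on the read (test_and_set_bit()) and as the completion signal waiters sleep on (wait_on_bit_io()). A generic sketch of that idiom, with invented names and outside any real btrfs context, looks like this:

/*
 * Generic shape of the EXTENT_BUFFER_READING/WRITEBACK waiting idiom; names
 * are invented for illustration, this is not btrfs code.  Returns 1 if the
 * caller now owns the I/O and must start it, 0 if another task already did
 * and we only waited for its completion.
 */
static int claim_or_wait_for_io(unsigned long *flags, int io_bit)
{
	if (test_and_set_bit(io_bit, flags)) {
		/* Someone else owns the I/O, just wait for it to finish. */
		wait_on_bit_io(flags, io_bit, TASK_UNINTERRUPTIBLE);
		return 0;
	}
	/*
	 * We own the I/O.  The completion path is expected to call
	 * clear_and_wake_up_bit(io_bit, flags) so that waiters make progress.
	 */
	return 1;
}

The nowait variant stops after the claim attempt instead of sleeping, which is why btrfs_readahead_tree_block() above can fire off reads without blocking.
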
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 25d191f1ac10..67ce85ff0ae2 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -77,10 +77,13 @@ static u64 range_end(u64 start, u64 len)
return start + len;
}
-static void dec_evictable_extent_maps(struct btrfs_inode *inode)
+static void remove_em(struct btrfs_inode *inode, struct extent_map *em)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ rb_erase(&em->rb_node, &inode->extent_tree.root);
+ RB_CLEAR_NODE(&em->rb_node);
+
if (!btrfs_is_testing(fs_info) && is_fstree(btrfs_root_id(inode->root)))
percpu_counter_dec(&fs_info->evictable_extent_maps);
}
@@ -230,7 +233,12 @@ static bool mergeable_maps(const struct extent_map *prev, const struct extent_ma
if (extent_map_end(prev) != next->start)
return false;
- if (prev->flags != next->flags)
+ /*
+ * The merged flag is not an on-disk flag, it just indicates we had the
+ * extent maps of 2 (or more) adjacent extents merged, so factor it out.
+ */
+ if ((prev->flags & ~EXTENT_FLAG_MERGED) !=
+ (next->flags & ~EXTENT_FLAG_MERGED))
return false;
if (next->disk_bytenr < EXTENT_MAP_LAST_BYTE - 1)
@@ -243,13 +251,19 @@ static bool mergeable_maps(const struct extent_map *prev, const struct extent_ma
/*
* Handle the on-disk data extents merge for @prev and @next.
*
+ * @prev: left extent to merge
+ * @next: right extent to merge
+ * @merged: the extent we will not discard after the merge; updated with new values
+ *
+ * After this, one of the two extents is the new merged extent and the other is
+ * removed from the tree and likely freed. Note that @merged is one of @prev/@next
+ * so there is const/non-const aliasing occurring here.
+ *
* Only touches disk_bytenr/disk_num_bytes/offset/ram_bytes.
* For now only uncompressed regular extent can be merged.
- *
- * @prev and @next will be both updated to point to the new merged range.
- * Thus one of them should be removed by the caller.
*/
-static void merge_ondisk_extents(struct extent_map *prev, struct extent_map *next)
+static void merge_ondisk_extents(const struct extent_map *prev, const struct extent_map *next,
+ struct extent_map *merged)
{
u64 new_disk_bytenr;
u64 new_disk_num_bytes;
@@ -284,15 +298,10 @@ static void merge_ondisk_extents(struct extent_map *prev, struct extent_map *nex
new_disk_bytenr;
new_offset = prev->disk_bytenr + prev->offset - new_disk_bytenr;
- prev->disk_bytenr = new_disk_bytenr;
- prev->disk_num_bytes = new_disk_num_bytes;
- prev->ram_bytes = new_disk_num_bytes;
- prev->offset = new_offset;
-
- next->disk_bytenr = new_disk_bytenr;
- next->disk_num_bytes = new_disk_num_bytes;
- next->ram_bytes = new_disk_num_bytes;
- next->offset = new_offset;
+ merged->disk_bytenr = new_disk_bytenr;
+ merged->disk_num_bytes = new_disk_num_bytes;
+ merged->ram_bytes = new_disk_num_bytes;
+ merged->offset = new_offset;
}
static void dump_extent_map(struct btrfs_fs_info *fs_info, const char *prefix,
@@ -333,7 +342,6 @@ static void validate_extent_map(struct btrfs_fs_info *fs_info, struct extent_map
static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct extent_map_tree *tree = &inode->extent_tree;
struct extent_map *merge = NULL;
struct rb_node *rb;
@@ -361,14 +369,12 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
em->generation = max(em->generation, merge->generation);
if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
- merge_ondisk_extents(merge, em);
+ merge_ondisk_extents(merge, em, em);
em->flags |= EXTENT_FLAG_MERGED;
validate_extent_map(fs_info, em);
- rb_erase(&merge->rb_node, &tree->root);
- RB_CLEAR_NODE(&merge->rb_node);
+ remove_em(inode, merge);
free_extent_map(merge);
- dec_evictable_extent_maps(inode);
}
}
@@ -378,14 +384,12 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
em->len += merge->len;
if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
- merge_ondisk_extents(em, merge);
+ merge_ondisk_extents(em, merge, em);
validate_extent_map(fs_info, em);
- rb_erase(&merge->rb_node, &tree->root);
- RB_CLEAR_NODE(&merge->rb_node);
em->generation = max(em->generation, merge->generation);
em->flags |= EXTENT_FLAG_MERGED;
+ remove_em(inode, merge);
free_extent_map(merge);
- dec_evictable_extent_maps(inode);
}
}
@@ -582,12 +586,10 @@ void remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em)
lockdep_assert_held_write(&tree->lock);
WARN_ON(em->flags & EXTENT_FLAG_PINNED);
- rb_erase(&em->rb_node, &tree->root);
if (!(em->flags & EXTENT_FLAG_LOGGING))
list_del_init(&em->list);
- RB_CLEAR_NODE(&em->rb_node);
- dec_evictable_extent_maps(inode);
+ remove_em(inode, em);
}
static void replace_extent_mapping(struct btrfs_inode *inode,
@@ -1116,13 +1118,12 @@ out_free_pre:
struct btrfs_em_shrink_ctx {
long nr_to_scan;
long scanned;
- u64 last_ino;
- u64 last_root;
};
static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_ctx *ctx)
{
- const u64 cur_fs_gen = btrfs_get_fs_generation(inode->root->fs_info);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ const u64 cur_fs_gen = btrfs_get_fs_generation(fs_info);
struct extent_map_tree *tree = &inode->extent_tree;
long nr_dropped = 0;
struct rb_node *node;
@@ -1195,7 +1196,8 @@ next:
* lock. This is to avoid slowing other tasks trying to take the
* lock.
*/
- if (need_resched() || rwlock_needbreak(&tree->lock))
+ if (need_resched() || rwlock_needbreak(&tree->lock) ||
+ btrfs_fs_closing(fs_info))
break;
node = next;
}
@@ -1207,19 +1209,21 @@ next:
static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_inode *inode;
long nr_dropped = 0;
- u64 min_ino = ctx->last_ino + 1;
+ u64 min_ino = fs_info->em_shrinker_last_ino + 1;
inode = btrfs_find_first_inode(root, min_ino);
while (inode) {
nr_dropped += btrfs_scan_inode(inode, ctx);
min_ino = btrfs_ino(inode) + 1;
- ctx->last_ino = btrfs_ino(inode);
+ fs_info->em_shrinker_last_ino = btrfs_ino(inode);
btrfs_add_delayed_iput(inode);
- if (ctx->scanned >= ctx->nr_to_scan)
+ if (ctx->scanned >= ctx->nr_to_scan ||
+ btrfs_fs_closing(inode->root->fs_info))
break;
cond_resched();
@@ -1235,52 +1239,43 @@ static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx
* inode if there is one or we will find out this was the last
* one and move to the next root.
*/
- ctx->last_root = btrfs_root_id(root);
+ fs_info->em_shrinker_last_root = btrfs_root_id(root);
} else {
/*
* No more inodes in this root, set extent_map_shrinker_last_ino to 0 so
* that when processing the next root we start from its first inode.
*/
- ctx->last_ino = 0;
- ctx->last_root = btrfs_root_id(root) + 1;
+ fs_info->em_shrinker_last_ino = 0;
+ fs_info->em_shrinker_last_root = btrfs_root_id(root) + 1;
}
return nr_dropped;
}
-long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
+static void btrfs_extent_map_shrinker_worker(struct work_struct *work)
{
+ struct btrfs_fs_info *fs_info;
struct btrfs_em_shrink_ctx ctx;
u64 start_root_id;
u64 next_root_id;
bool cycled = false;
long nr_dropped = 0;
- ctx.scanned = 0;
- ctx.nr_to_scan = nr_to_scan;
+ fs_info = container_of(work, struct btrfs_fs_info, em_shrinker_work);
- /*
- * In case we have multiple tasks running this shrinker, make the next
- * one start from the next inode in case it starts before we finish.
- */
- spin_lock(&fs_info->extent_map_shrinker_lock);
- ctx.last_ino = fs_info->extent_map_shrinker_last_ino;
- fs_info->extent_map_shrinker_last_ino++;
- ctx.last_root = fs_info->extent_map_shrinker_last_root;
- spin_unlock(&fs_info->extent_map_shrinker_lock);
+ ctx.scanned = 0;
+ ctx.nr_to_scan = atomic64_read(&fs_info->em_shrinker_nr_to_scan);
- start_root_id = ctx.last_root;
- next_root_id = ctx.last_root;
+ start_root_id = fs_info->em_shrinker_last_root;
+ next_root_id = fs_info->em_shrinker_last_root;
if (trace_btrfs_extent_map_shrinker_scan_enter_enabled()) {
s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
- trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr_to_scan,
- nr, ctx.last_root,
- ctx.last_ino);
+ trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr);
}
- while (ctx.scanned < ctx.nr_to_scan) {
+ while (ctx.scanned < ctx.nr_to_scan && !btrfs_fs_closing(fs_info)) {
struct btrfs_root *root;
unsigned long count;
@@ -1294,8 +1289,8 @@ long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
spin_unlock(&fs_info->fs_roots_radix_lock);
if (start_root_id > 0 && !cycled) {
next_root_id = 0;
- ctx.last_root = 0;
- ctx.last_ino = 0;
+ fs_info->em_shrinker_last_root = 0;
+ fs_info->em_shrinker_last_ino = 0;
cycled = true;
continue;
}
@@ -1314,29 +1309,40 @@ long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
btrfs_put_root(root);
}
- /*
- * In case of multiple tasks running this extent map shrinking code this
- * isn't perfect but it's simple and silences things like KCSAN. It's
- * not possible to know which task made more progress because we can
- * cycle back to the first root and first inode if it's not the first
- * time the shrinker ran, see the above logic. Also a task that started
- * later may finish ealier than another task and made less progress. So
- * make this simple and update to the progress of the last task that
- * finished, with the occasional possiblity of having two consecutive
- * runs of the shrinker process the same inodes.
- */
- spin_lock(&fs_info->extent_map_shrinker_lock);
- fs_info->extent_map_shrinker_last_ino = ctx.last_ino;
- fs_info->extent_map_shrinker_last_root = ctx.last_root;
- spin_unlock(&fs_info->extent_map_shrinker_lock);
-
if (trace_btrfs_extent_map_shrinker_scan_exit_enabled()) {
s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
- trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped,
- nr, ctx.last_root,
- ctx.last_ino);
+ trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped, nr);
}
- return nr_dropped;
+ atomic64_set(&fs_info->em_shrinker_nr_to_scan, 0);
+}
+
+void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
+{
+ /*
+ * Do nothing if the shrinker is already running. In case of high memory
+ * pressure we can have a lot of tasks calling us and all passing the
+ * same nr_to_scan value, but in reality we may need only to free
+ * nr_to_scan extent maps (or less). In case we need to free more than
+ * that, we will be called again by the fs shrinker, so no worries about
+ * not doing enough work to reclaim memory from extent maps.
+ * We can also be repeatedly called with the same nr_to_scan value
+ * simply because the shrinker runs asynchronously and multiple calls
+ * to this function are made before the shrinker does enough progress.
+ *
+ * That's why we set the atomic counter to nr_to_scan only if its
+ * current value is zero, instead of incrementing the counter by
+ * nr_to_scan.
+ */
+ if (atomic64_cmpxchg(&fs_info->em_shrinker_nr_to_scan, 0, nr_to_scan) != 0)
+ return;
+
+ queue_work(system_unbound_wq, &fs_info->em_shrinker_work);
+}
+
+void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info)
+{
+ atomic64_set(&fs_info->em_shrinker_nr_to_scan, 0);
+ INIT_WORK(&fs_info->em_shrinker_work, btrfs_extent_map_shrinker_worker);
}
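
btrfs_free_extent_maps() above only queues the shrinker work item when em_shrinker_nr_to_scan moves off zero, so a burst of callers under memory pressure collapses into a single queued run. Stripped of the btrfs specifics, the idiom is roughly the following sketch (invented names, not the btrfs code):

/* Minimal "kick the worker only if it is idle" sketch; invented names. */
struct shrink_ctx {
	atomic64_t nr_to_scan;
	struct work_struct work;
};

static void kick_shrinker(struct shrink_ctx *ctx, long nr_to_scan)
{
	/* Only the caller that moves the counter off zero queues the work. */
	if (atomic64_cmpxchg(&ctx->nr_to_scan, 0, nr_to_scan) != 0)
		return;

	queue_work(system_unbound_wq, &ctx->work);
}

static void shrink_worker(struct work_struct *work)
{
	struct shrink_ctx *ctx = container_of(work, struct shrink_ctx, work);

	/* ... reclaim up to atomic64_read(&ctx->nr_to_scan) units here ... */

	/* Reset so the next kick_shrinker() call can queue us again. */
	atomic64_set(&ctx->nr_to_scan, 0);
}
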
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 5154a8f1d26c..cd123b266b64 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -189,6 +189,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode,
int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
struct extent_map *new_em,
bool modified);
-long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan);
+void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan);
+void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info);
#endif
diff --git a/fs/btrfs/fiemap.c b/fs/btrfs/fiemap.c
index df7f09f3b02e..b80c07ad8c5e 100644
--- a/fs/btrfs/fiemap.c
+++ b/fs/btrfs/fiemap.c
@@ -186,7 +186,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
* we have in the cache is the last delalloc range we
* found while the file extent item we found can be
* either for a whole delalloc range we previously
- * emmitted or only a part of that range.
+ * emitted or only a part of that range.
*
* We have two cases here:
*
@@ -194,13 +194,13 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
* cached extent's end. In this case just ignore the
* current file extent item because we don't want to
* overlap with previous ranges that may have been
- * emmitted already;
+ * emitted already;
*
* 2) The file extent item starts behind the currently
* cached extent but its end offset goes beyond the
* end offset of the cached extent. We don't want to
* overlap with a previous range that may have been
- * emmitted already, so we emit the currently cached
+ * emitted already, so we emit the currently cached
* extent and then partially store the current file
* extent item's range in the cache, for the subrange
		 * going from the cached extent's end to the end of the
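
The comments above describe when a newly found file extent item can be ignored because the cached extent already covers it, and when only the tail past the cached end still has to be emitted. The tiny stand-alone check below restates that interval decision with invented names; it is not the fiemap.c implementation.

#include <stdbool.h>
#include <stdint.h>

/* Half-open byte ranges, [start, end); invented helper for illustration. */
struct range { uint64_t start, end; };

/*
 * Case 1: the found extent ends at or before the cached one, so there is
 * nothing new to emit.  Case 2: it extends past the cached end, so only the
 * uncovered tail still needs to be emitted (and cached for later).
 */
static bool uncovered_tail(struct range cached, struct range found, struct range *tail)
{
	if (found.end <= cached.end)
		return false;
	tail->start = cached.end > found.start ? cached.end : found.start;
	tail->end = found.end;
	return true;
}
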
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 886749b39672..d04a3b47b1fb 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -190,8 +190,6 @@ int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_compression(leaf, item, 0);
btrfs_set_file_extent_encryption(leaf, item, 0);
btrfs_set_file_extent_other_encoding(leaf, item, 0);
-
- btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
return ret;
@@ -1259,7 +1257,6 @@ found:
ins_size /= csum_size;
total_bytes += ins_size * fs_info->sectorsize;
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
if (total_bytes < sums->len) {
btrfs_release_path(path);
cond_resched();
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 4fb521d91b06..36f51c311bb1 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -36,100 +36,42 @@
#include "ioctl.h"
#include "file.h"
#include "super.h"
-
-/* simple helper to fault in pages and copy. This should go away
- * and be replaced with calls into generic code.
- */
-static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
- struct page **prepared_pages,
- struct iov_iter *i)
-{
- size_t copied = 0;
- size_t total_copied = 0;
- int pg = 0;
- int offset = offset_in_page(pos);
-
- while (write_bytes > 0) {
- size_t count = min_t(size_t,
- PAGE_SIZE - offset, write_bytes);
- struct page *page = prepared_pages[pg];
- /*
- * Copy data from userspace to the current page
- */
- copied = copy_page_from_iter_atomic(page, offset, count, i);
-
- /* Flush processor's dcache for this page */
- flush_dcache_page(page);
-
- /*
- * if we get a partial write, we can end up with
- * partially up to date pages. These add
- * a lot of complexity, so make sure they don't
- * happen by forcing this copy to be retried.
- *
- * The rest of the btrfs_file_write code will fall
- * back to page at a time copies after we return 0.
- */
- if (unlikely(copied < count)) {
- if (!PageUptodate(page)) {
- iov_iter_revert(i, copied);
- copied = 0;
- }
- if (!copied)
- break;
- }
-
- write_bytes -= copied;
- total_copied += copied;
- offset += copied;
- if (offset == PAGE_SIZE) {
- pg++;
- offset = 0;
- }
- }
- return total_copied;
-}
+#include "print-tree.h"
/*
- * unlocks pages after btrfs_file_write is done with them
+ * Unlock folio after btrfs_file_write() is done with it.
*/
-static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
- struct page **pages, size_t num_pages,
+static void btrfs_drop_folio(struct btrfs_fs_info *fs_info, struct folio *folio,
u64 pos, u64 copied)
{
- size_t i;
u64 block_start = round_down(pos, fs_info->sectorsize);
u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;
ASSERT(block_len <= U32_MAX);
- for (i = 0; i < num_pages; i++) {
- /* page checked is some magic around finding pages that
- * have been modified without going through btrfs_set_page_dirty
- * clear it here. There should be no need to mark the pages
- * accessed as prepare_pages should have marked them accessed
- * in prepare_pages via find_or_create_page()
- */
- btrfs_folio_clamp_clear_checked(fs_info, page_folio(pages[i]),
- block_start, block_len);
- unlock_page(pages[i]);
- put_page(pages[i]);
- }
+ /*
+ * Folio checked is some magic around finding folios that have been
+ * modified without going through btrfs_dirty_folio(). Clear it here.
+	 * There should be no need to mark the folio accessed, as
+	 * prepare_one_folio() should have marked it accessed via
+	 * find_or_create_page().
+ */
+ btrfs_folio_clamp_clear_checked(fs_info, folio, block_start, block_len);
+ folio_unlock(folio);
+ folio_put(folio);
}
/*
- * After btrfs_copy_from_user(), update the following things for delalloc:
- * - Mark newly dirtied pages as DELALLOC in the io tree.
+ * After copy_folio_from_iter_atomic(), update the following things for delalloc:
+ * - Mark newly dirtied folio as DELALLOC in the io tree.
* Used to advise which range is to be written back.
- * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
+ * - Mark modified folio as Uptodate/Dirty and not needing COW fixup
* - Update inode size for past EOF write
*/
-int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
- size_t num_pages, loff_t pos, size_t write_bytes,
- struct extent_state **cached, bool noreserve)
+int btrfs_dirty_folio(struct btrfs_inode *inode, struct folio *folio, loff_t pos,
+ size_t write_bytes, struct extent_state **cached, bool noreserve)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret = 0;
- int i;
u64 num_bytes;
u64 start_pos;
u64 end_of_last_block;
@@ -147,6 +89,8 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
num_bytes = round_up(write_bytes + pos - start_pos,
fs_info->sectorsize);
ASSERT(num_bytes <= U32_MAX);
+ ASSERT(folio_pos(folio) <= pos &&
+ folio_pos(folio) + folio_size(folio) >= pos + write_bytes);
end_of_last_block = start_pos + num_bytes - 1;
@@ -163,16 +107,9 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
if (ret)
return ret;
- for (i = 0; i < num_pages; i++) {
- struct page *p = pages[i];
-
- btrfs_folio_clamp_set_uptodate(fs_info, page_folio(p),
- start_pos, num_bytes);
- btrfs_folio_clamp_clear_checked(fs_info, page_folio(p),
- start_pos, num_bytes);
- btrfs_folio_clamp_set_dirty(fs_info, page_folio(p),
- start_pos, num_bytes);
- }
+ btrfs_folio_clamp_set_uptodate(fs_info, folio, start_pos, num_bytes);
+ btrfs_folio_clamp_clear_checked(fs_info, folio, start_pos, num_bytes);
+ btrfs_folio_clamp_set_dirty(fs_info, folio, start_pos, num_bytes);
/*
* we've only changed i_size in ram, and we haven't updated
@@ -242,7 +179,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
if (args->drop_cache)
btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);
- if (args->start >= inode->disk_i_size && !args->replace_extent)
+ if (data_race(args->start >= inode->disk_i_size) && !args->replace_extent)
modify_tree = 0;
update_refs = (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID);
@@ -263,7 +200,11 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
next_slot:
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
- BUG_ON(del_nr > 0);
+ if (WARN_ON(del_nr > 0)) {
+ btrfs_print_leaf(leaf);
+ ret = -EINVAL;
+ break;
+ }
ret = btrfs_next_leaf(root, path);
if (ret < 0)
break;
@@ -339,7 +280,11 @@ next_slot:
* | -------- extent -------- |
*/
if (args->start > key.offset && args->end < extent_end) {
- BUG_ON(del_nr > 0);
+ if (WARN_ON(del_nr > 0)) {
+ btrfs_print_leaf(leaf);
+ ret = -EINVAL;
+ break;
+ }
if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
ret = -EOPNOTSUPP;
break;
@@ -369,7 +314,6 @@ next_slot:
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - args->start);
- btrfs_mark_buffer_dirty(trans, leaf);
if (update_refs && disk_bytenr > 0) {
struct btrfs_ref ref = {
@@ -415,7 +359,6 @@ next_slot:
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - args->end);
- btrfs_mark_buffer_dirty(trans, leaf);
if (update_refs && disk_bytenr > 0)
args->bytes_found += args->end - key.offset;
break;
@@ -427,7 +370,11 @@ next_slot:
* | -------- extent -------- |
*/
if (args->start > key.offset && args->end >= extent_end) {
- BUG_ON(del_nr > 0);
+ if (WARN_ON(del_nr > 0)) {
+ btrfs_print_leaf(leaf);
+ ret = -EINVAL;
+ break;
+ }
if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
ret = -EOPNOTSUPP;
break;
@@ -435,7 +382,6 @@ next_slot:
btrfs_set_file_extent_num_bytes(leaf, fi,
args->start - key.offset);
- btrfs_mark_buffer_dirty(trans, leaf);
if (update_refs && disk_bytenr > 0)
args->bytes_found += extent_end - args->start;
if (args->end == extent_end)
@@ -455,7 +401,11 @@ delete_extent_item:
del_slot = path->slots[0];
del_nr = 1;
} else {
- BUG_ON(del_slot + del_nr != path->slots[0]);
+ if (WARN_ON(del_slot + del_nr != path->slots[0])) {
+ btrfs_print_leaf(leaf);
+ ret = -EINVAL;
+ break;
+ }
del_nr++;
}
@@ -686,7 +636,6 @@ again:
trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
end - other_start);
- btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
}
@@ -715,7 +664,6 @@ again:
other_end - start);
btrfs_set_file_extent_offset(leaf, fi,
start - orig_offset);
- btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
}
@@ -749,7 +697,6 @@ again:
btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - split);
- btrfs_mark_buffer_dirty(trans, leaf);
ref.action = BTRFS_ADD_DELAYED_REF;
ref.bytenr = bytenr;
@@ -828,7 +775,6 @@ again:
btrfs_set_file_extent_type(leaf, fi,
BTRFS_FILE_EXTENT_REG);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_mark_buffer_dirty(trans, leaf);
} else {
fi = btrfs_item_ptr(leaf, del_slot - 1,
struct btrfs_file_extent_item);
@@ -837,7 +783,6 @@ again:
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - key.offset);
- btrfs_mark_buffer_dirty(trans, leaf);
ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
if (ret < 0) {
@@ -851,53 +796,47 @@ out:
}
/*
- * on error we return an unlocked page and the error value
- * on success we return a locked page and 0
+ * On error, return an unlocked folio and the error value.
+ * On success, return a locked folio and 0.
*/
-static int prepare_uptodate_page(struct inode *inode,
- struct page *page, u64 pos,
- bool force_uptodate)
+static int prepare_uptodate_folio(struct inode *inode, struct folio *folio, u64 pos,
+ u64 len, bool force_uptodate)
{
- struct folio *folio = page_folio(page);
+ u64 clamp_start = max_t(u64, pos, folio_pos(folio));
+ u64 clamp_end = min_t(u64, pos + len, folio_pos(folio) + folio_size(folio));
int ret = 0;
- if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
- !PageUptodate(page)) {
- ret = btrfs_read_folio(NULL, folio);
- if (ret)
- return ret;
- lock_page(page);
- if (!PageUptodate(page)) {
- unlock_page(page);
- return -EIO;
- }
-
- /*
- * Since btrfs_read_folio() will unlock the folio before it
- * returns, there is a window where btrfs_release_folio() can be
- * called to release the page. Here we check both inode
- * mapping and PagePrivate() to make sure the page was not
- * released.
- *
- * The private flag check is essential for subpage as we need
- * to store extra bitmap using folio private.
- */
- if (page->mapping != inode->i_mapping || !folio_test_private(folio)) {
- unlock_page(page);
- return -EAGAIN;
- }
- }
- return 0;
-}
+ if (folio_test_uptodate(folio))
+ return 0;
-static fgf_t get_prepare_fgp_flags(bool nowait)
-{
- fgf_t fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;
+ if (!force_uptodate &&
+ IS_ALIGNED(clamp_start, PAGE_SIZE) &&
+ IS_ALIGNED(clamp_end, PAGE_SIZE))
+ return 0;
- if (nowait)
- fgp_flags |= FGP_NOWAIT;
+ ret = btrfs_read_folio(NULL, folio);
+ if (ret)
+ return ret;
+ folio_lock(folio);
+ if (!folio_test_uptodate(folio)) {
+ folio_unlock(folio);
+ return -EIO;
+ }
- return fgp_flags;
+ /*
+ * Since btrfs_read_folio() will unlock the folio before it returns,
+ * there is a window where btrfs_release_folio() can be called to
+ * release the folio. Here we check both the inode mapping and the
+ * folio private flag to make sure the folio was not released.
+ *
+ * The private flag check is essential for subpage as we need to store
+ * extra bitmap using folio private.
+ */
+ if (folio->mapping != inode->i_mapping || !folio_test_private(folio)) {
+ folio_unlock(folio);
+ return -EAGAIN;
+ }
+ return 0;
}
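The alignment short-circuit above can be traced with illustrative numbers
(assuming 4K pages and an order-0 folio at index 1, i.e. covering
[4096, 8192)): a write starting at 4096 fully overwrites the page and needs
no read, while a write starting at 5000 leaves [4096, 5000) stale:

	u64 pos = 5000, len = 7288;			/* write [5000, 12288) */
	u64 clamp_start = max_t(u64, pos, 4096);	/* 5000 */
	u64 clamp_end = min_t(u64, pos + len, 8192);	/* 8192 */
	/* clamp_start is not PAGE_SIZE aligned, so btrfs_read_folio() runs. */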
static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
@@ -914,89 +853,68 @@ static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
}
/*
- * this just gets pages into the page cache and locks them down.
+ * Get folio into the page cache and lock it.
*/
-static noinline int prepare_pages(struct inode *inode, struct page **pages,
- size_t num_pages, loff_t pos,
- size_t write_bytes, bool force_uptodate,
- bool nowait)
+static noinline int prepare_one_folio(struct inode *inode, struct folio **folio_ret,
+ loff_t pos, size_t write_bytes,
+ bool force_uptodate, bool nowait)
{
- int i;
unsigned long index = pos >> PAGE_SHIFT;
gfp_t mask = get_prepare_gfp_flags(inode, nowait);
- fgf_t fgp_flags = get_prepare_fgp_flags(nowait);
+ fgf_t fgp_flags = (nowait ? FGP_WRITEBEGIN | FGP_NOWAIT : FGP_WRITEBEGIN);
+ struct folio *folio;
int ret = 0;
- int faili;
- for (i = 0; i < num_pages; i++) {
again:
- pages[i] = pagecache_get_page(inode->i_mapping, index + i,
- fgp_flags, mask | __GFP_WRITE);
- if (!pages[i]) {
- faili = i - 1;
- if (nowait)
- ret = -EAGAIN;
- else
- ret = -ENOMEM;
- goto fail;
- }
-
- ret = set_page_extent_mapped(pages[i]);
- if (ret < 0) {
- faili = i;
- goto fail;
- }
-
- if (i == 0)
- ret = prepare_uptodate_page(inode, pages[i], pos,
- force_uptodate);
- if (!ret && i == num_pages - 1)
- ret = prepare_uptodate_page(inode, pages[i],
- pos + write_bytes, false);
- if (ret) {
- put_page(pages[i]);
- if (!nowait && ret == -EAGAIN) {
- ret = 0;
- goto again;
- }
- faili = i - 1;
- goto fail;
+ folio = __filemap_get_folio(inode->i_mapping, index, fgp_flags, mask);
+ if (IS_ERR(folio)) {
+ if (nowait)
+ ret = -EAGAIN;
+ else
+ ret = PTR_ERR(folio);
+ return ret;
+ }
+ folio_wait_writeback(folio);
+ /* Only page sized folios are supported for now. */
+ ASSERT(folio_order(folio) == 0);
+ ret = set_folio_extent_mapped(folio);
+ if (ret < 0) {
+ folio_unlock(folio);
+ folio_put(folio);
+ return ret;
+ }
+ ret = prepare_uptodate_folio(inode, folio, pos, write_bytes, force_uptodate);
+ if (ret) {
+ /* The folio is already unlocked. */
+ folio_put(folio);
+ if (!nowait && ret == -EAGAIN) {
+ ret = 0;
+ goto again;
}
- wait_on_page_writeback(pages[i]);
+ return ret;
}
-
+ *folio_ret = folio;
return 0;
-fail:
- while (faili >= 0) {
- unlock_page(pages[faili]);
- put_page(pages[faili]);
- faili--;
- }
- return ret;
-
}
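For reference, FGP_WRITEBEGIN used above is a <linux/pagemap.h> shorthand;
at the time of writing it expands to the flags below, i.e. what the removed
get_prepare_fgp_flags() built, minus FGP_ACCESSED and plus
FGP_WRITE | FGP_STABLE (a sketch, not a definitive definition):

	fgf_t fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
	if (nowait)
		fgp_flags |= FGP_NOWAIT;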
/*
- * This function locks the extent and properly waits for data=ordered extents
- * to finish before allowing the pages to be modified if need.
+ * Locks the extent and properly waits for data=ordered extents to finish
+ * before allowing the folio to be modified if needed.
*
- * The return value:
+ * Return:
* 1 - the extent is locked
* 0 - the extent is not locked, and everything is OK
- * -EAGAIN - need re-prepare the pages
- * the other < 0 number - Something wrong happens
+ * -EAGAIN - need to prepare the folio again
*/
static noinline int
-lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
- size_t num_pages, loff_t pos,
- size_t write_bytes,
+lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio,
+ loff_t pos, size_t write_bytes,
u64 *lockstart, u64 *lockend, bool nowait,
struct extent_state **cached_state)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 start_pos;
u64 last_pos;
- int i;
int ret = 0;
start_pos = round_down(pos, fs_info->sectorsize);
@@ -1008,12 +926,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
if (nowait) {
if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
cached_state)) {
- for (i = 0; i < num_pages; i++) {
- unlock_page(pages[i]);
- put_page(pages[i]);
- pages[i] = NULL;
- }
-
+ folio_unlock(folio);
+ folio_put(folio);
return -EAGAIN;
}
} else {
@@ -1027,10 +941,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
ordered->file_offset <= last_pos) {
unlock_extent(&inode->io_tree, start_pos, last_pos,
cached_state);
- for (i = 0; i < num_pages; i++) {
- unlock_page(pages[i]);
- put_page(pages[i]);
- }
+ folio_unlock(folio);
+ folio_put(folio);
btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
return -EAGAIN;
@@ -1044,11 +956,10 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
}
/*
- * We should be called after prepare_pages() which should have locked
+ * We should be called after prepare_one_folio() which should have locked
* the folio.
*/
- for (i = 0; i < num_pages; i++)
- WARN_ON(!PageLocked(pages[i]));
+ WARN_ON(!folio_test_locked(folio));
return ret;
}
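A caller-side sketch of how the three return values are consumed, mirroring
the loop in btrfs_buffered_write() further below (error handling trimmed):

	extents_locked = lock_and_cleanup_extent_if_need(BTRFS_I(inode), folio,
			pos, write_bytes, &lockstart, &lockend, nowait,
			&cached_state);
	if (extents_locked < 0) {
		if (!nowait && extents_locked == -EAGAIN)
			goto again;		/* re-run prepare_one_folio() */
		ret = extents_locked;		/* hard error, bail out */
	}
	/* 0: nothing locked; 1: unlock_extent() is owed after the copy. */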
@@ -1104,7 +1015,7 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
&cached_state);
}
ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
- NULL, nowait, false);
+ NULL, nowait);
if (ret <= 0)
btrfs_drew_write_unlock(&root->snapshot_lock);
else
@@ -1120,27 +1031,7 @@ void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}
-static void update_time_for_write(struct inode *inode)
-{
- struct timespec64 now, ts;
-
- if (IS_NOCMTIME(inode))
- return;
-
- now = current_time(inode);
- ts = inode_get_mtime(inode);
- if (!timespec64_equal(&ts, &now))
- inode_set_mtime_to_ts(inode, now);
-
- ts = inode_get_ctime(inode);
- if (!timespec64_equal(&ts, &now))
- inode_set_ctime_to_ts(inode, now);
-
- if (IS_I_VERSION(inode))
- inode_inc_iversion(inode);
-}
-
-int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, size_t count)
+int btrfs_write_check(struct kiocb *iocb, size_t count)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
@@ -1170,7 +1061,10 @@ int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, size_t count)
* need to start yet another transaction to update the inode as we will
* update the inode when we finish writing whatever data we write.
*/
- update_time_for_write(inode);
+ if (!IS_NOCMTIME(inode)) {
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ inode_inc_iversion(inode);
+ }
start_pos = round_down(pos, fs_info->sectorsize);
oldsize = i_size_read(inode);
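The one-liner above relies on inode_set_ctime_current() returning the
timestamp it just stored, so ctime and mtime come out identical; an
equivalent expanded form (sketch only):

	if (!IS_NOCMTIME(inode)) {
		struct timespec64 now = inode_set_ctime_current(inode);

		inode_set_mtime_to_ts(inode, now);
		inode_inc_iversion(inode);
	}

Compared with the removed update_time_for_write(), this drops the
timespec64_equal() short-circuits and the IS_I_VERSION() check, updating the
times and i_version unconditionally for any inode without S_NOCMTIME.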
@@ -1192,20 +1086,17 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
loff_t pos;
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- struct page **pages = NULL;
struct extent_changeset *data_reserved = NULL;
u64 release_bytes = 0;
u64 lockstart;
u64 lockend;
size_t num_written = 0;
- int nrptrs;
ssize_t ret;
- bool only_release_metadata = false;
- bool force_page_uptodate = false;
loff_t old_isize = i_size_read(inode);
unsigned int ilock_flags = 0;
const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
+ bool only_release_metadata = false;
if (nowait)
ilock_flags |= BTRFS_ILOCK_TRY;
@@ -1218,38 +1109,26 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
if (ret <= 0)
goto out;
- ret = btrfs_write_check(iocb, i, ret);
+ ret = btrfs_write_check(iocb, ret);
if (ret < 0)
goto out;
pos = iocb->ki_pos;
- nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
- PAGE_SIZE / (sizeof(struct page *)));
- nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
- nrptrs = max(nrptrs, 8);
- pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
- if (!pages) {
- ret = -ENOMEM;
- goto out;
- }
-
while (iov_iter_count(i) > 0) {
struct extent_state *cached_state = NULL;
size_t offset = offset_in_page(pos);
size_t sector_offset;
- size_t write_bytes = min(iov_iter_count(i),
- nrptrs * (size_t)PAGE_SIZE -
- offset);
- size_t num_pages;
+ size_t write_bytes = min(iov_iter_count(i), PAGE_SIZE - offset);
size_t reserve_bytes;
- size_t dirty_pages;
size_t copied;
size_t dirty_sectors;
size_t num_sectors;
+ struct folio *folio = NULL;
int extents_locked;
+ bool force_page_uptodate = false;
/*
- * Fault pages before locking them in prepare_pages
+ * Fault pages before locking them in prepare_one_folio()
* to avoid recursive lock
*/
if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
@@ -1288,8 +1167,6 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
only_release_metadata = true;
}
- num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
- WARN_ON(num_pages > nrptrs);
reserve_bytes = round_up(write_bytes + sector_offset,
fs_info->sectorsize);
WARN_ON(reserve_bytes == 0);
@@ -1317,23 +1194,17 @@ again:
break;
}
- /*
- * This is going to setup the pages array with the number of
- * pages we want, so we don't really need to worry about the
- * contents of pages from loop to loop
- */
- ret = prepare_pages(inode, pages, num_pages,
- pos, write_bytes, force_page_uptodate, false);
+ ret = prepare_one_folio(inode, &folio, pos, write_bytes,
+ force_page_uptodate, false);
if (ret) {
btrfs_delalloc_release_extents(BTRFS_I(inode),
reserve_bytes);
break;
}
- extents_locked = lock_and_cleanup_extent_if_need(
- BTRFS_I(inode), pages,
- num_pages, pos, write_bytes, &lockstart,
- &lockend, nowait, &cached_state);
+ extents_locked = lock_and_cleanup_extent_if_need(BTRFS_I(inode),
+ folio, pos, write_bytes, &lockstart,
+ &lockend, nowait, &cached_state);
if (extents_locked < 0) {
if (!nowait && extents_locked == -EAGAIN)
goto again;
@@ -1344,28 +1215,34 @@ again:
break;
}
- copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
+ copied = copy_folio_from_iter_atomic(folio,
+ offset_in_folio(folio, pos), write_bytes, i);
+ flush_dcache_folio(folio);
+
+ /*
+ * If we get a partial write, we can end up with a partially
+ * uptodate page. If the sector size is smaller than the page
+ * size we can handle it, but if the copy is not sector aligned
+ * it causes a lot of complexity, so make sure that doesn't
+ * happen by forcing this copy to be retried.
+ */
+ if (unlikely(copied < write_bytes)) {
+ if (!folio_test_uptodate(folio)) {
+ iov_iter_revert(i, copied);
+ copied = 0;
+ }
+ }
num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
dirty_sectors = round_up(copied + sector_offset,
fs_info->sectorsize);
dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
- /*
- * if we have trouble faulting in the pages, fall
- * back to one page at a time
- */
- if (copied < write_bytes)
- nrptrs = 1;
-
if (copied == 0) {
force_page_uptodate = true;
dirty_sectors = 0;
- dirty_pages = 0;
} else {
force_page_uptodate = false;
- dirty_pages = DIV_ROUND_UP(copied + offset,
- PAGE_SIZE);
}
if (num_sectors > dirty_sectors) {
@@ -1375,13 +1252,10 @@ again:
btrfs_delalloc_release_metadata(BTRFS_I(inode),
release_bytes, true);
} else {
- u64 __pos;
-
- __pos = round_down(pos,
- fs_info->sectorsize) +
- (dirty_pages << PAGE_SHIFT);
+ u64 release_start = round_up(pos + copied,
+ fs_info->sectorsize);
btrfs_delalloc_release_space(BTRFS_I(inode),
- data_reserved, __pos,
+ data_reserved, release_start,
release_bytes, true);
}
}
@@ -1389,15 +1263,14 @@ again:
release_bytes = round_up(copied + sector_offset,
fs_info->sectorsize);
- ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
- dirty_pages, pos, copied,
+ ret = btrfs_dirty_folio(BTRFS_I(inode), folio, pos, copied,
&cached_state, only_release_metadata);
/*
* If we have not locked the extent range, because the range's
* start offset is >= i_size, we might still have a non-NULL
* cached extent state, acquired while marking the extent range
- * as delalloc through btrfs_dirty_pages(). Therefore free any
+ * as delalloc through btrfs_dirty_folio(). Therefore free any
* possible cached extent state to avoid a memory leak.
*/
if (extents_locked)
@@ -1408,7 +1281,7 @@ again:
btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
if (ret) {
- btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
+ btrfs_drop_folio(fs_info, folio, pos, copied);
break;
}
@@ -1416,7 +1289,7 @@ again:
if (only_release_metadata)
btrfs_check_nocow_unlock(BTRFS_I(inode));
- btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
+ btrfs_drop_folio(fs_info, folio, pos, copied);
cond_resched();
@@ -1424,8 +1297,6 @@ again:
num_written += copied;
}
- kfree(pages);
-
if (release_bytes) {
if (only_release_metadata) {
btrfs_check_nocow_unlock(BTRFS_I(inode));
@@ -1470,7 +1341,7 @@ static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
if (ret || encoded->len == 0)
goto out;
- ret = btrfs_write_check(iocb, from, encoded->len);
+ ret = btrfs_write_check(iocb, encoded->len);
if (ret < 0)
goto out;
@@ -2137,7 +2008,6 @@ static int fill_holes(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
@@ -2154,7 +2024,6 @@ static int fill_holes(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
btrfs_release_path(path);
@@ -2302,7 +2171,6 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
if (extent_info->is_new_extent)
btrfs_set_file_extent_generation(leaf, extent, trans->transid);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
@@ -3802,6 +3670,7 @@ const struct file_operations btrfs_file_operations = {
.compat_ioctl = btrfs_compat_ioctl,
#endif
.remap_file_range = btrfs_remap_file_range,
+ .uring_cmd = btrfs_uring_cmd,
.fop_flags = FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC,
};
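Taken together, the file.c changes above collapse the old page-array
machinery into a strict one-folio-per-iteration write loop; a condensed
sketch of the new iteration, using the names from the diff (reservation
bookkeeping and error paths omitted):

	while (iov_iter_count(i) > 0) {
		size_t write_bytes = min(iov_iter_count(i),
					 PAGE_SIZE - offset_in_page(pos));
		struct folio *folio = NULL;

		prepare_one_folio(inode, &folio, pos, write_bytes,
				  force_page_uptodate, false);
		lock_and_cleanup_extent_if_need(BTRFS_I(inode), folio, pos,
				write_bytes, &lockstart, &lockend, nowait,
				&cached_state);
		copied = copy_folio_from_iter_atomic(folio,
				offset_in_folio(folio, pos), write_bytes, i);
		btrfs_dirty_folio(BTRFS_I(inode), folio, pos, copied,
				  &cached_state, only_release_metadata);
		btrfs_drop_folio(fs_info, folio, pos, copied);
		pos += copied;
		num_written += copied;
	}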
diff --git a/fs/btrfs/file.h b/fs/btrfs/file.h
index 912254e653cf..de89e644be29 100644
--- a/fs/btrfs/file.h
+++ b/fs/btrfs/file.h
@@ -34,9 +34,8 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
const struct btrfs_ioctl_encoded_io_args *encoded);
int btrfs_release_file(struct inode *inode, struct file *file);
-int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
- size_t num_pages, loff_t pos, size_t write_bytes,
- struct extent_state **cached, bool noreserve);
+int btrfs_dirty_folio(struct btrfs_inode *inode, struct folio *folio, loff_t pos,
+ size_t write_bytes, struct extent_state **cached, bool noreserve);
int btrfs_fdatawrite_range(struct btrfs_inode *inode, loff_t start, loff_t end);
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
size_t *write_bytes, bool nowait);
@@ -44,7 +43,7 @@ void btrfs_check_nocow_unlock(struct btrfs_inode *inode);
bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
struct extent_state **cached_state,
u64 *delalloc_start_ret, u64 *delalloc_end_ret);
-int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, size_t count);
+int btrfs_write_check(struct kiocb *iocb, size_t count);
ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i);
#endif
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index eaa1dbd31352..d42b6f882f57 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -11,7 +11,8 @@
#include <linux/ratelimit.h>
#include <linux/error-injection.h>
#include <linux/sched/mm.h>
-#include "ctree.h"
+#include <linux/string_choices.h>
+#include "extent-tree.h"
#include "fs.h"
#include "messages.h"
#include "misc.h"
@@ -197,7 +198,6 @@ static int __create_free_space_inode(struct btrfs_root *root,
btrfs_set_inode_nlink(leaf, inode_item, 1);
btrfs_set_inode_transid(leaf, inode_item, trans->transid);
btrfs_set_inode_block_group(leaf, inode_item, offset);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
@@ -215,7 +215,6 @@ static int __create_free_space_inode(struct btrfs_root *root,
struct btrfs_free_space_header);
memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
btrfs_set_free_space_key(leaf, header, &disk_key);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
return 0;
@@ -462,7 +461,7 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
return -ENOMEM;
}
- ret = set_page_extent_mapped(page);
+ ret = set_folio_extent_mapped(page_folio(page));
if (ret < 0) {
unlock_page(page);
put_page(page);
@@ -1188,7 +1187,6 @@ update_cache_item(struct btrfs_trans_handle *trans,
btrfs_set_free_space_entries(leaf, header, entries);
btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
btrfs_set_free_space_generation(leaf, header, trans->transid);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
return 0;
@@ -1387,6 +1385,7 @@ static int __btrfs_write_out_cache(struct inode *inode,
int bitmaps = 0;
int ret;
int must_iput = 0;
+ loff_t i_size;
if (!i_size_read(inode))
return -EIO;
@@ -1457,11 +1456,16 @@ static int __btrfs_write_out_cache(struct inode *inode,
io_ctl_zero_remaining_pages(io_ctl);
/* Everything is written out, now we dirty the pages in the file. */
- ret = btrfs_dirty_pages(BTRFS_I(inode), io_ctl->pages,
- io_ctl->num_pages, 0, i_size_read(inode),
- &cached_state, false);
- if (ret)
- goto out_nospc;
+ i_size = i_size_read(inode);
+ for (int i = 0; i < round_up(i_size, PAGE_SIZE) / PAGE_SIZE; i++) {
+ u64 dirty_start = i * PAGE_SIZE;
+ u64 dirty_len = min_t(u64, dirty_start + PAGE_SIZE, i_size) - dirty_start;
+
+ ret = btrfs_dirty_folio(BTRFS_I(inode), page_folio(io_ctl->pages[i]),
+ dirty_start, dirty_len, &cached_state, false);
+ if (ret < 0)
+ goto out_nospc;
+ }
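A worked example of the per-page split above, assuming PAGE_SIZE is 4096 and
i_size is 10000; the loop runs three times and the last iteration clamps to
the inode size:

	i = 0: dirty_start = 0,    dirty_len = min(4096, 10000)  - 0    = 4096
	i = 1: dirty_start = 4096, dirty_len = min(8192, 10000)  - 4096 = 4096
	i = 2: dirty_start = 8192, dirty_len = min(12288, 10000) - 8192 = 1808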
if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
up_write(&block_group->data_rwsem);
@@ -2936,12 +2940,11 @@ void btrfs_dump_free_space(struct btrfs_block_group *block_group,
if (info->bytes >= bytes && !block_group->ro)
count++;
btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
- info->offset, info->bytes,
- (info->bitmap) ? "yes" : "no");
+ info->offset, info->bytes, str_yes_no(info->bitmap));
}
spin_unlock(&ctl->tree_lock);
btrfs_info(fs_info, "block group has cluster?: %s",
- list_empty(&block_group->cluster_list) ? "no" : "yes");
+ str_no_yes(list_empty(&block_group->cluster_list)));
btrfs_info(fs_info,
"%d free space entries at or bigger than %llu bytes",
count, bytes);
@@ -3809,7 +3812,7 @@ next:
if (async && *total_trimmed)
break;
- if (fatal_signal_pending(current)) {
+ if (btrfs_trim_interrupted()) {
ret = -ERESTARTSYS;
break;
}
@@ -4000,7 +4003,7 @@ next:
}
block_group->discard_cursor = start;
- if (fatal_signal_pending(current)) {
+ if (btrfs_trim_interrupted()) {
if (start != offset)
reset_trimming_bitmap(ctl, offset);
ret = -ERESTARTSYS;
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 83774bfd7b3b..9f1dbfdee8ca 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/freezer.h>
#include "fs.h"
struct inode;
@@ -56,6 +57,11 @@ static inline bool btrfs_free_space_trimming_bitmap(
return (info->trim_state == BTRFS_TRIM_STATE_TRIMMING);
}
+static inline bool btrfs_trim_interrupted(void)
+{
+ return fatal_signal_pending(current) || freezing(current);
+}
+
/*
* Deltas are an effective way to populate global statistics. Give macro names
* to make it clear what we're doing. An example is discard_extents in
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index 7ba50e133921..cae540ec15ed 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -89,7 +89,6 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans,
struct btrfs_free_space_info);
btrfs_set_free_space_extent_count(leaf, info, 0);
btrfs_set_free_space_flags(leaf, info, 0);
- btrfs_mark_buffer_dirty(trans, leaf);
ret = 0;
out:
@@ -287,7 +286,6 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
btrfs_set_free_space_flags(leaf, info, flags);
expected_extent_count = btrfs_free_space_extent_count(leaf, info);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
if (extent_count != expected_extent_count) {
@@ -324,7 +322,6 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, bitmap_cursor, ptr,
data_size);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
i += extent_size;
@@ -430,7 +427,6 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
btrfs_set_free_space_flags(leaf, info, flags);
expected_extent_count = btrfs_free_space_extent_count(leaf, info);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
nrbits = block_group->length >> block_group->fs_info->sectorsize_bits;
@@ -495,7 +491,6 @@ static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
extent_count += new_extents;
btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
@@ -1350,6 +1345,12 @@ int btrfs_rebuild_free_space_tree(struct btrfs_fs_info *fs_info)
btrfs_end_transaction(trans);
return ret;
}
+ if (btrfs_should_end_transaction(trans)) {
+ btrfs_end_transaction(trans);
+ trans = btrfs_start_transaction(free_space_root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ }
node = rb_next(node);
}
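The hunk above applies the usual pattern for keeping transactions small
inside a long-running loop; a generic sketch of that pattern (assuming each
pass dirties a bounded amount of metadata):

	while (have_more_work) {
		ret = do_one_unit_of_work(trans);	/* hypothetical helper */
		if (ret)
			break;
		if (btrfs_should_end_transaction(trans)) {
			btrfs_end_transaction(trans);
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
		}
	}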
diff --git a/fs/btrfs/fs.c b/fs/btrfs/fs.c
index 31c1648bc0b4..09cfb43580cb 100644
--- a/fs/btrfs/fs.c
+++ b/fs/btrfs/fs.c
@@ -4,6 +4,136 @@
#include "ctree.h"
#include "fs.h"
#include "accessors.h"
+#include "volumes.h"
+
+static const struct btrfs_csums {
+ u16 size;
+ const char name[10];
+ const char driver[12];
+} btrfs_csums[] = {
+ [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
+ [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
+ [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
+ [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
+ .driver = "blake2b-256" },
+};
+
+/* This exists for btrfs-progs usages. */
+u16 btrfs_csum_type_size(u16 type)
+{
+ return btrfs_csums[type].size;
+}
+
+int btrfs_super_csum_size(const struct btrfs_super_block *s)
+{
+ u16 t = btrfs_super_csum_type(s);
+
+ /* csum type is validated at mount time. */
+ return btrfs_csum_type_size(t);
+}
+
+const char *btrfs_super_csum_name(u16 csum_type)
+{
+ /* csum type is validated at mount time. */
+ return btrfs_csums[csum_type].name;
+}
+
+/*
+ * Return driver name if defined, otherwise the name that's also a valid driver
+ * name.
+ */
+const char *btrfs_super_csum_driver(u16 csum_type)
+{
+ /* csum type is validated at mount time */
+ return btrfs_csums[csum_type].driver[0] ?
+ btrfs_csums[csum_type].driver :
+ btrfs_csums[csum_type].name;
+}
+
+size_t __attribute_const__ btrfs_get_num_csums(void)
+{
+ return ARRAY_SIZE(btrfs_csums);
+}
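A usage sketch for the helpers above, e.g. when resolving the crypto driver
for a superblock's checksum (csum_type is assumed to be validated already,
as the comments note; the shash allocation is illustrative):

	u16 csum_type = btrfs_super_csum_type(sb);
	int csum_size = btrfs_super_csum_size(sb);
	/* BTRFS_CSUM_TYPE_BLAKE2 maps to driver "blake2b-256", size 32. */
	struct crypto_shash *shash =
		crypto_alloc_shash(btrfs_super_csum_driver(csum_type), 0, 0);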
+
+/*
+ * Start exclusive operation @type, return true on success.
+ */
+bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation type)
+{
+ bool ret = false;
+
+ spin_lock(&fs_info->super_lock);
+ if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {
+ fs_info->exclusive_operation = type;
+ ret = true;
+ }
+ spin_unlock(&fs_info->super_lock);
+
+ return ret;
+}
+
+/*
+ * Conditionally allow to enter the exclusive operation in case it's compatible
+ * with the running one. This must be paired with btrfs_exclop_start_unlock()
+ * and btrfs_exclop_finish().
+ *
+ * Compatibility:
+ * - the same type is already running
+ * - when trying to add a device and balance has been paused
+ * - not BTRFS_EXCLOP_NONE - this is intentionally incompatible and the caller
+ * must first check the condition that would allow none -> @type
+ */
+bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation type)
+{
+ spin_lock(&fs_info->super_lock);
+ if (fs_info->exclusive_operation == type ||
+ (fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED &&
+ type == BTRFS_EXCLOP_DEV_ADD))
+ return true;
+
+ spin_unlock(&fs_info->super_lock);
+ return false;
+}
+
+void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info)
+{
+ spin_unlock(&fs_info->super_lock);
+}
+
+void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
+{
+ spin_lock(&fs_info->super_lock);
+ WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
+ spin_unlock(&fs_info->super_lock);
+ sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
+}
+
+void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation op)
+{
+ switch (op) {
+ case BTRFS_EXCLOP_BALANCE_PAUSED:
+ spin_lock(&fs_info->super_lock);
+ ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE ||
+ fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD ||
+ fs_info->exclusive_operation == BTRFS_EXCLOP_NONE ||
+ fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
+ fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE_PAUSED;
+ spin_unlock(&fs_info->super_lock);
+ break;
+ case BTRFS_EXCLOP_BALANCE:
+ spin_lock(&fs_info->super_lock);
+ ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
+ fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
+ spin_unlock(&fs_info->super_lock);
+ break;
+ default:
+ btrfs_warn(fs_info,
+ "invalid exclop balance operation %d requested", op);
+ }
+}
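The try-lock variant above intentionally returns with super_lock still held
on success; a sketch of the pairing the comment before
btrfs_exclop_start_try_lock() describes:

	if (!btrfs_exclop_start_try_lock(fs_info, BTRFS_EXCLOP_DEV_ADD))
		return -EBUSY;	/* incompatible operation already running */
	/* ... checks that must run under fs_info->super_lock ... */
	btrfs_exclop_start_unlock(fs_info);

	/* ... the actual device-add work ... */

	btrfs_exclop_finish(fs_info);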
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
const char *name)
diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
index 79f64e383edd..b572d6b9730b 100644
--- a/fs/btrfs/fs.h
+++ b/fs/btrfs/fs.h
@@ -14,10 +14,10 @@
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include <linux/rwlock_types.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/list.h>
+#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
@@ -263,10 +263,10 @@ enum {
BTRFS_FEATURE_INCOMPAT_ZONED | \
BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA)
-#ifdef CONFIG_BTRFS_DEBUG
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
/*
* Features under development like Extent tree v2 support are enabled
- * only under CONFIG_BTRFS_DEBUG.
+ * only under CONFIG_BTRFS_EXPERIMENTAL.
*/
#define BTRFS_FEATURE_INCOMPAT_SUPP \
(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE | \
@@ -317,6 +317,8 @@ struct btrfs_dev_replace {
struct percpu_counter bio_counter;
wait_queue_head_t replace_wait;
+
+ struct task_struct *replace_task;
};
/*
@@ -625,6 +627,9 @@ struct btrfs_fs_info {
struct kobject *qgroups_kobj;
struct kobject *discard_kobj;
+ /* Track the number of blocks (sectors) read by the filesystem. */
+ struct percpu_counter stats_read_blocks;
+
/* Used to keep from writing metadata until there is a nice batch */
struct percpu_counter dirty_metadata_bytes;
struct percpu_counter delalloc_bytes;
@@ -633,9 +638,10 @@ struct btrfs_fs_info {
s32 delalloc_batch;
struct percpu_counter evictable_extent_maps;
- spinlock_t extent_map_shrinker_lock;
- u64 extent_map_shrinker_last_root;
- u64 extent_map_shrinker_last_ino;
+ u64 em_shrinker_last_root;
+ u64 em_shrinker_last_ino;
+ atomic64_t em_shrinker_nr_to_scan;
+ struct work_struct em_shrinker_work;
/* Protected by 'trans_lock'. */
struct list_head dirty_cowonly_roots;
@@ -876,17 +882,19 @@ struct btrfs_fs_info {
#endif
};
-#define page_to_inode(_page) (BTRFS_I(_Generic((_page), \
- struct page *: (_page))->mapping->host))
#define folio_to_inode(_folio) (BTRFS_I(_Generic((_folio), \
struct folio *: (_folio))->mapping->host))
-#define page_to_fs_info(_page) (page_to_inode(_page)->root->fs_info)
#define folio_to_fs_info(_folio) (folio_to_inode(_folio)->root->fs_info)
#define inode_to_fs_info(_inode) (BTRFS_I(_Generic((_inode), \
struct inode *: (_inode)))->root->fs_info)
+static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
+{
+ return mapping_gfp_constraint(mapping, ~__GFP_FS);
+}
+
static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info)
{
return READ_ONCE(fs_info->generation);
@@ -953,6 +961,8 @@ static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info,
#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
sizeof(struct btrfs_item))
+#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) ((bytes) >> (fs_info)->sectorsize_bits)
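For example, with a 4K sector size sectorsize_bits is 12 and the macro is a
plain right shift that rounds down:

	num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, 9000);	/* 9000 >> 12 == 2 */

Callers that need rounding up, such as the dirty_sectors computation in
btrfs_buffered_write() above, round_up() to the sector size first.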
+
static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && fs_info->zone_size > 0;
@@ -982,6 +992,17 @@ void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
int btrfs_check_ioctl_vol_args_path(const struct btrfs_ioctl_vol_args *vol_args);
+u16 btrfs_csum_type_size(u16 type);
+int btrfs_super_csum_size(const struct btrfs_super_block *s);
+const char *btrfs_super_csum_name(u16 csum_type);
+const char *btrfs_super_csum_driver(u16 csum_type);
+size_t __attribute_const__ btrfs_get_num_csums(void);
+
+static inline bool btrfs_is_empty_uuid(const u8 *uuid)
+{
+ return uuid_is_null((const uuid_t *)uuid);
+}
+
/* Compatibility and incompatibility defines */
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
const char *name);
@@ -1058,6 +1079,14 @@ static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
(unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, \
&(fs_info)->fs_state)))
+/*
+ * We use folio flag owner_2 to indicate there is an ordered extent with
+ * unfinished IO.
+ */
+#define folio_test_ordered(folio) folio_test_owner_2(folio)
+#define folio_set_ordered(folio) folio_set_owner_2(folio)
+#define folio_clear_ordered(folio) folio_clear_owner_2(folio)
+
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
#define EXPORT_FOR_TESTS
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 29572dfaf878..448aa1a682d6 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -298,8 +298,6 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
ptr = (unsigned long)&extref->name;
write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
-
out:
btrfs_free_path(path);
return ret;
@@ -363,8 +361,6 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
ptr = (unsigned long)(ref + 1);
}
write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
-
out:
btrfs_free_path(path);
@@ -590,7 +586,6 @@ search_again:
num_dec = (orig_num_bytes - extent_num_bytes);
if (extent_start != 0)
control->sub_bytes += num_dec;
- btrfs_mark_buffer_dirty(trans, leaf);
} else {
extent_num_bytes =
btrfs_file_extent_disk_num_bytes(leaf, fi);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5618ca02934a..fe2c810335ff 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -393,35 +393,14 @@ void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
* extent (btrfs_finish_ordered_io()).
*/
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
- struct folio *locked_folio,
u64 offset, u64 bytes)
{
unsigned long index = offset >> PAGE_SHIFT;
unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
- u64 page_start = 0, page_end = 0;
struct folio *folio;
- if (locked_folio) {
- page_start = folio_pos(locked_folio);
- page_end = page_start + folio_size(locked_folio) - 1;
- }
-
while (index <= end_index) {
- /*
- * For locked page, we will call btrfs_mark_ordered_io_finished
- * through btrfs_mark_ordered_io_finished() on it
- * in run_delalloc_range() for the error handling, which will
- * clear page Ordered and run the ordered extent accounting.
- *
- * Here we can't just clear the Ordered bit, or
- * btrfs_mark_ordered_io_finished() would skip the accounting
- * for the page range, and the ordered extent will never finish.
- */
- if (locked_folio && index == (page_start >> PAGE_SHIFT)) {
- index++;
- continue;
- }
- folio = __filemap_get_folio(inode->vfs_inode.i_mapping, index, 0, 0);
+ folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
index++;
if (IS_ERR(folio))
continue;
@@ -436,23 +415,6 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
folio_put(folio);
}
- if (locked_folio) {
- /* The locked page covers the full range, nothing needs to be done */
- if (bytes + offset <= page_start + folio_size(locked_folio))
- return;
- /*
- * In case this page belongs to the delalloc range being
- * instantiated then skip it, since the first page of a range is
- * going to be properly cleaned up by the caller of
- * run_delalloc_range
- */
- if (page_start >= offset && page_end <= (offset + bytes - 1)) {
- bytes = offset + bytes - folio_pos(locked_folio) -
- folio_size(locked_folio);
- offset = folio_pos(locked_folio) + folio_size(locked_folio);
- }
- }
-
return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}
@@ -556,8 +518,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
} else {
struct folio *folio;
- folio = __filemap_get_folio(inode->vfs_inode.i_mapping,
- 0, 0, 0);
+ folio = filemap_get_folio(inode->vfs_inode.i_mapping, 0);
ASSERT(!IS_ERR(folio));
btrfs_set_file_extent_compression(leaf, ei, 0);
kaddr = kmap_local_folio(folio, 0);
@@ -565,7 +526,6 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
kunmap_local(kaddr);
folio_put(folio);
}
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
/*
@@ -646,7 +606,7 @@ static bool can_cow_file_range_inline(struct btrfs_inode *inode,
* If being used directly, you must have already checked we're allowed to cow
* the range by getting true from can_cow_file_range_inline().
*/
-static noinline int __cow_file_range_inline(struct btrfs_inode *inode, u64 offset,
+static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
u64 size, size_t compressed_size,
int compress_type,
struct folio *compressed_folio,
@@ -736,7 +696,7 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode,
return 1;
lock_extent(&inode->io_tree, offset, end, &cached);
- ret = __cow_file_range_inline(inode, offset, size, compressed_size,
+ ret = __cow_file_range_inline(inode, size, compressed_size,
compress_type, compressed_folio,
update_i_size);
if (ret > 0) {
@@ -832,32 +792,16 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
return 0;
}
/*
- * Special check for subpage.
- *
- * We lock the full page then run each delalloc range in the page, thus
- * for the following case, we will hit some subpage specific corner case:
+ * Only enable sector perfect compression for experimental builds.
*
- * 0 32K 64K
- * | |///////| |///////|
- * \- A \- B
+ * This is a big change for subpage cases and can hit different
+ * corner cases, so limit this feature to experimental builds for
+ * now.
*
- * In above case, both range A and range B will try to unlock the full
- * page [0, 64K), causing the one finished later will have page
- * unlocked already, triggering various page lock requirement BUG_ON()s.
- *
- * So here we add an artificial limit that subpage compression can only
- * if the range is fully page aligned.
- *
- * In theory we only need to ensure the first page is fully covered, but
- * the tailing partial page will be locked until the full compression
- * finishes, delaying the write of other range.
- *
- * TODO: Make btrfs_run_delalloc_range() to lock all delalloc range
- * first to prevent any submitted async extent to unlock the full page.
- * By this, we can ensure for subpage case that only the last async_cow
- * will unlock the full page.
+ * ETA for moving this out of experimental builds is 6.15.
*/
- if (fs_info->sectorsize < PAGE_SIZE) {
+ if (fs_info->sectorsize < PAGE_SIZE &&
+ !IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) {
if (!PAGE_ALIGNED(start) ||
!PAGE_ALIGNED(end + 1))
return 0;
@@ -896,13 +840,14 @@ static int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 e
for (unsigned long index = start >> PAGE_SHIFT;
index <= end_index; index++) {
- folio = __filemap_get_folio(inode->i_mapping, index, 0, 0);
+ folio = filemap_get_folio(inode->i_mapping, index);
if (IS_ERR(folio)) {
if (!ret)
ret = PTR_ERR(folio);
continue;
}
- folio_clear_dirty_for_io(folio);
+ btrfs_folio_clamp_clear_dirty(inode_to_fs_info(inode), folio, start,
+ end + 1 - start);
folio_put(folio);
}
return ret;
@@ -1001,17 +946,6 @@ again:
(start > 0 || end + 1 < inode->disk_i_size))
goto cleanup_and_bail_uncompressed;
- /*
- * For subpage case, we require full page alignment for the sector
- * aligned range.
- * Thus we must also check against @actual_end, not just @end.
- */
- if (blocksize < PAGE_SIZE) {
- if (!PAGE_ALIGNED(start) ||
- !PAGE_ALIGNED(round_up(actual_end, blocksize)))
- goto cleanup_and_bail_uncompressed;
- }
-
total_compressed = min_t(unsigned long, total_compressed,
BTRFS_MAX_UNCOMPRESSED);
total_in = 0;
@@ -1156,19 +1090,14 @@ static void submit_uncompressed_range(struct btrfs_inode *inode,
&wbc, false);
wbc_detach_inode(&wbc);
if (ret < 0) {
- btrfs_cleanup_ordered_extents(inode, locked_folio,
- start, end - start + 1);
- if (locked_folio) {
- const u64 page_start = folio_pos(locked_folio);
-
- folio_start_writeback(locked_folio);
- folio_end_writeback(locked_folio);
- btrfs_mark_ordered_io_finished(inode, locked_folio,
- page_start, PAGE_SIZE,
- !ret);
- mapping_set_error(locked_folio->mapping, ret);
- folio_unlock(locked_folio);
- }
+ btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
+ if (locked_folio)
+ btrfs_folio_end_lock(inode->root->fs_info, locked_folio,
+ start, async_extent->ram_size);
+ btrfs_err_rl(inode->root->fs_info,
+ "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
+ __func__, btrfs_root_id(inode->root),
+ btrfs_ino(inode), start, async_extent->ram_size, ret);
}
}
@@ -1359,7 +1288,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
u64 alloc_hint = 0;
u64 orig_start = start;
u64 num_bytes;
- unsigned long ram_size;
u64 cur_alloc_size = 0;
u64 min_alloc_size;
u64 blocksize = fs_info->sectorsize;
@@ -1367,7 +1295,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
struct extent_map *em;
unsigned clear_bits;
unsigned long page_ops;
- bool extent_reserved = false;
int ret = 0;
if (btrfs_is_free_space_inode(inode)) {
@@ -1402,6 +1329,17 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);
/*
+ * We're not doing compressed IO, don't unlock the first page (which
+ * the caller expects to stay locked), don't clear any dirty bits and
+ * don't set any writeback bits.
+ *
+ * Do set the Ordered (Private2) bit so we know this page was properly
+ * setup for writepage.
+ */
+ page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
+ page_ops |= PAGE_SET_ORDERED;
+
+ /*
* Relocation relies on the relocated extents to have exactly the same
* size as the original extents. Normally writeback for relocation data
* extents follows a NOCOW path because relocation preallocates the
@@ -1421,8 +1359,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
struct btrfs_ordered_extent *ordered;
struct btrfs_file_extent file_extent;
- cur_alloc_size = num_bytes;
- ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
+ ret = btrfs_reserve_extent(root, num_bytes, num_bytes,
min_alloc_size, 0, alloc_hint,
&ins, 1, 1);
if (ret == -EAGAIN) {
@@ -1453,9 +1390,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
if (ret < 0)
goto out_unlock;
cur_alloc_size = ins.offset;
- extent_reserved = true;
- ram_size = ins.offset;
file_extent.disk_bytenr = ins.objectid;
file_extent.disk_num_bytes = ins.offset;
file_extent.num_bytes = ins.offset;
@@ -1463,14 +1398,18 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
file_extent.offset = 0;
file_extent.compression = BTRFS_COMPRESS_NONE;
- lock_extent(&inode->io_tree, start, start + ram_size - 1,
+ /*
+ * Locked range will be released either during error clean up or
+ * after the whole range is finished.
+ */
+ lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1,
&cached);
em = btrfs_create_io_em(inode, start, &file_extent,
BTRFS_ORDERED_REGULAR);
if (IS_ERR(em)) {
unlock_extent(&inode->io_tree, start,
- start + ram_size - 1, &cached);
+ start + cur_alloc_size - 1, &cached);
ret = PTR_ERR(em);
goto out_reserve;
}
@@ -1480,7 +1419,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
1 << BTRFS_ORDERED_REGULAR);
if (IS_ERR(ordered)) {
unlock_extent(&inode->io_tree, start,
- start + ram_size - 1, &cached);
+ start + cur_alloc_size - 1, &cached);
ret = PTR_ERR(ordered);
goto out_drop_extent_cache;
}
@@ -1501,35 +1440,20 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
*/
if (ret)
btrfs_drop_extent_map_range(inode, start,
- start + ram_size - 1,
+ start + cur_alloc_size - 1,
false);
}
btrfs_put_ordered_extent(ordered);
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
- /*
- * We're not doing compressed IO, don't unlock the first page
- * (which the caller expects to stay locked), don't clear any
- * dirty bits and don't set any writeback bits
- *
- * Do set the Ordered (Private2) bit so we know this page was
- * properly setup for writepage.
- */
- page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
- page_ops |= PAGE_SET_ORDERED;
-
- extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
- locked_folio, &cached,
- EXTENT_LOCKED | EXTENT_DELALLOC,
- page_ops);
if (num_bytes < cur_alloc_size)
num_bytes = 0;
else
num_bytes -= cur_alloc_size;
alloc_hint = ins.objectid + ins.offset;
start += cur_alloc_size;
- extent_reserved = false;
+ cur_alloc_size = 0;
/*
* btrfs_reloc_clone_csums() error, since start is increased
@@ -1539,13 +1463,15 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
if (ret)
goto out_unlock;
}
+ extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached,
+ EXTENT_LOCKED | EXTENT_DELALLOC, page_ops);
done:
if (done_offset)
*done_offset = end;
return ret;
out_drop_extent_cache:
- btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
+ btrfs_drop_extent_map_range(inode, start, start + cur_alloc_size - 1, false);
out_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
@@ -1559,35 +1485,30 @@ out_unlock:
* We process each region below.
*/
- clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
- EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
- page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
-
/*
* For the range (1). We have already instantiated the ordered extents
* for this region. They are cleaned up by
* btrfs_cleanup_ordered_extents() in e.g,
- * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
- * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
- * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
- * function.
+ * btrfs_run_delalloc_range().
+ * EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV
+ * are also handled by the cleanup function.
*
- * However, in case of @keep_locked, we still need to unlock the pages
- * (except @locked_folio) to ensure all the pages are unlocked.
+ * So here we only clear the EXTENT_LOCKED and EXTENT_DELALLOC flags, and
+ * finish the writeback of the involved folios, which will never be submitted.
*/
- if (keep_locked && orig_start < start) {
+ if (orig_start < start) {
+ clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
+ page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
+
if (!locked_folio)
mapping_set_error(inode->vfs_inode.i_mapping, ret);
extent_clear_unlock_delalloc(inode, orig_start, start - 1,
- locked_folio, NULL, 0, page_ops);
+ locked_folio, NULL, clear_bits, page_ops);
}
- /*
- * At this point we're unlocked, we want to make sure we're only
- * clearing these flags under the extent lock, so lock the rest of the
- * range and clear everything up.
- */
- lock_extent(&inode->io_tree, start, end, NULL);
+ clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
+ page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
/*
* For the range (2). If we reserved an extent for our delalloc range
@@ -1599,13 +1520,12 @@ out_unlock:
* to decrement again the data space_info's bytes_may_use counter,
* therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
*/
- if (extent_reserved) {
+ if (cur_alloc_size) {
extent_clear_unlock_delalloc(inode, start,
start + cur_alloc_size - 1,
locked_folio, &cached, clear_bits,
page_ops);
btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL);
- start += cur_alloc_size;
}
/*
@@ -1614,12 +1534,18 @@ out_unlock:
* space_info's bytes_may_use counter, reserved in
* btrfs_check_data_free_space().
*/
- if (start < end) {
+ if (start + cur_alloc_size < end) {
clear_bits |= EXTENT_CLEAR_DATA_RESV;
- extent_clear_unlock_delalloc(inode, start, end, locked_folio,
+ extent_clear_unlock_delalloc(inode, start + cur_alloc_size,
+ end, locked_folio,
&cached, clear_bits, page_ops);
- btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL);
+ btrfs_qgroup_free_data(inode, NULL, start + cur_alloc_size,
+ end - start - cur_alloc_size + 1, NULL);
}
+ btrfs_err_rl(fs_info,
+ "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
+ __func__, btrfs_root_id(inode->root),
+ btrfs_ino(inode), orig_start, end + 1 - orig_start, ret);
return ret;
}
@@ -1729,7 +1655,7 @@ static bool run_delalloc_compressed(struct btrfs_inode *inode,
* need full accuracy. Just account the whole thing
* against the first page.
*/
- wbc_account_cgroup_owner(wbc, &locked_folio->page,
+ wbc_account_cgroup_owner(wbc, locked_folio,
cur_end - start);
async_chunk[i].locked_folio = locked_folio;
locked_folio = NULL;
@@ -1840,7 +1766,7 @@ static int fallback_to_cow(struct btrfs_inode *inode,
bytes = range_bytes;
spin_lock(&sinfo->lock);
- btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
+ btrfs_space_info_update_bytes_may_use(sinfo, bytes);
spin_unlock(&sinfo->lock);
if (count > 0)
@@ -1868,7 +1794,6 @@ struct can_nocow_file_extent_args {
/* End file offset (inclusive) of the range we want to NOCOW. */
u64 end;
bool writeback_path;
- bool strict;
/*
* Free the path passed to can_nocow_file_extent() once it's not needed
* anymore.
@@ -1923,8 +1848,7 @@ static int can_nocow_file_extent(struct btrfs_path *path,
* for its subvolume was created, then this implies the extent is shared,
* hence we must COW.
*/
- if (!args->strict &&
- btrfs_file_extent_generation(leaf, fi) <=
+ if (btrfs_file_extent_generation(leaf, fi) <=
btrfs_root_last_snapshot(&root->root_item))
goto out;
@@ -1953,9 +1877,8 @@ static int can_nocow_file_extent(struct btrfs_path *path,
*/
btrfs_release_path(path);
- ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
- key->offset - args->file_extent.offset,
- args->file_extent.disk_bytenr, args->strict, path);
+ ret = btrfs_cross_ref_exist(inode, key->offset - args->file_extent.offset,
+ args->file_extent.disk_bytenr, path);
WARN_ON_ONCE(ret > 0 && is_freespace_inode);
if (ret != 0)
goto out;
@@ -2002,6 +1925,53 @@ static int can_nocow_file_extent(struct btrfs_path *path,
}
/*
+ * Cleanup the dirty folios which will never be submitted due to error.
+ *
+ * When running a delalloc range, we may need to split the ranges (due to
+ * fragmentation or NOCOW). If we hit an error in a later part, we will error
+ * out, and any previously successful ranges will never be submitted. Thus we
+ * have to clean up those folios by clearing their dirty flag, and starting
+ * and finishing the writeback.
+ */
+static void cleanup_dirty_folios(struct btrfs_inode *inode,
+ struct folio *locked_folio,
+ u64 start, u64 end, int error)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
+ pgoff_t start_index = start >> PAGE_SHIFT;
+ pgoff_t end_index = end >> PAGE_SHIFT;
+ u32 len;
+
+ ASSERT(end + 1 - start < U32_MAX);
+ ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
+ IS_ALIGNED(end + 1, fs_info->sectorsize));
+ len = end + 1 - start;
+
+ /*
+ * Handle the locked folio first.
+ * The btrfs_folio_clamp_*() helpers can handle range out of the folio case.
+ */
+ btrfs_folio_clamp_finish_io(fs_info, locked_folio, start, len);
+
+ for (pgoff_t index = start_index; index <= end_index; index++) {
+ struct folio *folio;
+
+ /* Already handled at the beginning. */
+ if (index == locked_folio->index)
+ continue;
+ folio = __filemap_get_folio(mapping, index, FGP_LOCK, GFP_NOFS);
+ /* Cache already dropped, no need to do any cleanup. */
+ if (IS_ERR(folio))
+ continue;
+ btrfs_folio_clamp_finish_io(fs_info, folio, start, len);
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+ mapping_set_error(mapping, error);
+}
+
+/*
* when nocow writeback is called. This checks for snapshots or COW copies
* of the extents that exist in the file, and COWs the file as required.
*
@@ -2016,6 +1986,11 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
struct btrfs_root *root = inode->root;
struct btrfs_path *path;
u64 cow_start = (u64)-1;
+ /*
+ * If not 0, represents the inclusive end of the last fallback_to_cow()
+ * range. Only for error handling.
+ */
+ u64 cow_end = 0;
u64 cur_offset = start;
int ret;
bool check_prev = true;
@@ -2176,6 +2151,7 @@ must_cow:
found_key.offset - 1);
cow_start = (u64)-1;
if (ret) {
+ cow_end = found_key.offset - 1;
btrfs_dec_nocow_writers(nocow_bg);
goto error;
}
@@ -2249,11 +2225,12 @@ must_cow:
cow_start = cur_offset;
if (cow_start != (u64)-1) {
- cur_offset = end;
ret = fallback_to_cow(inode, locked_folio, cow_start, end);
cow_start = (u64)-1;
- if (ret)
+ if (ret) {
+ cow_end = end;
goto error;
+ }
}
btrfs_free_path(path);
@@ -2261,12 +2238,41 @@ must_cow:
error:
/*
+ * There are several error cases:
+ *
+ * 1) Failed without falling back to COW
+ * start cur_offset end
+ * |/////////////| |
+ *
+ * For range [start, cur_offset) the folios are already unlocked (except
+ * @locked_folio), EXTENT_DELALLOC already removed.
+ * Only need to clear the dirty flag as they will never be submitted.
+ * Ordered extent and extent maps are handled by
+ * btrfs_mark_ordered_io_finished() inside run_delalloc_range().
+ *
+ * 2) Failed with error from fallback_to_cow()
+ * start cur_offset cow_end end
+ * |/////////////|-----------| |
+ *
+ * For range [start, cur_offset) it's the same as case 1).
+ * But for range [cur_offset, cow_end), the folios have dirty flag
+ * cleared and unlocked, EXTENT_DELALLOC cleared by cow_file_range().
+ *
+ * Thus we should not call extent_clear_unlock_delalloc() on range
+ * [cur_offset, cow_end), as the folios are already unlocked.
+ *
+ * So clear the folio dirty flags for [start, cur_offset) first.
+ */
+ if (cur_offset > start)
+ cleanup_dirty_folios(inode, locked_folio, start, cur_offset - 1, ret);
+
+ /*
* If an error happened while a COW region is outstanding, cur_offset
- * needs to be reset to cow_start to ensure the COW region is unlocked
- * as well.
+ * needs to be reset to @cow_end + 1 to skip the COW range, as
+ * cow_file_range() will do the proper cleanup at error.
*/
- if (cow_start != (u64)-1)
- cur_offset = cow_start;
+ if (cow_end)
+ cur_offset = cow_end + 1;
/*
* We need to lock the extent here because we're clearing DELALLOC and
@@ -2286,6 +2292,10 @@ error:
btrfs_qgroup_free_data(inode, NULL, cur_offset, end - cur_offset + 1, NULL);
}
btrfs_free_path(path);
+ btrfs_err_rl(fs_info,
+ "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
+ __func__, btrfs_root_id(inode->root),
+ btrfs_ino(inode), start, end + 1 - start, ret);
return ret;
}
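
As an aside, the two failure layouts described in the error comment above boil down to a simple partition of [start, end]. The following standalone snippet is only an illustration of that arithmetic (plain C, not kernel code):

/* Illustration only: how the nocow error path partitions [start, end]. */
#include <stdint.h>
#include <stdio.h>

static void show_error_partition(uint64_t start, uint64_t end,
				 uint64_t cur_offset, uint64_t cow_end)
{
	/* [start, cur_offset): folios only need dirty cleared + writeback finished. */
	if (cur_offset > start)
		printf("clear dirty:   [%llu, %llu]\n",
		       (unsigned long long)start, (unsigned long long)(cur_offset - 1));

	/* [cur_offset, cow_end]: cow_file_range() already cleaned this up on error. */
	if (cow_end) {
		printf("already clean: [%llu, %llu]\n",
		       (unsigned long long)cur_offset, (unsigned long long)cow_end);
		cur_offset = cow_end + 1;
	}

	/* [cur_offset, end]: still locked, handled by extent_clear_unlock_delalloc(). */
	if (cur_offset <= end)
		printf("unlock+clear:  [%llu, %llu]\n",
		       (unsigned long long)cur_offset, (unsigned long long)end);
}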
@@ -2336,8 +2346,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_fol
out:
if (ret < 0)
- btrfs_cleanup_ordered_extents(inode, locked_folio, start,
- end - start + 1);
+ btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
return ret;
}
@@ -2952,7 +2961,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(struct btrfs_file_extent_item));
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
/*
@@ -3094,34 +3102,19 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
goto out;
}
- if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
- BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
-
- btrfs_inode_safe_disk_i_size_write(inode, 0);
- if (freespace_inode)
- trans = btrfs_join_transaction_spacecache(root);
- else
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- trans = NULL;
- goto out;
- }
- trans->block_rsv = &inode->block_rsv;
- ret = btrfs_update_inode_fallback(trans, inode);
- if (ret) /* -ENOMEM or corruption */
- btrfs_abort_transaction(trans, ret);
-
- ret = btrfs_insert_raid_extent(trans, ordered_extent);
- if (ret)
- btrfs_abort_transaction(trans, ret);
-
- goto out;
+ /*
+ * If it's a COW write we need to lock the extent range as we will be
+ * inserting/replacing file extent items and unpinning an extent map.
+ * This must be taken before joining a transaction, as it's a higher
+ * level lock (like the inode's VFS lock), otherwise we can run into an
+ * ABBA deadlock with other tasks (transactions work like a lock,
+ * depending on their current state).
+ */
+ if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
+ clear_bits |= EXTENT_LOCKED;
+ lock_extent(io_tree, start, end, &cached_state);
}
- clear_bits |= EXTENT_LOCKED;
- lock_extent(io_tree, start, end, &cached_state);
-
if (freespace_inode)
trans = btrfs_join_transaction_spacecache(root);
else
@@ -3135,8 +3128,28 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
trans->block_rsv = &inode->block_rsv;
ret = btrfs_insert_raid_extent(trans, ordered_extent);
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
goto out;
+ }
+
+ if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
+ /* Logic error */
+ ASSERT(list_empty(&ordered_extent->list));
+ if (!list_empty(&ordered_extent->list)) {
+ ret = -EINVAL;
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+
+ btrfs_inode_safe_disk_i_size_write(inode, 0);
+ ret = btrfs_update_inode_fallback(trans, inode);
+ if (ret) {
+ /* -ENOMEM or corruption */
+ btrfs_abort_transaction(trans, ret);
+ }
+ goto out;
+ }
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compress_type = ordered_extent->compress_type;
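
The lock-ordering rule stated in the comment above (take the extent range lock before joining the transaction) is the usual ABBA-avoidance discipline. A tiny standalone illustration with plain mutexes, not btrfs locks, might look like:

/* Illustration of the ordering rule: every task takes the "outer" lock
 * (the extent range lock) before the "inner" one (joining the transaction),
 * never the other way around.
 */
#include <pthread.h>

static pthread_mutex_t extent_range_lock = PTHREAD_MUTEX_INITIALIZER; /* outer */
static pthread_mutex_t transaction_lock  = PTHREAD_MUTEX_INITIALIZER; /* inner */

static void finish_ordered_extent(void)
{
	pthread_mutex_lock(&extent_range_lock);   /* 1st: the higher level lock */
	pthread_mutex_lock(&transaction_lock);    /* 2nd: "join the transaction" */
	/* ... insert/replace file extent items ... */
	pthread_mutex_unlock(&transaction_lock);
	pthread_mutex_unlock(&extent_range_lock);
}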
@@ -3791,14 +3804,45 @@ static int btrfs_init_file_extent_tree(struct btrfs_inode *inode)
return 0;
}
+static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
+{
+ struct btrfs_root *root = inode->root;
+ struct btrfs_inode *existing;
+ const u64 ino = btrfs_ino(inode);
+ int ret;
+
+ if (inode_unhashed(&inode->vfs_inode))
+ return 0;
+
+ if (prealloc) {
+ ret = xa_reserve(&root->inodes, ino, GFP_NOFS);
+ if (ret)
+ return ret;
+ }
+
+ existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);
+
+ if (xa_is_err(existing)) {
+ ret = xa_err(existing);
+ ASSERT(ret != -EINVAL);
+ ASSERT(ret != -ENOMEM);
+ return ret;
+ } else if (existing) {
+ WARN_ON(!(existing->vfs_inode.i_state & (I_WILL_FREE | I_FREEING)));
+ }
+
+ return 0;
+}
+
/*
- * read an inode from the btree into the in-memory inode
+ * Read a locked inode from the btree into the in-memory inode and add it to
+ * its root list/tree.
+ *
+ * On failure clean up the inode.
*/
-static int btrfs_read_locked_inode(struct inode *inode,
- struct btrfs_path *in_path)
+static int btrfs_read_locked_inode(struct inode *inode, struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- struct btrfs_path *path = in_path;
struct extent_buffer *leaf;
struct btrfs_inode_item *inode_item;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -3812,25 +3856,25 @@ static int btrfs_read_locked_inode(struct inode *inode,
ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
if (ret)
- return ret;
+ goto out;
ret = btrfs_fill_inode(inode, &rdev);
if (!ret)
filled = true;
- if (!path) {
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- }
+ ASSERT(path);
btrfs_get_inode_key(BTRFS_I(inode), &location);
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
if (ret) {
- if (path != in_path)
- btrfs_free_path(path);
- return ret;
+ /*
+ * ret > 0 can come from btrfs_search_slot() called by
+ * btrfs_lookup_inode(); it means the inode item was not found.
+ */
+ if (ret > 0)
+ ret = -ENOENT;
+ goto out;
}
leaf = path->nodes[0];
@@ -3965,8 +4009,6 @@ cache_acl:
btrfs_ino(BTRFS_I(inode)),
btrfs_root_id(root), ret);
}
- if (path != in_path)
- btrfs_free_path(path);
if (!maybe_acls)
cache_no_acl(inode);
@@ -3993,7 +4035,15 @@ cache_acl:
}
btrfs_sync_inode_flags_to_i_flags(inode);
+
+ ret = btrfs_add_inode_to_root(BTRFS_I(inode), true);
+ if (ret)
+ goto out;
+
return 0;
+out:
+ iget_failed(inode);
+ return ret;
}
/*
@@ -4074,7 +4124,6 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item);
fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_set_inode_last_trans(trans, inode);
ret = 0;
failed:
@@ -4368,11 +4417,8 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
*/
if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
- if (IS_ERR_OR_NULL(di)) {
- if (!di)
- ret = -ENOENT;
- else
- ret = PTR_ERR(di);
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -5505,35 +5551,7 @@ out:
return err;
}
-static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
-{
- struct btrfs_root *root = inode->root;
- struct btrfs_inode *existing;
- const u64 ino = btrfs_ino(inode);
- int ret;
-
- if (inode_unhashed(&inode->vfs_inode))
- return 0;
-
- if (prealloc) {
- ret = xa_reserve(&root->inodes, ino, GFP_NOFS);
- if (ret)
- return ret;
- }
-
- existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);
-
- if (xa_is_err(existing)) {
- ret = xa_err(existing);
- ASSERT(ret != -EINVAL);
- ASSERT(ret != -ENOMEM);
- return ret;
- } else if (existing) {
- WARN_ON(!(existing->vfs_inode.i_state & (I_WILL_FREE | I_FREEING)));
- }
- return 0;
-}
static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
{
@@ -5595,10 +5613,8 @@ static struct inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root)
}
/*
- * Get an inode object given its inode number and corresponding root.
- * Path can be preallocated to prevent recursing back to iget through
- * allocator. NULL is also valid but may require an additional allocation
- * later.
+ * Get an inode object given its inode number and corresponding root. Path is
+ * preallocated to prevent recursing back to iget through allocator.
*/
struct inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
struct btrfs_path *path)
@@ -5614,30 +5630,40 @@ struct inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
return inode;
ret = btrfs_read_locked_inode(inode, path);
- /*
- * ret > 0 can come from btrfs_search_slot called by
- * btrfs_read_locked_inode(), this means the inode item was not found.
- */
- if (ret > 0)
- ret = -ENOENT;
- if (ret < 0)
- goto error;
-
- ret = btrfs_add_inode_to_root(BTRFS_I(inode), true);
- if (ret < 0)
- goto error;
+ if (ret)
+ return ERR_PTR(ret);
unlock_new_inode(inode);
-
return inode;
-error:
- iget_failed(inode);
- return ERR_PTR(ret);
}
+/*
+ * Get an inode object given its inode number and corresponding root.
+ */
struct inode *btrfs_iget(u64 ino, struct btrfs_root *root)
{
- return btrfs_iget_path(ino, root, NULL);
+ struct inode *inode;
+ struct btrfs_path *path;
+ int ret;
+
+ inode = btrfs_iget_locked(ino, root);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return ERR_PTR(-ENOMEM);
+
+ ret = btrfs_read_locked_inode(inode, path);
+ btrfs_free_path(path);
+ if (ret)
+ return ERR_PTR(ret);
+
+ unlock_new_inode(inode);
+ return inode;
}
static struct inode *new_simple_dir(struct inode *dir,
@@ -6026,7 +6052,7 @@ again:
* offset. This means that new entries created during readdir
* are *guaranteed* to be seen in the future by that readdir.
* This has broken buggy programs which operate on names as
- * they're returned by readdir. Until we re-use freed offsets
+ * they're returned by readdir. Until we reuse freed offsets
* we have this hack to stop new entries from being returned
* under the assumption that they'll never reach this huge
* offset.
@@ -6392,7 +6418,6 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
}
}
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
/*
* We don't need the path anymore, plus inheriting properties, adding
* ACLs, security xattrs, orphan item or adding the link, will result in
@@ -6768,8 +6793,7 @@ static noinline int uncompress_inline(struct btrfs_path *path,
return ret;
}
-static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path,
- struct folio *folio)
+static int read_inline_extent(struct btrfs_path *path, struct folio *folio)
{
struct btrfs_file_extent_item *fi;
void *kaddr;
@@ -6967,7 +6991,7 @@ next:
ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE);
ASSERT(em->len == fs_info->sectorsize);
- ret = read_inline_extent(inode, path, folio);
+ ret = read_inline_extent(path, folio);
if (ret < 0)
goto out;
goto insert;
@@ -7024,8 +7048,6 @@ static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
* @orig_start: (optional) Return the original file offset of the file extent
* @orig_len: (optional) Return the original on-disk length of the file extent
* @ram_bytes: (optional) Return the ram_bytes of the file extent
- * @strict: if true, omit optimizations that might force us into unnecessary
- * cow. e.g., don't trust generation number.
*
* Return:
* >0 and update @len if we can do nocow write
@@ -7037,7 +7059,7 @@ static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
*/
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
struct btrfs_file_extent *file_extent,
- bool nowait, bool strict)
+ bool nowait)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct can_nocow_file_extent_args nocow_args = { 0 };
@@ -7090,7 +7112,6 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
nocow_args.start = offset;
nocow_args.end = offset + *len - 1;
- nocow_args.strict = strict;
nocow_args.free_path = true;
ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
@@ -7297,7 +7318,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
*
* But already submitted bio can still be finished on this folio.
* Furthermore, endio function won't skip folio which has Ordered
- * (Private2) already cleared, so it's possible for endio and
+ * already cleared, so it's possible for endio and
* invalidate_folio to do the same ordered extent accounting twice
* on one folio.
*
@@ -7363,7 +7384,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
range_len = range_end + 1 - cur;
if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
/*
- * If Ordered (Private2) is cleared, it means endio has
+ * If Ordered is cleared, it means endio has
* already been executed for the range.
* We can't delete the extent states as
* btrfs_finish_ordered_io() may still use some of them.
@@ -7436,7 +7457,7 @@ next:
}
/*
* We have iterated through all ordered extents of the page, the page
- * should not have Ordered (Private2) anymore, or the above iteration
+ * should not have Ordered anymore, or the above iteration
* did something wrong.
*/
ASSERT(!folio_test_ordered(folio));
@@ -8040,31 +8061,45 @@ static int btrfs_rename_exchange(struct inode *old_dir,
/* src is a subvolume */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
} else { /* src is an inode */
ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
BTRFS_I(old_dentry->d_inode),
old_name, &old_rename_ctx);
- if (!ret)
- ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
- }
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out_fail;
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
+ ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
/* dest is a subvolume */
if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
} else { /* dest is an inode */
ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
BTRFS_I(new_dentry->d_inode),
new_name, &new_rename_ctx);
- if (!ret)
- ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
- }
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out_fail;
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
+ ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
@@ -8300,16 +8335,23 @@ static int btrfs_rename(struct mnt_idmap *idmap,
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
} else {
ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
BTRFS_I(d_inode(old_dentry)),
&old_fname.disk_name, &rename_ctx);
- if (!ret)
- ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
- }
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out_fail;
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
+ ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
if (new_inode) {
@@ -8317,18 +8359,27 @@ static int btrfs_rename(struct mnt_idmap *idmap,
if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
BUG_ON(new_inode->i_nlink == 0);
} else {
ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
BTRFS_I(d_inode(new_dentry)),
&new_fname.disk_name);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
- if (!ret && new_inode->i_nlink == 0)
+ if (new_inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans,
BTRFS_I(d_inode(new_dentry)));
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out_fail;
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
+ }
}
}
@@ -8668,7 +8719,6 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
ptr = btrfs_file_extent_inline_start(ei);
write_extent_buffer(leaf, symname, ptr, name_len);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_free_path(path);
d_instantiate_new(dentry, inode);
@@ -8975,28 +9025,6 @@ out_inode:
return finish_open_simple(file, ret);
}
-void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
-{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- unsigned long index = start >> PAGE_SHIFT;
- unsigned long end_index = end >> PAGE_SHIFT;
- struct folio *folio;
- u32 len;
-
- ASSERT(end + 1 - start <= U32_MAX);
- len = end + 1 - start;
- while (index <= end_index) {
- folio = __filemap_get_folio(inode->vfs_inode.i_mapping, index, 0, 0);
- ASSERT(!IS_ERR(folio)); /* folios should be in the extent_io_tree */
-
- /* This is for data, which doesn't yet support larger folio. */
- ASSERT(folio_order(folio) == 0);
- btrfs_folio_set_writeback(fs_info, folio, start, len);
- folio_put(folio);
- index++;
- }
-}
-
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
int compress_type)
{
@@ -9041,12 +9069,16 @@ static ssize_t btrfs_encoded_read_inline(
unsigned long ptr;
void *tmp;
ssize_t ret;
+ const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
+
+ path->nowait = nowait;
+
ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
extent_start, 0);
if (ret) {
@@ -9109,8 +9141,9 @@ out:
}
struct btrfs_encoded_read_private {
- wait_queue_head_t wait;
- atomic_t pending;
+ struct completion done;
+ void *uring_ctx;
+ refcount_t pending_refs;
blk_status_t status;
};
@@ -9129,26 +9162,40 @@ static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
*/
WRITE_ONCE(priv->status, bbio->bio.bi_status);
}
- if (!atomic_dec_return(&priv->pending))
- wake_up(&priv->wait);
+ if (refcount_dec_and_test(&priv->pending_refs)) {
+ int err = blk_status_to_errno(READ_ONCE(priv->status));
+
+ if (priv->uring_ctx) {
+ btrfs_uring_read_extent_endio(priv->uring_ctx, err);
+ kfree(priv);
+ } else {
+ complete(&priv->done);
+ }
+ }
bio_put(&bbio->bio);
}
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
- u64 file_offset, u64 disk_bytenr,
- u64 disk_io_size, struct page **pages)
+ u64 disk_bytenr, u64 disk_io_size,
+ struct page **pages, void *uring_ctx)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct btrfs_encoded_read_private priv = {
- .pending = ATOMIC_INIT(1),
- };
+ struct btrfs_encoded_read_private *priv;
unsigned long i = 0;
struct btrfs_bio *bbio;
+ int ret;
+
+ priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS);
+ if (!priv)
+ return -ENOMEM;
- init_waitqueue_head(&priv.wait);
+ init_completion(&priv->done);
+ refcount_set(&priv->pending_refs, 1);
+ priv->status = 0;
+ priv->uring_ctx = uring_ctx;
bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
- btrfs_encoded_read_endio, &priv);
+ btrfs_encoded_read_endio, priv);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
bbio->inode = inode;
@@ -9156,11 +9203,11 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
- atomic_inc(&priv.pending);
+ refcount_inc(&priv->pending_refs);
btrfs_submit_bbio(bbio, 0);
bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
- btrfs_encoded_read_endio, &priv);
+ btrfs_encoded_read_endio, priv);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
bbio->inode = inode;
continue;
@@ -9171,22 +9218,33 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
disk_io_size -= bytes;
} while (disk_io_size);
- atomic_inc(&priv.pending);
+ refcount_inc(&priv->pending_refs);
btrfs_submit_bbio(bbio, 0);
- if (atomic_dec_return(&priv.pending))
- io_wait_event(priv.wait, !atomic_read(&priv.pending));
- /* See btrfs_encoded_read_endio() for ordering. */
- return blk_status_to_errno(READ_ONCE(priv.status));
+ if (uring_ctx) {
+ if (refcount_dec_and_test(&priv->pending_refs)) {
+ ret = blk_status_to_errno(READ_ONCE(priv->status));
+ btrfs_uring_read_extent_endio(uring_ctx, ret);
+ kfree(priv);
+ return ret;
+ }
+
+ return -EIOCBQUEUED;
+ } else {
+ if (!refcount_dec_and_test(&priv->pending_refs))
+ wait_for_completion_io(&priv->done);
+ /* See btrfs_encoded_read_endio() for ordering. */
+ ret = blk_status_to_errno(READ_ONCE(priv->status));
+ kfree(priv);
+ return ret;
+ }
}
-static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
- struct iov_iter *iter,
- u64 start, u64 lockend,
- struct extent_state **cached_state,
- u64 disk_bytenr, u64 disk_io_size,
- size_t count, bool compressed,
- bool *unlocked)
+ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
+ u64 start, u64 lockend,
+ struct extent_state **cached_state,
+ u64 disk_bytenr, u64 disk_io_size,
+ size_t count, bool compressed, bool *unlocked)
{
struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
struct extent_io_tree *io_tree = &inode->io_tree;
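
The conversion above replaces the on-stack waitqueue/atomic pair with a heap-allocated private context whose last reference decides how the read completes: synchronously via a completion, or asynchronously via the io_uring callback. A minimal sketch of that pattern, using generic names rather than btrfs APIs, could be:

#include <linux/completion.h>
#include <linux/refcount.h>
#include <linux/slab.h>

/* Sketch of the "last reference completes" pattern used above (generic names,
 * not a btrfs API). One reference per in-flight bio plus the submitter's own.
 */
struct read_ctx {
	refcount_t pending;
	struct completion done;			/* used by synchronous callers */
	void (*async_done)(void *ctx, int err);	/* set for io_uring style callers */
	void *async_ctx;
	int status;
};

static void read_ctx_put(struct read_ctx *ctx)
{
	if (!refcount_dec_and_test(&ctx->pending))
		return;

	if (ctx->async_done) {
		/* Asynchronous caller: complete via callback and free here. */
		ctx->async_done(ctx->async_ctx, ctx->status);
		kfree(ctx);
	} else {
		/* Synchronous caller: it waits on 'done', reads status, frees ctx. */
		complete(&ctx->done);
	}
}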
@@ -9206,8 +9264,8 @@ static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
goto out;
}
- ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
- disk_io_size, pages);
+ ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr,
+ disk_io_size, pages, NULL);
if (ret)
goto out;
@@ -9247,21 +9305,26 @@ out:
}
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
- struct btrfs_ioctl_encoded_io_args *encoded)
+ struct btrfs_ioctl_encoded_io_args *encoded,
+ struct extent_state **cached_state,
+ u64 *disk_bytenr, u64 *disk_io_size)
{
struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_io_tree *io_tree = &inode->io_tree;
ssize_t ret;
size_t count = iov_iter_count(iter);
- u64 start, lockend, disk_bytenr, disk_io_size;
- struct extent_state *cached_state = NULL;
+ u64 start, lockend;
struct extent_map *em;
+ const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
bool unlocked = false;
file_accessed(iocb->ki_filp);
- btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
+ ret = btrfs_inode_lock(inode,
+ BTRFS_ILOCK_SHARED | (nowait ? BTRFS_ILOCK_TRY : 0));
+ if (ret)
+ return ret;
if (iocb->ki_pos >= inode->vfs_inode.i_size) {
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
@@ -9274,21 +9337,46 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
*/
lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
- for (;;) {
+ if (nowait) {
struct btrfs_ordered_extent *ordered;
- ret = btrfs_wait_ordered_range(inode, start,
- lockend - start + 1);
- if (ret)
+ if (filemap_range_needs_writeback(inode->vfs_inode.i_mapping,
+ start, lockend)) {
+ ret = -EAGAIN;
goto out_unlock_inode;
- lock_extent(io_tree, start, lockend, &cached_state);
+ }
+
+ if (!try_lock_extent(io_tree, start, lockend, cached_state)) {
+ ret = -EAGAIN;
+ goto out_unlock_inode;
+ }
+
ordered = btrfs_lookup_ordered_range(inode, start,
lockend - start + 1);
- if (!ordered)
- break;
- btrfs_put_ordered_extent(ordered);
- unlock_extent(io_tree, start, lockend, &cached_state);
- cond_resched();
+ if (ordered) {
+ btrfs_put_ordered_extent(ordered);
+ unlock_extent(io_tree, start, lockend, cached_state);
+ ret = -EAGAIN;
+ goto out_unlock_inode;
+ }
+ } else {
+ for (;;) {
+ struct btrfs_ordered_extent *ordered;
+
+ ret = btrfs_wait_ordered_range(inode, start,
+ lockend - start + 1);
+ if (ret)
+ goto out_unlock_inode;
+
+ lock_extent(io_tree, start, lockend, cached_state);
+ ordered = btrfs_lookup_ordered_range(inode, start,
+ lockend - start + 1);
+ if (!ordered)
+ break;
+ btrfs_put_ordered_extent(ordered);
+ unlock_extent(io_tree, start, lockend, cached_state);
+ cond_resched();
+ }
}
em = btrfs_get_extent(inode, NULL, start, lockend - start + 1);
@@ -9307,9 +9395,9 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
free_extent_map(em);
em = NULL;
ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
- &cached_state, extent_start,
+ cached_state, extent_start,
count, encoded, &unlocked);
- goto out;
+ goto out_unlock_extent;
}
/*
@@ -9320,12 +9408,12 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
inode->vfs_inode.i_size) - iocb->ki_pos;
if (em->disk_bytenr == EXTENT_MAP_HOLE ||
(em->flags & EXTENT_FLAG_PREALLOC)) {
- disk_bytenr = EXTENT_MAP_HOLE;
+ *disk_bytenr = EXTENT_MAP_HOLE;
count = min_t(u64, count, encoded->len);
encoded->len = count;
encoded->unencoded_len = count;
} else if (extent_map_is_compressed(em)) {
- disk_bytenr = em->disk_bytenr;
+ *disk_bytenr = em->disk_bytenr;
/*
* Bail if the buffer isn't large enough to return the whole
* compressed extent.
@@ -9334,7 +9422,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
ret = -ENOBUFS;
goto out_em;
}
- disk_io_size = em->disk_num_bytes;
+ *disk_io_size = em->disk_num_bytes;
count = em->disk_num_bytes;
encoded->unencoded_len = em->ram_bytes;
encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset);
@@ -9344,47 +9432,42 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
goto out_em;
encoded->compression = ret;
} else {
- disk_bytenr = extent_map_block_start(em) + (start - em->start);
+ *disk_bytenr = extent_map_block_start(em) + (start - em->start);
if (encoded->len > count)
encoded->len = count;
/*
* Don't read beyond what we locked. This also limits the page
* allocations that we'll do.
*/
- disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
- count = start + disk_io_size - iocb->ki_pos;
+ *disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
+ count = start + *disk_io_size - iocb->ki_pos;
encoded->len = count;
encoded->unencoded_len = count;
- disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize);
+ *disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize);
}
free_extent_map(em);
em = NULL;
- if (disk_bytenr == EXTENT_MAP_HOLE) {
- unlock_extent(io_tree, start, lockend, &cached_state);
+ if (*disk_bytenr == EXTENT_MAP_HOLE) {
+ unlock_extent(io_tree, start, lockend, cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
unlocked = true;
ret = iov_iter_zero(count, iter);
if (ret != count)
ret = -EFAULT;
} else {
- ret = btrfs_encoded_read_regular(iocb, iter, start, lockend,
- &cached_state, disk_bytenr,
- disk_io_size, count,
- encoded->compression,
- &unlocked);
+ ret = -EIOCBQUEUED;
+ goto out_unlock_extent;
}
-out:
- if (ret >= 0)
- iocb->ki_pos += encoded->len;
out_em:
free_extent_map(em);
out_unlock_extent:
- if (!unlocked)
- unlock_extent(io_tree, start, lockend, &cached_state);
+ /* Leave inode and extent locked if we need to do a read. */
+ if (!unlocked && ret != -EIOCBQUEUED)
+ unlock_extent(io_tree, start, lockend, cached_state);
out_unlock_inode:
- if (!unlocked)
+ if (!unlocked && ret != -EIOCBQUEUED)
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
return ret;
}
@@ -9495,7 +9578,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
*/
disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
- folios = kvcalloc(nr_folios, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
+ folios = kvcalloc(nr_folios, sizeof(struct folio *), GFP_KERNEL_ACCOUNT);
if (!folios)
return -ENOMEM;
for (i = 0; i < nr_folios; i++) {
@@ -9559,7 +9642,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
if (encoded->unencoded_len == encoded->len &&
encoded->unencoded_offset == 0 &&
can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
- ret = __cow_file_range_inline(inode, start, encoded->len,
+ ret = __cow_file_range_inline(inode, encoded->len,
orig_count, compression, folios[0],
true);
if (ret <= 0) {
@@ -9779,15 +9862,25 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_state *cached_state = NULL;
- struct extent_map *em = NULL;
struct btrfs_chunk_map *map = NULL;
struct btrfs_device *device = NULL;
struct btrfs_swap_info bsi = {
.lowest_ppage = (sector_t)-1ULL,
};
+ struct btrfs_backref_share_check_ctx *backref_ctx = NULL;
+ struct btrfs_path *path = NULL;
int ret = 0;
u64 isize;
- u64 start;
+ u64 prev_extent_end = 0;
+
+ /*
+ * Acquire the inode's mmap lock to prevent races with memory mapped
+ * writes, as they could happen after we flush delalloc below and before
+ * we lock the extent range further below. The inode was already locked
+ * higher up in the call chain.
+ */
+ btrfs_assert_inode_locked(BTRFS_I(inode));
+ down_write(&BTRFS_I(inode)->i_mmap_lock);
/*
* If the swap file was just created, make sure delalloc is done. If the
@@ -9796,22 +9889,32 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
*/
ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
if (ret)
- return ret;
+ goto out_unlock_mmap;
/*
* The inode is locked, so these flags won't change after we check them.
*/
if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
btrfs_warn(fs_info, "swapfile must not be compressed");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
btrfs_warn(fs_info, "swapfile must not be copy-on-write");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
btrfs_warn(fs_info, "swapfile must not be checksummed");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
+ }
+
+ path = btrfs_alloc_path();
+ backref_ctx = btrfs_alloc_backref_share_check_ctx();
+ if (!path || !backref_ctx) {
+ ret = -ENOMEM;
+ goto out_unlock_mmap;
}
/*
@@ -9826,7 +9929,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
btrfs_warn(fs_info,
"cannot activate swapfile while exclusive operation is running");
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_unlock_mmap;
}
/*
@@ -9840,7 +9944,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
btrfs_exclop_finish(fs_info);
btrfs_warn(fs_info,
"cannot activate swapfile because snapshot creation is in progress");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
/*
* Snapshots can create extents which require COW even if NODATACOW is
@@ -9856,11 +9961,13 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (btrfs_root_dead(root)) {
spin_unlock(&root->root_item_lock);
+ btrfs_drew_write_unlock(&root->snapshot_lock);
btrfs_exclop_finish(fs_info);
btrfs_warn(fs_info,
"cannot activate swapfile because subvolume %llu is being deleted",
btrfs_root_id(root));
- return -EPERM;
+ ret = -EPERM;
+ goto out_unlock_mmap;
}
atomic_inc(&root->nr_swapfiles);
spin_unlock(&root->root_item_lock);
@@ -9868,24 +9975,39 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
lock_extent(io_tree, 0, isize - 1, &cached_state);
- start = 0;
- while (start < isize) {
- u64 logical_block_start, physical_block_start;
+ while (prev_extent_end < isize) {
+ struct btrfs_key key;
+ struct extent_buffer *leaf;
+ struct btrfs_file_extent_item *ei;
struct btrfs_block_group *bg;
- u64 len = isize - start;
+ u64 logical_block_start;
+ u64 physical_block_start;
+ u64 extent_gen;
+ u64 disk_bytenr;
+ u64 len;
- em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
+ key.objectid = btrfs_ino(BTRFS_I(inode));
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = prev_extent_end;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
goto out;
- }
- if (em->disk_bytenr == EXTENT_MAP_HOLE) {
+ /*
+ * If key not found it means we have an implicit hole (NO_HOLES
+ * is enabled).
+ */
+ if (ret > 0) {
btrfs_warn(fs_info, "swapfile must not have holes");
ret = -EINVAL;
goto out;
}
- if (em->disk_bytenr == EXTENT_MAP_INLINE) {
+
+ leaf = path->nodes[0];
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
/*
* It's unlikely we'll ever actually find ourselves
* here, as a file small enough to fit inline won't be
@@ -9897,23 +10019,45 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
ret = -EINVAL;
goto out;
}
- if (extent_map_is_compressed(em)) {
+
+ if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
btrfs_warn(fs_info, "swapfile must not be compressed");
ret = -EINVAL;
goto out;
}
- logical_block_start = extent_map_block_start(em) + (start - em->start);
- len = min(len, em->len - (start - em->start));
- free_extent_map(em);
- em = NULL;
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
+ if (disk_bytenr == 0) {
+ btrfs_warn(fs_info, "swapfile must not have holes");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei);
+ extent_gen = btrfs_file_extent_generation(leaf, ei);
+ prev_extent_end = btrfs_file_extent_end(path);
+
+ if (prev_extent_end > isize)
+ len = isize - key.offset;
+ else
+ len = btrfs_file_extent_num_bytes(leaf, ei);
- ret = can_nocow_extent(inode, start, &len, NULL, false, true);
+ backref_ctx->curr_leaf_bytenr = leaf->start;
+
+ /*
+ * We don't need the path anymore, so release it to avoid deadlocks
+ * when calling btrfs_is_data_extent_shared(): joining a transaction
+ * can block waiting for the current one's commit, which in turn may
+ * be trying to lock the same leaf to flush delayed items, for example.
+ */
+ btrfs_release_path(path);
+
+ ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr,
+ extent_gen, backref_ctx);
if (ret < 0) {
goto out;
- } else if (ret) {
- ret = 0;
- } else {
+ } else if (ret > 0) {
btrfs_warn(fs_info,
"swapfile must not be copy-on-write");
ret = -EINVAL;
@@ -9948,7 +10092,6 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
physical_block_start = (map->stripes[0].physical +
(logical_block_start - map->start));
- len = min(len, map->chunk_len - (logical_block_start - map->start));
btrfs_free_chunk_map(map);
map = NULL;
@@ -9989,20 +10132,23 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (ret)
goto out;
}
- bsi.start = start;
+ bsi.start = key.offset;
bsi.block_start = physical_block_start;
bsi.block_len = len;
}
- start += len;
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
+
+ cond_resched();
}
if (bsi.block_len)
ret = btrfs_add_swap_extent(sis, &bsi);
out:
- if (!IS_ERR_OR_NULL(em))
- free_extent_map(em);
if (!IS_ERR_OR_NULL(map))
btrfs_free_chunk_map(map);
@@ -10015,6 +10161,10 @@ out:
btrfs_exclop_finish(fs_info);
+out_unlock_mmap:
+ up_write(&BTRFS_I(inode)->i_mmap_lock);
+ btrfs_free_backref_share_ctx(backref_ctx);
+ btrfs_free_path(path);
if (ret)
return ret;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 226c91fe31a7..ae98269a5e3a 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -29,6 +29,7 @@
#include <linux/fileattr.h>
#include <linux/fsverity.h>
#include <linux/sched/xacct.h>
+#include <linux/io_uring/cmd.h>
#include "ctree.h"
#include "disk-io.h"
#include "export.h"
@@ -402,86 +403,6 @@ update_flags:
return ret;
}
-/*
- * Start exclusive operation @type, return true on success
- */
-bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
- enum btrfs_exclusive_operation type)
-{
- bool ret = false;
-
- spin_lock(&fs_info->super_lock);
- if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {
- fs_info->exclusive_operation = type;
- ret = true;
- }
- spin_unlock(&fs_info->super_lock);
-
- return ret;
-}
-
-/*
- * Conditionally allow to enter the exclusive operation in case it's compatible
- * with the running one. This must be paired with btrfs_exclop_start_unlock and
- * btrfs_exclop_finish.
- *
- * Compatibility:
- * - the same type is already running
- * - when trying to add a device and balance has been paused
- * - not BTRFS_EXCLOP_NONE - this is intentionally incompatible and the caller
- * must check the condition first that would allow none -> @type
- */
-bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
- enum btrfs_exclusive_operation type)
-{
- spin_lock(&fs_info->super_lock);
- if (fs_info->exclusive_operation == type ||
- (fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED &&
- type == BTRFS_EXCLOP_DEV_ADD))
- return true;
-
- spin_unlock(&fs_info->super_lock);
- return false;
-}
-
-void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info)
-{
- spin_unlock(&fs_info->super_lock);
-}
-
-void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
-{
- spin_lock(&fs_info->super_lock);
- WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
- spin_unlock(&fs_info->super_lock);
- sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
-}
-
-void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
- enum btrfs_exclusive_operation op)
-{
- switch (op) {
- case BTRFS_EXCLOP_BALANCE_PAUSED:
- spin_lock(&fs_info->super_lock);
- ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE ||
- fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD ||
- fs_info->exclusive_operation == BTRFS_EXCLOP_NONE ||
- fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
- fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE_PAUSED;
- spin_unlock(&fs_info->super_lock);
- break;
- case BTRFS_EXCLOP_BALANCE:
- spin_lock(&fs_info->super_lock);
- ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
- fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
- spin_unlock(&fs_info->super_lock);
- break;
- default:
- btrfs_warn(fs_info,
- "invalid exclop balance operation %d requested", op);
- }
-}
-
static int btrfs_ioctl_getversion(struct inode *inode, int __user *arg)
{
return put_user(inode->i_generation, arg);
@@ -550,17 +471,6 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
return ret;
}
-int __pure btrfs_is_empty_uuid(const u8 *uuid)
-{
- int i;
-
- for (i = 0; i < BTRFS_UUID_SIZE; i++) {
- if (uuid[i])
- return 0;
- }
- return 1;
-}
-
/*
* Calculate the number of transaction items to reserve for creating a subvolume
* or snapshot, not including the inode, directory entries, or parent directory.
@@ -1048,7 +958,6 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
struct btrfs_qgroup_inherit *inherit)
{
int ret;
- bool snapshot_force_cow = false;
/*
* Force new buffered writes to reserve space even when NOCOW is
@@ -1067,15 +976,13 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
* creation.
*/
atomic_inc(&root->snapshot_force_cow);
- snapshot_force_cow = true;
btrfs_wait_ordered_extents(root, U64_MAX, NULL);
ret = btrfs_mksubvol(parent, idmap, name, namelen,
root, readonly, inherit);
+ atomic_dec(&root->snapshot_force_cow);
out:
- if (snapshot_force_cow)
- atomic_dec(&root->snapshot_force_cow);
btrfs_drew_read_unlock(&root->snapshot_lock);
return ret;
}
@@ -1308,9 +1215,9 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
ret = btrfs_mksubvol(&file->f_path, idmap, name,
namelen, NULL, readonly, inherit);
} else {
- struct fd src = fdget(fd);
+ CLASS(fd, src)(fd);
struct inode *src_inode;
- if (!fd_file(src)) {
+ if (fd_empty(src)) {
ret = -EINVAL;
goto out_drop_write;
}
@@ -1341,7 +1248,6 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
BTRFS_I(src_inode)->root,
readonly, inherit);
}
- fdput(src);
}
out_drop_write:
mnt_drop_write_file(file);
@@ -3010,7 +2916,6 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
@@ -4058,8 +3963,7 @@ static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info,
return 0;
}
-static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info,
- void __user *arg)
+static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -4514,12 +4418,17 @@ static int btrfs_ioctl_encoded_read(struct file *file, void __user *argp,
size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args,
flags);
size_t copy_end;
+ struct btrfs_inode *inode = BTRFS_I(file_inode(file));
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_io_tree *io_tree = &inode->io_tree;
struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
struct iov_iter iter;
loff_t pos;
struct kiocb kiocb;
ssize_t ret;
+ u64 disk_bytenr, disk_io_size;
+ struct extent_state *cached_state = NULL;
if (!capable(CAP_SYS_ADMIN)) {
ret = -EPERM;
@@ -4572,7 +4481,32 @@ static int btrfs_ioctl_encoded_read(struct file *file, void __user *argp,
init_sync_kiocb(&kiocb, file);
kiocb.ki_pos = pos;
- ret = btrfs_encoded_read(&kiocb, &iter, &args);
+ ret = btrfs_encoded_read(&kiocb, &iter, &args, &cached_state,
+ &disk_bytenr, &disk_io_size);
+
+ if (ret == -EIOCBQUEUED) {
+ bool unlocked = false;
+ u64 start, lockend, count;
+
+ start = ALIGN_DOWN(kiocb.ki_pos, fs_info->sectorsize);
+ lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
+
+ if (args.compression)
+ count = disk_io_size;
+ else
+ count = args.len;
+
+ ret = btrfs_encoded_read_regular(&kiocb, &iter, start, lockend,
+ &cached_state, disk_bytenr,
+ disk_io_size, count,
+ args.compression, &unlocked);
+
+ if (!unlocked) {
+ unlock_extent(io_tree, start, lockend, &cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ }
+ }
+
if (ret >= 0) {
fsnotify_access(file);
if (copy_to_user(argp + copy_end,
@@ -4690,6 +4624,585 @@ out_acct:
return ret;
}
+/*
+ * Context that's attached to an encoded read io_uring command, in cmd->pdu. It
+ * contains the fields in btrfs_uring_read_extent that are necessary to finish
+ * off and clean up the I/O in btrfs_uring_read_finished().
+ */
+struct btrfs_uring_priv {
+ struct io_uring_cmd *cmd;
+ struct page **pages;
+ unsigned long nr_pages;
+ struct kiocb iocb;
+ struct iovec *iov;
+ struct iov_iter iter;
+ struct extent_state *cached_state;
+ u64 count;
+ u64 start;
+ u64 lockend;
+ int err;
+ bool compressed;
+};
+
+struct io_btrfs_cmd {
+ struct btrfs_uring_priv *priv;
+};
+
+static void btrfs_uring_read_finished(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(cmd, struct io_btrfs_cmd);
+ struct btrfs_uring_priv *priv = bc->priv;
+ struct btrfs_inode *inode = BTRFS_I(file_inode(priv->iocb.ki_filp));
+ struct extent_io_tree *io_tree = &inode->io_tree;
+ unsigned long index;
+ u64 cur;
+ size_t page_offset;
+ ssize_t ret;
+
+ /* The inode lock has already been acquired in btrfs_uring_read_extent. */
+ btrfs_lockdep_inode_acquire(inode, i_rwsem);
+
+ if (priv->err) {
+ ret = priv->err;
+ goto out;
+ }
+
+ if (priv->compressed) {
+ index = 0;
+ page_offset = 0;
+ } else {
+ index = (priv->iocb.ki_pos - priv->start) >> PAGE_SHIFT;
+ page_offset = offset_in_page(priv->iocb.ki_pos - priv->start);
+ }
+ cur = 0;
+ while (cur < priv->count) {
+ size_t bytes = min_t(size_t, priv->count - cur, PAGE_SIZE - page_offset);
+
+ if (copy_page_to_iter(priv->pages[index], page_offset, bytes,
+ &priv->iter) != bytes) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ index++;
+ cur += bytes;
+ page_offset = 0;
+ }
+ ret = priv->count;
+
+out:
+ unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+
+ io_uring_cmd_done(cmd, ret, 0, issue_flags);
+ add_rchar(current, ret);
+
+ for (index = 0; index < priv->nr_pages; index++)
+ __free_page(priv->pages[index]);
+
+ kfree(priv->pages);
+ kfree(priv->iov);
+ kfree(priv);
+}
+
+void btrfs_uring_read_extent_endio(void *ctx, int err)
+{
+ struct btrfs_uring_priv *priv = ctx;
+ struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(priv->cmd, struct io_btrfs_cmd);
+
+ priv->err = err;
+ bc->priv = priv;
+
+ io_uring_cmd_complete_in_task(priv->cmd, btrfs_uring_read_finished);
+}
+
+static int btrfs_uring_read_extent(struct kiocb *iocb, struct iov_iter *iter,
+ u64 start, u64 lockend,
+ struct extent_state *cached_state,
+ u64 disk_bytenr, u64 disk_io_size,
+ size_t count, bool compressed,
+ struct iovec *iov, struct io_uring_cmd *cmd)
+{
+ struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
+ struct extent_io_tree *io_tree = &inode->io_tree;
+ struct page **pages;
+ struct btrfs_uring_priv *priv = NULL;
+ unsigned long nr_pages;
+ int ret;
+
+ nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
+ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
+ if (!pages)
+ return -ENOMEM;
+ ret = btrfs_alloc_page_array(nr_pages, pages, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_fail;
+ }
+
+ priv = kmalloc(sizeof(*priv), GFP_NOFS);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_fail;
+ }
+
+ priv->iocb = *iocb;
+ priv->iov = iov;
+ priv->iter = *iter;
+ priv->count = count;
+ priv->cmd = cmd;
+ priv->cached_state = cached_state;
+ priv->compressed = compressed;
+ priv->nr_pages = nr_pages;
+ priv->pages = pages;
+ priv->start = start;
+ priv->lockend = lockend;
+ priv->err = 0;
+
+ ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr,
+ disk_io_size, pages, priv);
+ if (ret && ret != -EIOCBQUEUED)
+ goto out_fail;
+
+ /*
+ * If we return -EIOCBQUEUED, we're deferring the cleanup to
+ * btrfs_uring_read_finished(), which will handle unlocking the extent
+ * and inode and freeing the allocations.
+ */
+
+ /*
+ * We're returning to userspace with the inode lock held, and that's
+ * okay - it'll get unlocked in a worker thread. Call
+ * btrfs_lockdep_inode_release() to avoid confusing lockdep.
+ */
+ btrfs_lockdep_inode_release(inode, i_rwsem);
+
+ return -EIOCBQUEUED;
+
+out_fail:
+ unlock_extent(io_tree, start, lockend, &cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ kfree(priv);
+ return ret;
+}
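
btrfs_uring_read_finished() and btrfs_uring_read_extent_endio() above implement the usual io_uring command deferral: the endio handler, running in atomic context, stashes its state in the command's pdu and punts the user-visible completion to task work. Stripped of the btrfs specifics, the shape is roughly as follows (hypothetical my_* names; the io_uring_cmd_* calls are the ones used above):

#include <linux/io_uring/cmd.h>
#include <linux/slab.h>

struct my_priv {
	struct io_uring_cmd *cmd;
	int err;
	/* ... everything the finish step needs: pages, iovec, locks ... */
};

struct my_cmd_pdu {
	struct my_priv *priv;		/* lives in cmd->pdu */
};

/* Runs in task context, where it is safe to copy to userspace and unlock. */
static void my_cmd_finished(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct my_cmd_pdu *pdu = io_uring_cmd_to_pdu(cmd, struct my_cmd_pdu);
	struct my_priv *priv = pdu->priv;
	ssize_t ret = priv->err;	/* hypothetical: copy pages, drop locks here */

	io_uring_cmd_done(cmd, ret, 0, issue_flags);
	kfree(priv);
}

/* Called from bio endio (atomic) context: just stash the result and defer. */
static void my_read_endio(void *ctx, int err)
{
	struct my_priv *priv = ctx;

	priv->err = err;
	io_uring_cmd_to_pdu(priv->cmd, struct my_cmd_pdu)->priv = priv;
	io_uring_cmd_complete_in_task(priv->cmd, my_cmd_finished);
}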
+
+struct btrfs_uring_encoded_data {
+ struct btrfs_ioctl_encoded_io_args args;
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov;
+ struct iov_iter iter;
+};
+
+static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args, flags);
+ size_t copy_end;
+ int ret;
+ u64 disk_bytenr, disk_io_size;
+ struct file *file;
+ struct btrfs_inode *inode;
+ struct btrfs_fs_info *fs_info;
+ struct extent_io_tree *io_tree;
+ loff_t pos;
+ struct kiocb kiocb;
+ struct extent_state *cached_state = NULL;
+ u64 start, lockend;
+ void __user *sqe_addr;
+ struct btrfs_uring_encoded_data *data = io_uring_cmd_get_async_data(cmd)->op_data;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto out_acct;
+ }
+ file = cmd->file;
+ inode = BTRFS_I(file->f_inode);
+ fs_info = inode->root->fs_info;
+ io_tree = &inode->io_tree;
+ sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr));
+
+ if (issue_flags & IO_URING_F_COMPAT) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32, flags);
+#else
+ return -ENOTTY;
+#endif
+ } else {
+ copy_end = copy_end_kernel;
+ }
+
+ if (!data) {
+ data = kzalloc(sizeof(*data), GFP_NOFS);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out_acct;
+ }
+
+ io_uring_cmd_get_async_data(cmd)->op_data = data;
+
+ if (issue_flags & IO_URING_F_COMPAT) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ struct btrfs_ioctl_encoded_io_args_32 args32;
+
+ if (copy_from_user(&args32, sqe_addr, copy_end)) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+
+ data->args.iov = compat_ptr(args32.iov);
+ data->args.iovcnt = args32.iovcnt;
+ data->args.offset = args32.offset;
+ data->args.flags = args32.flags;
+#endif
+ } else {
+ if (copy_from_user(&data->args, sqe_addr, copy_end)) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+ }
+
+ if (data->args.flags != 0) {
+ ret = -EINVAL;
+ goto out_acct;
+ }
+
+ data->iov = data->iovstack;
+ ret = import_iovec(ITER_DEST, data->args.iov, data->args.iovcnt,
+ ARRAY_SIZE(data->iovstack), &data->iov,
+ &data->iter);
+ if (ret < 0)
+ goto out_acct;
+
+ if (iov_iter_count(&data->iter) == 0) {
+ ret = 0;
+ goto out_free;
+ }
+ }
+
+ pos = data->args.offset;
+ ret = rw_verify_area(READ, file, &pos, data->args.len);
+ if (ret < 0)
+ goto out_free;
+
+ init_sync_kiocb(&kiocb, file);
+ kiocb.ki_pos = pos;
+
+ if (issue_flags & IO_URING_F_NONBLOCK)
+ kiocb.ki_flags |= IOCB_NOWAIT;
+
+ start = ALIGN_DOWN(pos, fs_info->sectorsize);
+ lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
+
+ ret = btrfs_encoded_read(&kiocb, &data->iter, &data->args, &cached_state,
+ &disk_bytenr, &disk_io_size);
+ if (ret < 0 && ret != -EIOCBQUEUED)
+ goto out_free;
+
+ file_accessed(file);
+
+ if (copy_to_user(sqe_addr + copy_end,
+ (const char *)&data->args + copy_end_kernel,
+ sizeof(data->args) - copy_end_kernel)) {
+ if (ret == -EIOCBQUEUED) {
+ unlock_extent(io_tree, start, lockend, &cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ }
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ if (ret == -EIOCBQUEUED) {
+ u64 count = min_t(u64, iov_iter_count(&data->iter), disk_io_size);
+
+ /* Match ioctl by not returning past EOF if uncompressed. */
+ if (!data->args.compression)
+ count = min_t(u64, count, data->args.len);
+
+ ret = btrfs_uring_read_extent(&kiocb, &data->iter, start, lockend,
+ cached_state, disk_bytenr, disk_io_size,
+ count, data->args.compression,
+ data->iov, cmd);
+
+ goto out_acct;
+ }
+
+out_free:
+ kfree(data->iov);
+
+out_acct:
+ if (ret > 0)
+ add_rchar(current, ret);
+ inc_syscr(current);
+
+ return ret;
+}
+
+static int btrfs_uring_encoded_write(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ loff_t pos;
+ struct kiocb kiocb;
+ struct file *file;
+ ssize_t ret;
+ void __user *sqe_addr;
+ struct btrfs_uring_encoded_data *data = io_uring_cmd_get_async_data(cmd)->op_data;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto out_acct;
+ }
+
+ file = cmd->file;
+ sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr));
+
+ if (!(file->f_mode & FMODE_WRITE)) {
+ ret = -EBADF;
+ goto out_acct;
+ }
+
+ if (!data) {
+ data = kzalloc(sizeof(*data), GFP_NOFS);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out_acct;
+ }
+
+ io_uring_cmd_get_async_data(cmd)->op_data = data;
+
+ if (issue_flags & IO_URING_F_COMPAT) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ struct btrfs_ioctl_encoded_io_args_32 args32;
+
+ if (copy_from_user(&args32, sqe_addr, sizeof(args32))) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+ data->args.iov = compat_ptr(args32.iov);
+ data->args.iovcnt = args32.iovcnt;
+ data->args.offset = args32.offset;
+ data->args.flags = args32.flags;
+ data->args.len = args32.len;
+ data->args.unencoded_len = args32.unencoded_len;
+ data->args.unencoded_offset = args32.unencoded_offset;
+ data->args.compression = args32.compression;
+ data->args.encryption = args32.encryption;
+ memcpy(data->args.reserved, args32.reserved,
+ sizeof(data->args.reserved));
+#else
+ ret = -ENOTTY;
+ goto out_acct;
+#endif
+ } else {
+ if (copy_from_user(&data->args, sqe_addr, sizeof(data->args))) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+ }
+
+ ret = -EINVAL;
+ if (data->args.flags != 0)
+ goto out_acct;
+ if (memchr_inv(data->args.reserved, 0, sizeof(data->args.reserved)))
+ goto out_acct;
+ if (data->args.compression == BTRFS_ENCODED_IO_COMPRESSION_NONE &&
+ data->args.encryption == BTRFS_ENCODED_IO_ENCRYPTION_NONE)
+ goto out_acct;
+ if (data->args.compression >= BTRFS_ENCODED_IO_COMPRESSION_TYPES ||
+ data->args.encryption >= BTRFS_ENCODED_IO_ENCRYPTION_TYPES)
+ goto out_acct;
+ if (data->args.unencoded_offset > data->args.unencoded_len)
+ goto out_acct;
+ if (data->args.len > data->args.unencoded_len - data->args.unencoded_offset)
+ goto out_acct;
+
+ data->iov = data->iovstack;
+ ret = import_iovec(ITER_SOURCE, data->args.iov, data->args.iovcnt,
+ ARRAY_SIZE(data->iovstack), &data->iov,
+ &data->iter);
+ if (ret < 0)
+ goto out_acct;
+
+ if (iov_iter_count(&data->iter) == 0) {
+ ret = 0;
+ goto out_iov;
+ }
+ }
+
+ if (issue_flags & IO_URING_F_NONBLOCK) {
+ ret = -EAGAIN;
+ goto out_acct;
+ }
+
+ pos = data->args.offset;
+ ret = rw_verify_area(WRITE, file, &pos, data->args.len);
+ if (ret < 0)
+ goto out_iov;
+
+ init_sync_kiocb(&kiocb, file);
+ ret = kiocb_set_rw_flags(&kiocb, 0, WRITE);
+ if (ret)
+ goto out_iov;
+ kiocb.ki_pos = pos;
+
+ file_start_write(file);
+
+ ret = btrfs_do_write_iter(&kiocb, &data->iter, &data->args);
+ if (ret > 0)
+ fsnotify_modify(file);
+
+ file_end_write(file);
+out_iov:
+ kfree(data->iov);
+out_acct:
+ if (ret > 0)
+ add_wchar(current, ret);
+ inc_syscw(current);
+ return ret;
+}
+
+int btrfs_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ switch (cmd->cmd_op) {
+ case BTRFS_IOC_ENCODED_READ:
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ case BTRFS_IOC_ENCODED_READ_32:
+#endif
+ return btrfs_uring_encoded_read(cmd, issue_flags);
+
+ case BTRFS_IOC_ENCODED_WRITE:
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ case BTRFS_IOC_ENCODED_WRITE_32:
+#endif
+ return btrfs_uring_encoded_write(cmd, issue_flags);
+ }
+
+ return -EINVAL;
+}
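
From userspace, these commands are issued as IORING_OP_URING_CMD SQEs whose cmd_op carries the ioctl number and whose addr points at the same struct btrfs_ioctl_encoded_io_args the ioctl path uses. A rough sketch using liburing, with error handling omitted and assuming the uapi definitions from linux/btrfs.h, might be:

/* Rough userspace sketch: issue an encoded read as an io_uring command. */
#include <liburing.h>
#include <linux/btrfs.h>
#include <string.h>
#include <sys/uio.h>

int encoded_read_uring(int fd, void *buf, size_t len, off_t offset)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct btrfs_ioctl_encoded_io_args args = {
		.iov = &iov,
		.iovcnt = 1,
		.offset = offset,
	};
	int ret;

	io_uring_queue_init(8, &ring, 0);
	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = BTRFS_IOC_ENCODED_READ;
	sqe->addr = (unsigned long)&args;	/* kernel reads the args struct from here */

	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	ret = cqe->res;				/* bytes read, or negative errno */
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return ret;
}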
+
+static int btrfs_ioctl_subvol_sync(struct btrfs_fs_info *fs_info, void __user *argp)
+{
+ struct btrfs_root *root;
+ struct btrfs_ioctl_subvol_wait args = { 0 };
+ signed long sched_ret;
+ int refs;
+ u64 root_flags;
+ bool wait_for_deletion = false;
+ bool found = false;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ switch (args.mode) {
+ case BTRFS_SUBVOL_SYNC_WAIT_FOR_QUEUED:
+ /*
+ * Wait for the first one deleted, which also waits until all
+ * previous ones are cleaned.
+ */
+ spin_lock(&fs_info->trans_lock);
+ if (!list_empty(&fs_info->dead_roots)) {
+ root = list_last_entry(&fs_info->dead_roots,
+ struct btrfs_root, root_list);
+ args.subvolid = btrfs_root_id(root);
+ found = true;
+ }
+ spin_unlock(&fs_info->trans_lock);
+ if (!found)
+ return -ENOENT;
+
+ fallthrough;
+ case BTRFS_SUBVOL_SYNC_WAIT_FOR_ONE:
+ if ((0 < args.subvolid && args.subvolid < BTRFS_FIRST_FREE_OBJECTID) ||
+ BTRFS_LAST_FREE_OBJECTID < args.subvolid)
+ return -EINVAL;
+ break;
+ case BTRFS_SUBVOL_SYNC_COUNT:
+ spin_lock(&fs_info->trans_lock);
+ args.count = list_count_nodes(&fs_info->dead_roots);
+ spin_unlock(&fs_info->trans_lock);
+ if (copy_to_user(argp, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+ case BTRFS_SUBVOL_SYNC_PEEK_FIRST:
+ spin_lock(&fs_info->trans_lock);
+ /* Last in the list was deleted first. */
+ if (!list_empty(&fs_info->dead_roots)) {
+ root = list_last_entry(&fs_info->dead_roots,
+ struct btrfs_root, root_list);
+ args.subvolid = btrfs_root_id(root);
+ } else {
+ args.subvolid = 0;
+ }
+ spin_unlock(&fs_info->trans_lock);
+ if (copy_to_user(argp, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+ case BTRFS_SUBVOL_SYNC_PEEK_LAST:
+ spin_lock(&fs_info->trans_lock);
+ /* First in the list was deleted last. */
+ if (!list_empty(&fs_info->dead_roots)) {
+ root = list_first_entry(&fs_info->dead_roots,
+ struct btrfs_root, root_list);
+ args.subvolid = btrfs_root_id(root);
+ } else {
+ args.subvolid = 0;
+ }
+ spin_unlock(&fs_info->trans_lock);
+ if (copy_to_user(argp, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ /* 32bit limitation: fs_roots_radix key is not wide enough. */
+ if (sizeof(unsigned long) != sizeof(u64) && args.subvolid > U32_MAX)
+ return -EOVERFLOW;
+
+ while (1) {
+ /* Wait for the specific one. */
+ if (down_read_interruptible(&fs_info->subvol_sem) == -EINTR)
+ return -EINTR;
+ refs = -1;
+ spin_lock(&fs_info->fs_roots_radix_lock);
+ root = radix_tree_lookup(&fs_info->fs_roots_radix,
+ (unsigned long)args.subvolid);
+ if (root) {
+ spin_lock(&root->root_item_lock);
+ refs = btrfs_root_refs(&root->root_item);
+ root_flags = btrfs_root_flags(&root->root_item);
+ spin_unlock(&root->root_item_lock);
+ }
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+ up_read(&fs_info->subvol_sem);
+
+ /* Subvolume does not exist. */
+ if (!root)
+ return -ENOENT;
+
+ /* Subvolume not deleted at all. */
+ if (refs > 0)
+ return -EEXIST;
+ /* We've waited and now the subvolume is gone. */
+ if (wait_for_deletion && refs == -1) {
+ /* Return the one we waited for as the last one. */
+ if (copy_to_user(argp, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+ }
+
+ /* Subvolume not found on the first try (deleted or never existed). */
+ if (refs == -1)
+ return -ENOENT;
+
+ wait_for_deletion = true;
+ ASSERT(root_flags & BTRFS_ROOT_SUBVOL_DEAD);
+ sched_ret = schedule_timeout_interruptible(HZ);
+ /* Early wake up or error. */
+ if (sched_ret != 0)
+ return -EINTR;
+ }
+
+ return 0;
+}
+
long btrfs_ioctl(struct file *file, unsigned int
cmd, unsigned long arg)
{
@@ -4812,7 +5325,7 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_QUOTA_RESCAN_STATUS:
return btrfs_ioctl_quota_rescan_status(fs_info, argp);
case BTRFS_IOC_QUOTA_RESCAN_WAIT:
- return btrfs_ioctl_quota_rescan_wait(fs_info, argp);
+ return btrfs_ioctl_quota_rescan_wait(fs_info);
case BTRFS_IOC_DEV_REPLACE:
return btrfs_ioctl_dev_replace(fs_info, argp);
case BTRFS_IOC_GET_SUPPORTED_FEATURES:
@@ -4831,6 +5344,8 @@ long btrfs_ioctl(struct file *file, unsigned int
return fsverity_ioctl_enable(file, (const void __user *)argp);
case FS_IOC_MEASURE_VERITY:
return fsverity_ioctl_measure(file, argp);
+ case FS_IOC_READ_VERITY_METADATA:
+ return fsverity_ioctl_read_metadata(file, argp);
case BTRFS_IOC_ENCODED_READ:
return btrfs_ioctl_encoded_read(file, argp, false);
case BTRFS_IOC_ENCODED_WRITE:
@@ -4841,6 +5356,8 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_ENCODED_WRITE_32:
return btrfs_ioctl_encoded_write(file, argp, true);
#endif
+ case BTRFS_IOC_SUBVOL_SYNC_WAIT:
+ return btrfs_ioctl_subvol_sync(fs_info, argp);
}
return -ENOTTY;
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index 19cd26b0244a..ce915fcda43b 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -19,8 +19,9 @@ int btrfs_fileattr_set(struct mnt_idmap *idmap,
struct dentry *dentry, struct fileattr *fa);
int btrfs_ioctl_get_supported_features(void __user *arg);
void btrfs_sync_inode_flags_to_i_flags(struct inode *inode);
-int __pure btrfs_is_empty_uuid(const u8 *uuid);
void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_balance_args *bargs);
+int btrfs_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
+void btrfs_uring_read_extent_endio(void *ctx, int err);
#endif
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 6a0b7abb5bd9..9a7a7b723305 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -162,21 +162,6 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
}
/*
- * Try-lock for write.
- *
- * Return 1 if the rwlock has been taken, 0 otherwise
- */
-int btrfs_try_tree_write_lock(struct extent_buffer *eb)
-{
- if (down_write_trylock(&eb->lock)) {
- btrfs_set_eb_lock_owner(eb, current->pid);
- trace_btrfs_try_tree_write_lock(eb);
- return 1;
- }
- return 0;
-}
-
-/*
* Release read lock.
*/
void btrfs_tree_read_unlock(struct extent_buffer *eb)
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 3c15c75e0582..c69e57ff804b 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -129,6 +129,16 @@ enum btrfs_lockdep_trans_states {
rwsem_release(&owner->lock##_map, _THIS_IP_)
/*
+ * Used to account for the fact that when doing io_uring encoded I/O, we can
+ * return to userspace with the inode lock still held.
+ */
+#define btrfs_lockdep_inode_acquire(owner, lock) \
+ rwsem_acquire_read(&owner->vfs_inode.lock.dep_map, 0, 0, _THIS_IP_)
+
+#define btrfs_lockdep_inode_release(owner, lock) \
+ rwsem_release(&owner->vfs_inode.lock.dep_map, _THIS_IP_)
+
+/*
* Macros for the transaction states wait events, similar to the generic wait
* event macros.
*/
@@ -180,7 +190,6 @@ static inline void btrfs_tree_read_lock(struct extent_buffer *eb)
void btrfs_tree_read_unlock(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
-int btrfs_try_tree_write_lock(struct extent_buffer *eb);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root);
@@ -190,8 +199,13 @@ static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb)
{
lockdep_assert_held_write(&eb->lock);
}
+static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
+{
+ lockdep_assert_held_read(&eb->lock);
+}
#else
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb) { }
+static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
#endif
void btrfs_unlock_up_safe(struct btrfs_path *path, int level);
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 72856f6775f7..a45bc11f8665 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -80,7 +80,7 @@ void lzo_free_workspace(struct list_head *ws)
kfree(workspace);
}
-struct list_head *lzo_alloc_workspace(unsigned int level)
+struct list_head *lzo_alloc_workspace(void)
{
struct workspace *workspace;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 2104d60c2161..30eceaf829a7 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -111,8 +111,8 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
return NULL;
}
-static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
- u64 len)
+static int btrfs_range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
+ u64 len)
{
if (file_offset + len <= entry->file_offset ||
entry->file_offset + entry->num_bytes <= file_offset)
@@ -346,10 +346,10 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
ASSERT(file_offset + len <= folio_pos(folio) + folio_size(folio));
/*
- * Ordered (Private2) bit indicates whether we still have
+ * Ordered flag indicates whether we still have
* pending unfinished IO for the ordered extent.
*
- * If there's no such bit, we need to skip to next range.
+ * If it's not set, we need to skip to the next range.
*/
if (!btrfs_folio_test_ordered(fs_info, folio, file_offset, len))
return false;
@@ -985,7 +985,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
while (1) {
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
- if (range_overlaps(entry, file_offset, len))
+ if (btrfs_range_overlaps(entry, file_offset, len))
break;
if (entry->file_offset >= file_offset + len) {
@@ -1114,12 +1114,12 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
}
if (prev) {
entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
- if (range_overlaps(entry, file_offset, len))
+ if (btrfs_range_overlaps(entry, file_offset, len))
goto out;
}
if (next) {
entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
- if (range_overlaps(entry, file_offset, len))
+ if (btrfs_range_overlaps(entry, file_offset, len))
goto out;
}
/* No ordered extent in the range */
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index c297909f1506..b90fabe302e6 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -226,8 +226,7 @@ static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
return qgroup;
}
-static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
- struct btrfs_qgroup *qgroup)
+static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
struct btrfs_qgroup_list *list;
@@ -258,7 +257,7 @@ static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
return -ENOENT;
rb_erase(&qgroup->node, &fs_info->qgroup_tree);
- __del_qgroup_rb(fs_info, qgroup);
+ __del_qgroup_rb(qgroup);
return 0;
}
@@ -469,7 +468,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
/*
* If a qgroup exists for a subvolume ID, it is possible
* that subvolume has been deleted, in which case
- * re-using that ID would lead to incorrect accounting.
+ * reusing that ID would lead to incorrect accounting.
*
* Ensure that we skip any such subvol ids.
*
@@ -643,7 +642,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
while ((n = rb_first(&fs_info->qgroup_tree))) {
qgroup = rb_entry(n, struct btrfs_qgroup, node);
rb_erase(n, &fs_info->qgroup_tree);
- __del_qgroup_rb(fs_info, qgroup);
+ __del_qgroup_rb(qgroup);
btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
kfree(qgroup);
}
@@ -674,9 +673,6 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
key.offset = dst;
ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
-
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
-
btrfs_free_path(path);
return ret;
}
@@ -753,8 +749,6 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
- btrfs_mark_buffer_dirty(trans, leaf);
-
btrfs_release_path(path);
key.type = BTRFS_QGROUP_LIMIT_KEY;
@@ -772,8 +766,6 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
- btrfs_mark_buffer_dirty(trans, leaf);
-
ret = 0;
out:
btrfs_free_path(path);
@@ -860,9 +852,6 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
-
- btrfs_mark_buffer_dirty(trans, l);
-
out:
btrfs_free_path(path);
return ret;
@@ -906,9 +895,6 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
-
- btrfs_mark_buffer_dirty(trans, l);
-
out:
btrfs_free_path(path);
return ret;
@@ -948,9 +934,6 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
btrfs_set_qgroup_status_rescan(l, ptr,
fs_info->qgroup_rescan_progress.objectid);
-
- btrfs_mark_buffer_dirty(trans, l);
-
out:
btrfs_free_path(path);
return ret;
@@ -1122,6 +1105,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
if (simple) {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
+ btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
} else {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -1130,8 +1114,6 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
BTRFS_QGROUP_STATUS_FLAGS_MASK);
btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
- btrfs_mark_buffer_dirty(trans, leaf);
-
key.objectid = 0;
key.type = BTRFS_ROOT_REF_KEY;
key.offset = 0;
@@ -1255,8 +1237,6 @@ out_add_root:
spin_lock(&fs_info->qgroup_lock);
fs_info->quota_root = quota_root;
set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
- if (simple)
- btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
spin_unlock(&fs_info->qgroup_lock);
/* Skip rescan for simple qgroups. */
@@ -1407,7 +1387,7 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
fs_info->quota_root = NULL;
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
- fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
+ fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
spin_unlock(&fs_info->qgroup_lock);
btrfs_free_qgroup_config(fs_info);
@@ -1840,9 +1820,19 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
* Thus its reserved space should all be zero, no matter if qgroup
* is consistent or the mode.
*/
- WARN_ON(qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
- qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
- qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
+ if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) {
+ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ btrfs_warn_rl(fs_info,
+"to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu",
+ btrfs_qgroup_level(qgroup->qgroupid),
+ btrfs_qgroup_subvolid(qgroup->qgroupid),
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA],
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC],
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
+ }
/*
* The same for rfer/excl numbers, but that's only if our qgroup is
* consistent and if it's in regular qgroup mode.
@@ -1851,8 +1841,9 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
*/
if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
- if (WARN_ON(qgroup->rfer || qgroup->excl ||
- qgroup->rfer_cmpr || qgroup->excl_cmpr)) {
+ if (qgroup->rfer || qgroup->excl ||
+ qgroup->rfer_cmpr || qgroup->excl_cmpr) {
+ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
btrfs_warn_rl(fs_info,
"to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
btrfs_qgroup_level(qgroup->qgroupid),
@@ -2001,20 +1992,30 @@ out:
* Return <0 for insertion failure, caller can free @record safely.
*/
int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_qgroup_extent_record *record)
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_qgroup_extent_record *record,
+ u64 bytenr)
{
struct btrfs_qgroup_extent_record *existing, *ret;
- unsigned long bytenr = record->bytenr;
+ const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
if (!btrfs_qgroup_full_accounting(fs_info))
return 1;
- lockdep_assert_held(&delayed_refs->lock);
- trace_btrfs_qgroup_trace_extent(fs_info, record);
+#if BITS_PER_LONG == 32
+ if (bytenr >= MAX_LFS_FILESIZE) {
+ btrfs_err_rl(fs_info,
+"qgroup record for extent at %llu is beyond 32bit page cache and xarray index limit",
+ bytenr);
+ btrfs_err_32bit_limit(fs_info);
+ return -EOVERFLOW;
+ }
+#endif
+
+ trace_btrfs_qgroup_trace_extent(fs_info, record, bytenr);
xa_lock(&delayed_refs->dirty_extents);
- existing = xa_load(&delayed_refs->dirty_extents, bytenr);
+ existing = xa_load(&delayed_refs->dirty_extents, index);
if (existing) {
if (record->data_rsv && !existing->data_rsv) {
existing->data_rsv = record->data_rsv;
@@ -2024,7 +2025,7 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
return 1;
}
- ret = __xa_store(&delayed_refs->dirty_extents, record->bytenr, record, GFP_ATOMIC);
+ ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC);
xa_unlock(&delayed_refs->dirty_extents);
if (xa_is_err(ret)) {
qgroup_mark_inconsistent(fs_info);
@@ -2056,12 +2057,17 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
* transaction committing, but not now as qgroup accounting will be wrong again.
*/
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
- struct btrfs_qgroup_extent_record *qrecord)
+ struct btrfs_qgroup_extent_record *qrecord,
+ u64 bytenr)
{
- struct btrfs_backref_walk_ctx ctx = { 0 };
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_backref_walk_ctx ctx = {
+ .bytenr = bytenr,
+ .fs_info = fs_info,
+ };
int ret;
- if (!btrfs_qgroup_full_accounting(trans->fs_info))
+ if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
/*
* We are always called in a context where we are already holding a
@@ -2084,16 +2090,13 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
*/
ASSERT(trans != NULL);
- if (trans->fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
return 0;
- ctx.bytenr = qrecord->bytenr;
- ctx.fs_info = trans->fs_info;
-
ret = btrfs_find_all_roots(&ctx, true);
if (ret < 0) {
- qgroup_mark_inconsistent(trans->fs_info);
- btrfs_warn(trans->fs_info,
+ qgroup_mark_inconsistent(fs_info);
+ btrfs_warn(fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
ret);
return 0;
@@ -2128,7 +2131,8 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_qgroup_extent_record *record;
- struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs;
+ const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
int ret;
if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
@@ -2137,26 +2141,21 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
if (!record)
return -ENOMEM;
- if (xa_reserve(&trans->transaction->delayed_refs.dirty_extents, bytenr, GFP_NOFS)) {
+ if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
kfree(record);
return -ENOMEM;
}
- delayed_refs = &trans->transaction->delayed_refs;
- record->bytenr = bytenr;
record->num_bytes = num_bytes;
- record->old_roots = NULL;
- spin_lock(&delayed_refs->lock);
- ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
- spin_unlock(&delayed_refs->lock);
+ ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr);
if (ret) {
/* Clean up if insertion fails or item exists. */
- xa_release(&delayed_refs->dirty_extents, record->bytenr);
+ xa_release(&delayed_refs->dirty_extents, index);
kfree(record);
return 0;
}
- return btrfs_qgroup_trace_extent_post(trans, record);
+ return btrfs_qgroup_trace_extent_post(trans, record, bytenr);
}
/*
@@ -2641,7 +2640,6 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
if (!extent_buffer_uptodate(root_eb)) {
struct btrfs_tree_parent_check check = {
- .has_first_key = false,
.transid = root_gen,
.level = root_level
};
@@ -3032,14 +3030,16 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
delayed_refs = &trans->transaction->delayed_refs;
qgroup_to_skip = delayed_refs->qgroup_to_skip;
xa_for_each(&delayed_refs->dirty_extents, index, record) {
+ const u64 bytenr = (((u64)index) << fs_info->sectorsize_bits);
+
num_dirty_extents++;
- trace_btrfs_qgroup_account_extents(fs_info, record);
+ trace_btrfs_qgroup_account_extents(fs_info, record, bytenr);
if (!ret && !(fs_info->qgroup_flags &
BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
struct btrfs_backref_walk_ctx ctx = { 0 };
- ctx.bytenr = record->bytenr;
+ ctx.bytenr = bytenr;
ctx.fs_info = fs_info;
/*
@@ -3081,7 +3081,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
ulist_del(record->old_roots, qgroup_to_skip,
0);
}
- ret = btrfs_qgroup_account_extent(trans, record->bytenr,
+ ret = btrfs_qgroup_account_extent(trans, bytenr,
record->num_bytes,
record->old_roots,
new_roots);
@@ -4185,13 +4185,20 @@ static int try_flush_qgroup(struct btrfs_root *root)
return 0;
}
- btrfs_run_delayed_iputs(root->fs_info);
- btrfs_wait_on_delayed_iputs(root->fs_info);
ret = btrfs_start_delalloc_snapshot(root, true);
if (ret < 0)
goto out;
btrfs_wait_ordered_extents(root, U64_MAX, NULL);
+ /*
+ * After waiting for ordered extents run delayed iputs in order to free
+ * space from unlinked files before committing the current transaction,
+ * as ordered extents may have been holding the last reference of an
+ * inode and they add a delayed iput when they complete.
+ */
+ btrfs_run_delayed_iputs(root->fs_info);
+ btrfs_wait_on_delayed_iputs(root->fs_info);
+
ret = btrfs_commit_current_transaction(root);
out:
clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
@@ -4676,8 +4683,7 @@ out:
* BOTH POINTERS ARE BEFORE TREE SWAP
* @last_snapshot: last snapshot generation of the subvolume tree
*/
-int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
- struct btrfs_root *subvol_root,
+int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root,
struct btrfs_block_group *bg,
struct extent_buffer *subvol_parent, int subvol_slot,
struct extent_buffer *reloc_parent, int reloc_slot,
@@ -4883,17 +4889,6 @@ void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
xa_destroy(&trans->delayed_refs.dirty_extents);
}
-void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes)
-{
- if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
- return;
-
- if (!is_fstree(root))
- return;
-
- btrfs_qgroup_free_refroot(fs_info, root, rsv_bytes, BTRFS_QGROUP_RSV_DATA);
-}
-
int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
const struct btrfs_squota_delta *delta)
{
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 98adf4ec7b01..e233cc79af18 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -121,11 +121,18 @@ struct btrfs_inode;
#define BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN (1ULL << 63)
#define BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING (1ULL << 62)
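+/* Default value for fs_info->qgroup_drop_subtree_thres. */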
+#define BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT (3)
+
/*
 * Record a dirty extent, and inform qgroup to update quota on it
*/
struct btrfs_qgroup_extent_record {
- u64 bytenr;
+ /*
+ * The bytenr of the extent is given by its index in the dirty_extents
+ * xarray of struct btrfs_delayed_ref_root left shifted by
+ * fs_info->sectorsize_bits.
+ */
+
u64 num_bytes;
/*
@@ -343,9 +350,11 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_trace_extent_nolock(
struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_qgroup_extent_record *record);
+ struct btrfs_qgroup_extent_record *record,
+ u64 bytenr);
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
- struct btrfs_qgroup_extent_record *qrecord);
+ struct btrfs_qgroup_extent_record *qrecord,
+ u64 bytenr);
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
u64 num_bytes);
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
@@ -430,8 +439,7 @@ void btrfs_qgroup_init_swapped_blocks(
struct btrfs_qgroup_swapped_blocks *swapped_blocks);
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root);
-int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
- struct btrfs_root *subvol_root,
+int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root,
struct btrfs_block_group *bg,
struct extent_buffer *subvol_parent, int subvol_slot,
struct extent_buffer *reloc_parent, int reloc_slot,
@@ -440,7 +448,6 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *eb);
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info);
-void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes);
int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
const struct btrfs_squota_delta *delta);
diff --git a/fs/btrfs/raid-stripe-tree.c b/fs/btrfs/raid-stripe-tree.c
index 4c859b550f6c..1834011ccc49 100644
--- a/fs/btrfs/raid-stripe-tree.c
+++ b/fs/btrfs/raid-stripe-tree.c
@@ -13,6 +13,56 @@
#include "volumes.h"
#include "print-tree.h"
+static int btrfs_partially_delete_raid_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ const struct btrfs_key *oldkey,
+ u64 newlen, u64 frontpad)
+{
+ struct btrfs_root *stripe_root = trans->fs_info->stripe_root;
+ struct btrfs_stripe_extent *extent, *newitem;
+ struct extent_buffer *leaf;
+ int slot;
+ size_t item_size;
+ struct btrfs_key newkey = {
+ .objectid = oldkey->objectid + frontpad,
+ .type = BTRFS_RAID_STRIPE_KEY,
+ .offset = newlen,
+ };
+ int ret;
+
+ ASSERT(newlen > 0);
+ ASSERT(oldkey->type == BTRFS_RAID_STRIPE_KEY);
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+ item_size = btrfs_item_size(leaf, slot);
+
+ newitem = kzalloc(item_size, GFP_NOFS);
+ if (!newitem)
+ return -ENOMEM;
+
+ extent = btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent);
+
+ for (int i = 0; i < btrfs_num_raid_stripes(item_size); i++) {
+ struct btrfs_raid_stride *stride = &extent->strides[i];
+ u64 phys;
+
+ phys = btrfs_raid_stride_physical(leaf, stride) + frontpad;
+ btrfs_set_stack_raid_stride_physical(&newitem->strides[i], phys);
+ }
+
+ ret = btrfs_del_item(trans, stripe_root, path);
+ if (ret)
+ goto out;
+
+ btrfs_release_path(path);
+ ret = btrfs_insert_item(trans, stripe_root, &newkey, newitem, item_size);
+
+out:
+ kfree(newitem);
+ return ret;
+}
+
int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 length)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -26,9 +76,22 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le
int slot;
int ret;
- if (!stripe_root)
+ if (!btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE) || !stripe_root)
return 0;
+ if (!btrfs_is_testing(fs_info)) {
+ struct btrfs_chunk_map *map;
+ bool use_rst;
+
+ map = btrfs_find_chunk_map(fs_info, start, length);
+ if (!map)
+ return -EINVAL;
+ use_rst = btrfs_need_stripe_tree_update(fs_info, map->type);
+ btrfs_free_chunk_map(map);
+ if (!use_rst)
+ return 0;
+ }
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -36,23 +99,55 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le
while (1) {
key.objectid = start;
key.type = BTRFS_RAID_STRIPE_KEY;
- key.offset = length;
+ key.offset = 0;
ret = btrfs_search_slot(trans, stripe_root, &key, path, -1, 1);
if (ret < 0)
break;
- if (ret > 0) {
- ret = 0;
- if (path->slots[0] == 0)
- break;
+
+ if (path->slots[0] == btrfs_header_nritems(path->nodes[0]))
path->slots[0]--;
- }
leaf = path->nodes[0];
slot = path->slots[0];
btrfs_item_key_to_cpu(leaf, &key, slot);
found_start = key.objectid;
found_end = found_start + key.offset;
+ ret = 0;
+
+ /*
+ * The stripe extent starts before the range we want to delete,
+ * but the range spans more than one stripe extent:
+ *
+ * |--- RAID Stripe Extent ---||--- RAID Stripe Extent ---|
+ * |--- keep ---|--- drop ---|
+ *
+ * This means we have to get the previous item, truncate its
+ * length and then restart the search.
+ */
+ if (found_start > start) {
+ if (slot == 0) {
+ ret = btrfs_previous_item(stripe_root, path, start,
+ BTRFS_RAID_STRIPE_KEY);
+ if (ret) {
+ if (ret > 0)
+ ret = -ENOENT;
+ break;
+ }
+ } else {
+ path->slots[0]--;
+ }
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ found_start = key.objectid;
+ found_end = found_start + key.offset;
+ ASSERT(found_start <= start);
+ }
+
+ if (key.type != BTRFS_RAID_STRIPE_KEY)
+ break;
/* That stripe ends before we start, we're done. */
if (found_end <= start)
@@ -61,7 +156,98 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le
trace_btrfs_raid_extent_delete(fs_info, start, end,
found_start, found_end);
- ASSERT(found_start >= start && found_end <= end);
+ /*
+ * The stripe extent starts before the range we want to delete
+ * and ends after the range we want to delete, i.e. we're
+ * punching a hole in the stripe extent:
+ *
+ * |--- RAID Stripe Extent ---|
+ * | keep |--- drop ---| keep |
+ *
+ * This means we need to a) truncate the existing item and b)
+ * create a second item for the remaining range.
+ */
+ if (found_start < start && found_end > end) {
+ size_t item_size;
+ u64 diff_start = start - found_start;
+ u64 diff_end = found_end - end;
+ struct btrfs_stripe_extent *extent;
+ struct btrfs_key newkey = {
+ .objectid = end,
+ .type = BTRFS_RAID_STRIPE_KEY,
+ .offset = diff_end,
+ };
+
+ /* The "right" item. */
+ ret = btrfs_duplicate_item(trans, stripe_root, path, &newkey);
+ if (ret)
+ break;
+
+ item_size = btrfs_item_size(leaf, path->slots[0]);
+ extent = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_stripe_extent);
+
+ for (int i = 0; i < btrfs_num_raid_stripes(item_size); i++) {
+ struct btrfs_raid_stride *stride = &extent->strides[i];
+ u64 phys;
+
+ phys = btrfs_raid_stride_physical(leaf, stride);
+ phys += diff_start + length;
+ btrfs_set_raid_stride_physical(leaf, stride, phys);
+ }
+
+ /* The "left" item. */
+ path->slots[0]--;
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ btrfs_partially_delete_raid_extent(trans, path, &key,
+ diff_start, 0);
+ break;
+ }
+
+ /*
+ * The stripe extent starts before the range we want to delete:
+ *
+ * |--- RAID Stripe Extent ---|
+ * |--- keep ---|--- drop ---|
+ *
+ * This means we have to duplicate the tree item, truncate the
+ * length to the new size and then re-insert the item.
+ */
+ if (found_start < start) {
+ u64 diff_start = start - found_start;
+
+ btrfs_partially_delete_raid_extent(trans, path, &key,
+ diff_start, 0);
+
+ start += (key.offset - diff_start);
+ length -= (key.offset - diff_start);
+ if (length == 0)
+ break;
+
+ btrfs_release_path(path);
+ continue;
+ }
+
+ /*
+ * The stripe extent ends after the range we want to delete:
+ *
+ * |--- RAID Stripe Extent ---|
+ * |--- drop ---|--- keep ---|
+ *
+ * This means we have to duplicate the tree item, truncate the
+ * length to the new size and then re-insert the item.
+ */
+ if (found_end > end) {
+ u64 diff_end = found_end - end;
+
+ btrfs_partially_delete_raid_extent(trans, path, &key,
+ key.offset - length,
+ length);
+ ASSERT(key.offset - diff_end == length);
+ break;
+ }
+
+ /* Finally we can delete the whole item, no more special cases. */
ret = btrfs_del_item(trans, stripe_root, path);
if (ret)
break;
@@ -102,14 +288,14 @@ static int update_raid_extent_item(struct btrfs_trans_handle *trans,
write_extent_buffer(leaf, stripe_extent, btrfs_item_ptr_offset(leaf, slot),
item_size);
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_free_path(path);
return ret;
}
-static int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
- struct btrfs_io_context *bioc)
+EXPORT_FOR_TESTS
+int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_io_context *bioc)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_key stripe_key;
@@ -131,12 +317,8 @@ static int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
for (int i = 0; i < num_stripes; i++) {
u64 devid = bioc->stripes[i].dev->devid;
u64 physical = bioc->stripes[i].physical;
- u64 length = bioc->stripes[i].length;
struct btrfs_raid_stride *raid_stride = &stripe_extent->strides[i];
- if (length == 0)
- length = bioc->size;
-
btrfs_set_stack_raid_stride_devid(raid_stride, devid);
btrfs_set_stack_raid_stride_physical(raid_stride, physical);
}
@@ -233,7 +415,7 @@ int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
found_end = found_logical + found_length;
if (found_logical > end) {
- ret = -ENOENT;
+ ret = -ENODATA;
goto out;
}
@@ -279,10 +461,10 @@ int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
}
/* If we're here, we haven't found the requested devid in the stripe. */
- ret = -ENOENT;
+ ret = -ENODATA;
out:
if (ret > 0)
- ret = -ENOENT;
+ ret = -ENODATA;
if (ret && ret != -EIO && !stripe->rst_search_commit_root) {
btrfs_debug(fs_info,
"cannot find raid-stripe for logical [%llu, %llu] devid %llu, profile %s",
diff --git a/fs/btrfs/raid-stripe-tree.h b/fs/btrfs/raid-stripe-tree.h
index 1ac1c21aac2f..541836421778 100644
--- a/fs/btrfs/raid-stripe-tree.h
+++ b/fs/btrfs/raid-stripe-tree.h
@@ -28,6 +28,11 @@ int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
int btrfs_insert_raid_extent(struct btrfs_trans_handle *trans,
struct btrfs_ordered_extent *ordered_extent);
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_io_context *bioc);
+#endif
+
static inline bool btrfs_need_stripe_tree_update(struct btrfs_fs_info *fs_info,
u64 map_type)
{
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 39bec672df0c..cdd373c27784 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1272,8 +1272,7 @@ static inline void bio_list_put(struct bio_list *bio_list)
static void assert_rbio(struct btrfs_raid_bio *rbio)
{
- if (!IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
- !IS_ENABLED(CONFIG_BTRFS_ASSERT))
+ if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
return;
/*
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index 9522a8b79d22..2928abf7eb82 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -857,6 +857,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
"dropping a ref for a root that doesn't have a ref on the block");
dump_block_entry(fs_info, be);
dump_ref_action(fs_info, ra);
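+ /*
+ * Unlink the ref from the block entry's rbtree before freeing it,
+ * otherwise the tree would keep a dangling pointer to freed memory.
+ */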
+ rb_erase(&ref->node, &be->refs);
kfree(ref);
kfree(ra);
goto out_unlock;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index f3834f8d26b4..af0969b70b53 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -342,12 +342,6 @@ static bool handle_useless_nodes(struct reloc_control *rc,
if (cur == node)
ret = true;
- /* The node is the lowest node */
- if (cur->lowest) {
- list_del_init(&cur->lower);
- cur->lowest = 0;
- }
-
/* Cleanup the lower edges */
while (!list_empty(&cur->lower)) {
struct btrfs_backref_edge *edge;
@@ -373,7 +367,6 @@ static bool handle_useless_nodes(struct reloc_control *rc,
* cache to avoid unnecessary backref lookup.
*/
if (cur->level > 0) {
- list_add(&cur->list, &cache->detached);
cur->detached = 1;
} else {
rb_erase(&cur->rb_node, &cache->rb_root);
@@ -426,7 +419,6 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
goto out;
}
- node->lowest = 1;
cur = node;
/* Breadth-first search to build backref cache */
@@ -470,92 +462,6 @@ out:
}
/*
- * helper to add backref node for the newly created snapshot.
- * the backref node is created by cloning backref node that
- * corresponds to root of source tree
- */
-static int clone_backref_node(struct btrfs_trans_handle *trans,
- struct reloc_control *rc,
- const struct btrfs_root *src,
- struct btrfs_root *dest)
-{
- struct btrfs_root *reloc_root = src->reloc_root;
- struct btrfs_backref_cache *cache = &rc->backref_cache;
- struct btrfs_backref_node *node = NULL;
- struct btrfs_backref_node *new_node;
- struct btrfs_backref_edge *edge;
- struct btrfs_backref_edge *new_edge;
- struct rb_node *rb_node;
-
- rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
- if (rb_node) {
- node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
- if (node->detached)
- node = NULL;
- else
- BUG_ON(node->new_bytenr != reloc_root->node->start);
- }
-
- if (!node) {
- rb_node = rb_simple_search(&cache->rb_root,
- reloc_root->commit_root->start);
- if (rb_node) {
- node = rb_entry(rb_node, struct btrfs_backref_node,
- rb_node);
- BUG_ON(node->detached);
- }
- }
-
- if (!node)
- return 0;
-
- new_node = btrfs_backref_alloc_node(cache, dest->node->start,
- node->level);
- if (!new_node)
- return -ENOMEM;
-
- new_node->lowest = node->lowest;
- new_node->checked = 1;
- new_node->root = btrfs_grab_root(dest);
- ASSERT(new_node->root);
-
- if (!node->lowest) {
- list_for_each_entry(edge, &node->lower, list[UPPER]) {
- new_edge = btrfs_backref_alloc_edge(cache);
- if (!new_edge)
- goto fail;
-
- btrfs_backref_link_edge(new_edge, edge->node[LOWER],
- new_node, LINK_UPPER);
- }
- } else {
- list_add_tail(&new_node->lower, &cache->leaves);
- }
-
- rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
- &new_node->rb_node);
- if (rb_node)
- btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);
-
- if (!new_node->lowest) {
- list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
- list_add_tail(&new_edge->list[LOWER],
- &new_edge->node[LOWER]->upper);
- }
- }
- return 0;
-fail:
- while (!list_empty(&new_node->lower)) {
- new_edge = list_entry(new_node->lower.next,
- struct btrfs_backref_edge, list[UPPER]);
- list_del(&new_edge->list[UPPER]);
- btrfs_backref_free_edge(cache, new_edge);
- }
- btrfs_backref_free_node(cache, new_node);
- return -ENOMEM;
-}
-
-/*
* helper to add 'address of tree root -> reloc tree' mapping
*/
static int __add_reloc_root(struct btrfs_root *root)
@@ -950,7 +856,6 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
u32 i;
int ret = 0;
int first = 1;
- int dirty = 0;
if (rc->stage != UPDATE_DATA_PTRS)
return 0;
@@ -1030,7 +935,6 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
}
btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
- dirty = 1;
key.offset -= btrfs_file_extent_offset(leaf, fi);
ref.action = BTRFS_ADD_DELAYED_REF;
@@ -1061,8 +965,6 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
break;
}
}
- if (dirty)
- btrfs_mark_buffer_dirty(trans, leaf);
if (inode)
btrfs_add_delayed_iput(inode);
return ret;
@@ -1244,7 +1146,7 @@ again:
* The real subtree rescan is delayed until we have new
* CoW on the subtree root node before transaction commit.
*/
- ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
+ ret = btrfs_qgroup_add_swapped_blocks(dest,
rc->block_group, parent, slot,
path->nodes[level], path->slots[level],
last_snapshot);
@@ -1255,13 +1157,11 @@ again:
*/
btrfs_set_node_blockptr(parent, slot, new_bytenr);
btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
- btrfs_mark_buffer_dirty(trans, parent);
btrfs_set_node_blockptr(path->nodes[level],
path->slots[level], old_bytenr);
btrfs_set_node_ptr_generation(path->nodes[level],
path->slots[level], old_ptr_gen);
- btrfs_mark_buffer_dirty(trans, path->nodes[level]);
ref.action = BTRFS_ADD_DELAYED_REF;
ref.bytenr = old_bytenr;
@@ -2058,100 +1958,72 @@ struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
int index = 0;
int ret;
- next = node;
- while (1) {
- cond_resched();
- next = walk_up_backref(next, edges, &index);
- root = next->root;
+ next = walk_up_backref(node, edges, &index);
+ root = next->root;
- /*
- * If there is no root, then our references for this block are
- * incomplete, as we should be able to walk all the way up to a
- * block that is owned by a root.
- *
- * This path is only for SHAREABLE roots, so if we come upon a
- * non-SHAREABLE root then we have backrefs that resolve
- * improperly.
- *
- * Both of these cases indicate file system corruption, or a bug
- * in the backref walking code.
- */
- if (!root) {
- ASSERT(0);
- btrfs_err(trans->fs_info,
- "bytenr %llu doesn't have a backref path ending in a root",
- node->bytenr);
- return ERR_PTR(-EUCLEAN);
- }
- if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
- ASSERT(0);
- btrfs_err(trans->fs_info,
- "bytenr %llu has multiple refs with one ending in a non-shareable root",
- node->bytenr);
- return ERR_PTR(-EUCLEAN);
- }
-
- if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
- ret = record_reloc_root_in_trans(trans, root);
- if (ret)
- return ERR_PTR(ret);
- break;
- }
+ /*
+ * If there is no root, then our references for this block are
+ * incomplete, as we should be able to walk all the way up to a block
+ * that is owned by a root.
+ *
+ * This path is only for SHAREABLE roots, so if we come upon a
+ * non-SHAREABLE root then we have backrefs that resolve improperly.
+ *
+ * Both of these cases indicate file system corruption, or a bug in the
+ * backref walking code.
+ */
+ if (unlikely(!root)) {
+ btrfs_err(trans->fs_info,
+ "bytenr %llu doesn't have a backref path ending in a root",
+ node->bytenr);
+ return ERR_PTR(-EUCLEAN);
+ }
+ if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) {
+ btrfs_err(trans->fs_info,
+ "bytenr %llu has multiple refs with one ending in a non-shareable root",
+ node->bytenr);
+ return ERR_PTR(-EUCLEAN);
+ }
- ret = btrfs_record_root_in_trans(trans, root);
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
+ ret = record_reloc_root_in_trans(trans, root);
if (ret)
return ERR_PTR(ret);
- root = root->reloc_root;
-
- /*
- * We could have raced with another thread which failed, so
- * root->reloc_root may not be set, return ENOENT in this case.
- */
- if (!root)
- return ERR_PTR(-ENOENT);
+ goto found;
+ }
- if (next->new_bytenr != root->node->start) {
- /*
- * We just created the reloc root, so we shouldn't have
- * ->new_bytenr set and this shouldn't be in the changed
- * list. If it is then we have multiple roots pointing
- * at the same bytenr which indicates corruption, or
- * we've made a mistake in the backref walking code.
- */
- ASSERT(next->new_bytenr == 0);
- ASSERT(list_empty(&next->list));
- if (next->new_bytenr || !list_empty(&next->list)) {
- btrfs_err(trans->fs_info,
- "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
- node->bytenr, next->bytenr);
- return ERR_PTR(-EUCLEAN);
- }
+ ret = btrfs_record_root_in_trans(trans, root);
+ if (ret)
+ return ERR_PTR(ret);
+ root = root->reloc_root;
- next->new_bytenr = root->node->start;
- btrfs_put_root(next->root);
- next->root = btrfs_grab_root(root);
- ASSERT(next->root);
- list_add_tail(&next->list,
- &rc->backref_cache.changed);
- mark_block_processed(rc, next);
- break;
- }
+ /*
+ * We could have raced with another thread which failed, so
+ * root->reloc_root may not be set, return ENOENT in this case.
+ */
+ if (!root)
+ return ERR_PTR(-ENOENT);
- WARN_ON(1);
- root = NULL;
- next = walk_down_backref(edges, &index);
- if (!next || next->level <= node->level)
- break;
- }
- if (!root) {
+ if (next->new_bytenr) {
/*
- * This can happen if there's fs corruption or if there's a bug
- * in the backref lookup code.
+ * We just created the reloc root, so we shouldn't have
+ * ->new_bytenr set yet. If it is then we have multiple roots
+ * pointing at the same bytenr which indicates corruption, or
+ * we've made a mistake in the backref walking code.
*/
- ASSERT(0);
- return ERR_PTR(-ENOENT);
+ ASSERT(next->new_bytenr == 0);
+ btrfs_err(trans->fs_info,
+ "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
+ node->bytenr, next->bytenr);
+ return ERR_PTR(-EUCLEAN);
}
+ next->new_bytenr = root->node->start;
+ btrfs_put_root(next->root);
+ next->root = btrfs_grab_root(root);
+ ASSERT(next->root);
+ mark_block_processed(rc, next);
+found:
next = node;
/* setup backref node path for btrfs_reloc_cow_block */
while (1) {
@@ -2247,17 +2119,11 @@ static noinline_for_stack u64 calcu_metadata_size(struct reloc_control *rc,
return num_bytes;
}
-static int reserve_metadata_space(struct btrfs_trans_handle *trans,
- struct reloc_control *rc,
- struct btrfs_backref_node *node)
+static int refill_metadata_space(struct btrfs_trans_handle *trans,
+ struct reloc_control *rc, u64 num_bytes)
{
- struct btrfs_root *root = rc->extent_root;
- struct btrfs_fs_info *fs_info = root->fs_info;
- u64 num_bytes;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
- u64 tmp;
-
- num_bytes = calcu_metadata_size(rc, node) * 2;
trans->block_rsv = rc->block_rsv;
rc->reserved_bytes += num_bytes;
@@ -2270,7 +2136,8 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
BTRFS_RESERVE_FLUSH_LIMIT);
if (ret) {
- tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
+ u64 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
+
while (tmp <= rc->reserved_bytes)
tmp <<= 1;
/*
@@ -2288,6 +2155,16 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
return 0;
}
+static int reserve_metadata_space(struct btrfs_trans_handle *trans,
+ struct reloc_control *rc,
+ struct btrfs_backref_node *node)
+{
+ u64 num_bytes;
+
+ num_bytes = calcu_metadata_size(rc, node) * 2;
+ return refill_metadata_space(trans, rc, num_bytes);
+}
+
/*
* relocate a block tree, and then update pointers in upper level
* blocks that reference the block to point to the new location.
@@ -2442,7 +2319,7 @@ next:
if (!ret && node->pending) {
btrfs_backref_drop_node_buffer(node);
- list_move_tail(&node->list, &rc->backref_cache.changed);
+ list_del_init(&node->list);
node->pending = 0;
}
@@ -2605,8 +2482,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
/*
* This block was the root block of a root, and this is
* the first time we're processing the block and thus it
- * should not have had the ->new_bytenr modified and
- * should have not been included on the changed list.
+ * should not have had the ->new_bytenr modified.
*
* However in the case of corruption we could have
* multiple refs pointing to the same block improperly,
@@ -2616,8 +2492,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
* normal user in the case of corruption.
*/
ASSERT(node->new_bytenr == 0);
- ASSERT(list_empty(&node->list));
- if (node->new_bytenr || !list_empty(&node->list)) {
+ if (node->new_bytenr) {
btrfs_err(root->fs_info,
"bytenr %llu has improper references to it",
node->bytenr);
@@ -2640,17 +2515,12 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
btrfs_put_root(node->root);
node->root = btrfs_grab_root(root);
ASSERT(node->root);
- list_add_tail(&node->list, &rc->backref_cache.changed);
} else {
- path->lowest_level = node->level;
- if (root == root->fs_info->chunk_root)
- btrfs_reserve_chunk_metadata(trans, false);
- ret = btrfs_search_slot(trans, root, key, path, 0, 1);
- btrfs_release_path(path);
- if (root == root->fs_info->chunk_root)
- btrfs_trans_release_chunk_metadata(trans);
- if (ret > 0)
- ret = 0;
+ btrfs_err(root->fs_info,
+ "bytenr %llu resolved to a non-shareable root",
+ node->bytenr);
+ ret = -EUCLEAN;
+ goto out;
}
if (!ret)
update_processed_blocks(rc, node);
@@ -2658,11 +2528,50 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
ret = do_relocation(trans, rc, node, key, path, 1);
}
out:
- if (ret || node->level == 0 || node->cowonly)
+ if (ret || node->level == 0)
btrfs_backref_cleanup_node(&rc->backref_cache, node);
return ret;
}
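+/*
+ * Relocate a block that belongs to a COW-only tree or to the data reloc tree:
+ * no backref tree is needed, COWing the path down to the block is enough.
+ */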
+static int relocate_cowonly_block(struct btrfs_trans_handle *trans,
+ struct reloc_control *rc, struct tree_block *block,
+ struct btrfs_path *path)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *root;
+ u64 num_bytes;
+ int nr_levels;
+ int ret;
+
+ root = btrfs_get_fs_root(fs_info, block->owner, true);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ nr_levels = max(btrfs_header_level(root->node) - block->level, 0) + 1;
+
+ num_bytes = fs_info->nodesize * nr_levels;
+ ret = refill_metadata_space(trans, rc, num_bytes);
+ if (ret) {
+ btrfs_put_root(root);
+ return ret;
+ }
+ path->lowest_level = block->level;
+ if (root == root->fs_info->chunk_root)
+ btrfs_reserve_chunk_metadata(trans, false);
+
+ ret = btrfs_search_slot(trans, root, &block->key, path, 0, 1);
+ path->lowest_level = 0;
+ btrfs_release_path(path);
+
+ if (root == root->fs_info->chunk_root)
+ btrfs_trans_release_chunk_metadata(trans);
+ if (ret > 0)
+ ret = 0;
+ btrfs_put_root(root);
+
+ return ret;
+}
+
/*
* relocate a list of blocks
*/
@@ -2702,6 +2611,20 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
/* Do tree relocation */
rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
+ /*
+ * For COWonly blocks, or the data reloc tree, we only need to
+ * COW down to the block, there's no need to generate a backref
+ * tree.
+ */
+ if (block->owner &&
+ (!is_fstree(block->owner) ||
+ block->owner == BTRFS_DATA_RELOC_TREE_OBJECTID)) {
+ ret = relocate_cowonly_block(trans, rc, block, path);
+ if (ret)
+ break;
+ continue;
+ }
+
node = build_backref_tree(trans, rc, &block->key,
block->level, block->bytenr);
if (IS_ERR(node)) {
@@ -2902,6 +2825,7 @@ static int relocate_one_folio(struct reloc_control *rc,
const bool use_rst = btrfs_need_stripe_tree_update(fs_info, rc->block_group->flags);
ASSERT(index <= last_index);
+again:
folio = filemap_lock_folio(inode->i_mapping, index);
if (IS_ERR(folio)) {
@@ -2937,11 +2861,16 @@ static int relocate_one_folio(struct reloc_control *rc,
ret = -EIO;
goto release_folio;
}
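+ /*
+ * The folio was dropped from the mapping while we had it unlocked to
+ * read it in, so look it up again.
+ */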
+ if (folio->mapping != inode->i_mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
+ goto again;
+ }
}
/*
* We could have lost folio private when we dropped the lock to read the
- * folio above, make sure we set_page_extent_mapped here so we have any
+ * folio above, make sure we call set_folio_extent_mapped() here so we have any
* of the subpage blocksize stuff we need in place.
*/
ret = set_folio_extent_mapped(folio);
@@ -3793,7 +3722,6 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
BTRFS_INODE_PREALLOC);
- btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
return ret;
@@ -4399,8 +4327,18 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
WARN_ON(!first_cow && level == 0);
node = rc->backref_cache.path[level];
- BUG_ON(node->bytenr != buf->start &&
- node->new_bytenr != buf->start);
+
+ /*
+ * If node->bytenr != buf->start and node->new_bytenr !=
+ * buf->start then we've got the wrong backref node for what we
+ * expected to see here and the cache is incorrect.
+ */
+ if (unlikely(node->bytenr != buf->start && node->new_bytenr != buf->start)) {
+ btrfs_err(fs_info,
+"bytenr %llu was found but our backref cache was expecting %llu or %llu",
+ buf->start, node->bytenr, node->new_bytenr);
+ return -EUCLEAN;
+ }
btrfs_backref_drop_node_buffer(node);
atomic_inc(&cow->refs);
@@ -4500,10 +4438,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
return ret;
}
new_root->reloc_root = btrfs_grab_root(reloc_root);
-
- if (rc->create_reloc_tree)
- ret = clone_backref_node(trans, rc, root, reloc_root);
- return ret;
+ return 0;
}
/*
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 33962671a96c..e22e6b06927a 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -197,7 +197,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
write_extent_buffer(l, item, ptr, sizeof(*item));
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
out:
btrfs_free_path(path);
return ret;
@@ -447,7 +446,6 @@ again:
btrfs_set_root_ref_name_len(leaf, ref, name->len);
ptr = (unsigned long)(ref + 1);
write_extent_buffer(leaf, name->name, ptr, name->len);
- btrfs_mark_buffer_dirty(trans, leaf);
if (key.type == BTRFS_ROOT_BACKREF_KEY) {
btrfs_release_path(path);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 3a3427428074..531312efee8d 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1541,6 +1541,10 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
u64 extent_gen;
int ret;
+ if (unlikely(!extent_root)) {
+ btrfs_err(fs_info, "no valid extent root for scrub");
+ return -EUCLEAN;
+ }
memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
stripe->nr_sectors);
scrub_stripe_reset_bitmaps(stripe);
@@ -1656,8 +1660,7 @@ static u32 stripe_length(const struct scrub_stripe *stripe)
stripe->bg->start + stripe->bg->length - stripe->logical);
}
-static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
- struct scrub_stripe *stripe)
+static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe)
{
struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
struct btrfs_bio *bbio = NULL;
@@ -1704,8 +1707,18 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
&stripe_len, &bioc, &io_stripe, &mirror);
btrfs_put_bioc(bioc);
if (err < 0) {
- set_bit(i, &stripe->io_error_bitmap);
- set_bit(i, &stripe->error_bitmap);
+ if (err != -ENODATA) {
+ /*
+ * -ENODATA from btrfs_get_raid_extent_offset()
+ * means there is no entry for the corresponding
+ * range in the stripe tree. If the range is in
+ * the extent tree it is a preallocated extent
+ * and not an error, so only flag other failures
+ * here.
+ */
+ set_bit(i, &stripe->io_error_bitmap);
+ set_bit(i, &stripe->error_bitmap);
+ }
continue;
}
@@ -1743,7 +1756,7 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
if (btrfs_need_stripe_tree_update(fs_info, stripe->bg->flags)) {
- scrub_submit_extent_sector_read(sctx, stripe);
+ scrub_submit_extent_sector_read(stripe);
return;
}
@@ -1954,7 +1967,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
ASSERT(sctx->raid56_data_stripes);
/*
- * For data stripe search, we cannot re-use the same extent/csum paths,
+ * For data stripe search, we cannot reuse the same extent/csum paths,
* as the data stripe bytenr may be smaller than previous extent. Thus
* we have to use our own extent/csum paths.
*/
@@ -2103,7 +2116,6 @@ out:
*/
static int scrub_simple_mirror(struct scrub_ctx *sctx,
struct btrfs_block_group *bg,
- struct btrfs_chunk_map *map,
u64 logical_start, u64 logical_length,
struct btrfs_device *device,
u64 physical, int mirror_num)
@@ -2222,7 +2234,7 @@ static int scrub_simple_stripe(struct scrub_ctx *sctx,
* just RAID1, so we can reuse scrub_simple_mirror() to scrub
* this stripe.
*/
- ret = scrub_simple_mirror(sctx, bg, map, cur_logical,
+ ret = scrub_simple_mirror(sctx, bg, cur_logical,
BTRFS_STRIPE_LEN, device, cur_physical,
mirror_num);
if (ret)
@@ -2256,7 +2268,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
/* Offset inside the chunk */
u64 offset;
u64 stripe_logical;
- int stop_loop = 0;
/* Extent_path should be released by now. */
ASSERT(sctx->extent_path.nodes[0] == NULL);
@@ -2307,7 +2318,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
* Only @physical and @mirror_num needs to calculated using
* @stripe_index.
*/
- ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length,
+ ret = scrub_simple_mirror(sctx, bg, bg->start, bg->length,
scrub_dev, map->stripes[stripe_index].physical,
stripe_index + 1);
offset = 0;
@@ -2362,7 +2373,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
* We can reuse scrub_simple_mirror() here, as the repair part
* is still based on @mirror_num.
*/
- ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN,
+ ret = scrub_simple_mirror(sctx, bg, logical, BTRFS_STRIPE_LEN,
scrub_dev, physical, 1);
if (ret < 0)
goto out;
@@ -2370,14 +2381,8 @@ next:
logical += increment;
physical += BTRFS_STRIPE_LEN;
spin_lock(&sctx->stat_lock);
- if (stop_loop)
- sctx->stat.last_physical =
- map->stripes[stripe_index].physical + dev_stripe_len;
- else
- sctx->stat.last_physical = physical;
+ sctx->stat.last_physical = physical;
spin_unlock(&sctx->stat_lock);
- if (stop_loop)
- break;
}
out:
ret2 = flush_scrub_stripes(sctx);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 27306d98ec43..f437138fefbc 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -980,9 +980,7 @@ static int get_inode_gen(struct btrfs_root *root, u64 ino, u64 *gen)
return ret;
}
-typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
- struct fs_path *p,
- void *ctx);
+typedef int (*iterate_inode_ref_t)(u64 dir, struct fs_path *p, void *ctx);
/*
* Helper function to iterate the entries in ONE btrfs_inode_ref or
@@ -1007,8 +1005,6 @@ static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
u32 name_len;
char *start;
int ret = 0;
- int num = 0;
- int index;
u64 dir;
unsigned long name_off;
unsigned long elem_size;
@@ -1043,13 +1039,11 @@ static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
iref = (struct btrfs_inode_ref *)(ptr + cur);
name_len = btrfs_inode_ref_name_len(eb, iref);
name_off = (unsigned long)(iref + 1);
- index = btrfs_inode_ref_index(eb, iref);
dir = found_key->offset;
} else {
extref = (struct btrfs_inode_extref *)(ptr + cur);
name_len = btrfs_inode_extref_name_len(eb, extref);
name_off = (unsigned long)&extref->name;
- index = btrfs_inode_extref_index(eb, extref);
dir = btrfs_inode_extref_parent(eb, extref);
}
@@ -1094,10 +1088,9 @@ static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
}
cur += elem_size + name_len;
- ret = iterate(num, dir, index, p, ctx);
+ ret = iterate(dir, p, ctx);
if (ret)
goto out;
- num++;
}
out:
@@ -1227,8 +1220,7 @@ out:
return ret;
}
-static int __copy_first_ref(int num, u64 dir, int index,
- struct fs_path *p, void *ctx)
+static int __copy_first_ref(u64 dir, struct fs_path *p, void *ctx)
{
int ret;
struct fs_path *pt = ctx;
@@ -3768,7 +3760,6 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
struct recorded_ref *parent_ref,
const bool is_orphan)
{
- struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_key di_key;
@@ -3797,7 +3788,7 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
goto out;
}
- di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
+ di = btrfs_match_dir_item_name(path, parent_ref->name,
parent_ref->name_len);
if (!di) {
ret = 0;
@@ -4708,8 +4699,7 @@ out:
return ret;
}
-static int record_new_ref_if_needed(int num, u64 dir, int index,
- struct fs_path *name, void *ctx)
+static int record_new_ref_if_needed(u64 dir, struct fs_path *name, void *ctx)
{
int ret = 0;
struct send_ctx *sctx = ctx;
@@ -4738,8 +4728,7 @@ out:
return ret;
}
-static int record_deleted_ref_if_needed(int num, u64 dir, int index,
- struct fs_path *name, void *ctx)
+static int record_deleted_ref_if_needed(u64 dir, struct fs_path *name, void *ctx)
{
int ret = 0;
struct send_ctx *sctx = ctx;
@@ -5291,6 +5280,7 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
unsigned cur_len = min_t(unsigned, len,
PAGE_SIZE - pg_offset);
+again:
folio = filemap_lock_folio(mapping, index);
if (IS_ERR(folio)) {
page_cache_sync_readahead(mapping,
@@ -5323,6 +5313,11 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
ret = -EIO;
break;
}
+ if (folio->mapping != mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
+ goto again;
+ }
}
memcpy_from_folio(sctx->send_buf + sctx->send_size, folio,
@@ -5677,10 +5672,11 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
* Note that send_buf is a mapping of send_buf_pages, so this is really
* reading into send_buf.
*/
- ret = btrfs_encoded_read_regular_fill_pages(BTRFS_I(inode), offset,
+ ret = btrfs_encoded_read_regular_fill_pages(BTRFS_I(inode),
disk_bytenr, disk_num_bytes,
sctx->send_buf_pages +
- (data_offset >> PAGE_SHIFT));
+ (data_offset >> PAGE_SHIFT),
+ NULL);
if (ret)
goto out;
@@ -7190,13 +7186,11 @@ static int changed_extent(struct send_ctx *sctx,
static int changed_verity(struct send_ctx *sctx, enum btrfs_compare_tree_result result)
{
- int ret = 0;
-
if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
if (result == BTRFS_COMPARE_TREE_NEW)
sctx->cur_inode_needs_verity = true;
}
- return ret;
+ return 0;
}
static int dir_changed(struct send_ctx *sctx, u64 dir)
@@ -7265,7 +7259,7 @@ static int changed_cb(struct btrfs_path *left_path,
enum btrfs_compare_tree_result result,
struct send_ctx *sctx)
{
- int ret = 0;
+ int ret;
/*
* We can not hold the commit root semaphore here. This is because in
@@ -7325,7 +7319,6 @@ static int changed_cb(struct btrfs_path *left_path,
return 0;
}
result = BTRFS_COMPARE_TREE_CHANGED;
- ret = 0;
}
sctx->left_path = left_path;
@@ -8137,7 +8130,20 @@ long btrfs_ioctl_send(struct btrfs_inode *inode, const struct btrfs_ioctl_send_a
* making it RW. This also protects against deletion.
*/
spin_lock(&send_root->root_item_lock);
- if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
+ /*
+ * Unlikely but possible, if the subvolume is marked for deletion but
+ * is slow to remove the directory entry, send can still be started.
+ */
+ if (btrfs_root_dead(send_root)) {
+ spin_unlock(&send_root->root_item_lock);
+ return -EPERM;
+ }
+ /* Userspace tools do the checks and warn the user if it's not RO. */
+ if (!btrfs_root_readonly(send_root)) {
+ spin_unlock(&send_root->root_item_lock);
+ return -EPERM;
+ }
+ if (send_root->dedupe_in_progress) {
dedupe_in_progress_warn(send_root);
spin_unlock(&send_root->root_item_lock);
return -EAGAIN;
@@ -8146,15 +8152,6 @@ long btrfs_ioctl_send(struct btrfs_inode *inode, const struct btrfs_ioctl_send_a
spin_unlock(&send_root->root_item_lock);
/*
- * Userspace tools do the checks and warn the user if it's
- * not RO.
- */
- if (!btrfs_root_readonly(send_root)) {
- ret = -EPERM;
- goto out;
- }
-
- /*
* Check that we don't overflow at later allocations, we request
* clone_sources_count + 1 items, and compare to unsigned long inside
* access_ok. Also set an upper limit for allocation size so this can't
@@ -8219,15 +8216,6 @@ long btrfs_ioctl_send(struct btrfs_inode *inode, const struct btrfs_ioctl_send_a
}
sctx->send_root = send_root;
- /*
- * Unlikely but possible, if the subvolume is marked for deletion but
- * is slow to remove the directory entry, send can still be started
- */
- if (btrfs_root_dead(sctx->send_root)) {
- ret = -EPERM;
- goto out;
- }
-
sctx->clone_roots_cnt = arg->clone_sources_count;
if (sctx->proto >= 2) {
diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h
index b07f4aa66878..9309886c5ea1 100644
--- a/fs/btrfs/send.h
+++ b/fs/btrfs/send.h
@@ -16,7 +16,7 @@ struct btrfs_ioctl_send_args;
#define BTRFS_SEND_STREAM_MAGIC "btrfs-stream"
/* Conditional support for the upcoming protocol version. */
-#ifdef CONFIG_BTRFS_DEBUG
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
#define BTRFS_SEND_STREAM_VERSION 3
#else
#define BTRFS_SEND_STREAM_VERSION 2
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index d5a9cd8a4fd8..a341d087567a 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -14,6 +14,7 @@
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
+#include "zoned.h"
/*
* HOW DOES SPACE RESERVATION WORK
@@ -127,6 +128,14 @@
* churn a lot and we can avoid making some extent tree modifications if we
* are able to delay for as long as possible.
*
+ * RESET_ZONES
+ * This state works only for the zoned mode. In zoned mode a region that
+ * was allocated and then freed cannot be reused until its zone is reset,
+ * due to the sequential write requirement of zones. The RESET_ZONES state
+ * resets the zones of an unused block group and lets us reuse the space.
+ * Reusing the space is faster than removing the block group and allocating
+ * another block group on the zones.
+ *
* ALLOC_CHUNK
* We will skip this the first time through space reservation, because of
* overcommit and we don't want to have a lot of useless metadata space when
@@ -316,7 +325,7 @@ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
found->bytes_used += block_group->used;
found->disk_used += block_group->used * factor;
found->bytes_readonly += block_group->bytes_super;
- btrfs_space_info_update_bytes_zone_unusable(info, found, block_group->zone_unusable);
+ btrfs_space_info_update_bytes_zone_unusable(found, block_group->zone_unusable);
if (block_group->length > 0)
found->full = 0;
btrfs_try_granting_tickets(info, found);
@@ -489,9 +498,7 @@ again:
if ((used + ticket->bytes <= space_info->total_bytes) ||
btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
flush)) {
- btrfs_space_info_update_bytes_may_use(fs_info,
- space_info,
- ticket->bytes);
+ btrfs_space_info_update_bytes_may_use(space_info, ticket->bytes);
remove_ticket(space_info, ticket);
ticket->bytes = 0;
space_info->tickets_id++;
@@ -834,6 +841,9 @@ static void flush_space(struct btrfs_fs_info *fs_info,
*/
ret = btrfs_commit_current_transaction(root);
break;
+ case RESET_ZONES:
+ ret = btrfs_reset_unused_block_groups(space_info, num_bytes);
+ break;
default:
ret = -ENOSPC;
break;
@@ -1086,9 +1096,14 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
enum btrfs_flush_state flush_state;
int commit_cycles = 0;
u64 last_tickets_id;
+ enum btrfs_flush_state final_state;
fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
+ if (btrfs_is_zoned(fs_info))
+ final_state = RESET_ZONES;
+ else
+ final_state = COMMIT_TRANS;
spin_lock(&space_info->lock);
to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
@@ -1141,7 +1156,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
flush_state++;
- if (flush_state > COMMIT_TRANS) {
+ if (flush_state > final_state) {
commit_cycles++;
if (commit_cycles > 2) {
if (maybe_fail_all_tickets(fs_info, space_info)) {
@@ -1155,7 +1170,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
}
}
spin_unlock(&space_info->lock);
- } while (flush_state <= COMMIT_TRANS);
+ } while (flush_state <= final_state);
}
/*
@@ -1279,13 +1294,17 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
* If we are freeing inodes, we want to make sure all delayed iputs have
* completed, because they could have been on an inode with i_nlink == 0, and
* thus have been truncated and freed up space. But again this space is not
- * immediately re-usable, it comes in the form of a delayed ref, which must be
+ * immediately reusable, it comes in the form of a delayed ref, which must be
* run and then the transaction must be committed.
*
* COMMIT_TRANS
* This is where we reclaim all of the pinned space generated by running the
* iputs
*
+ * RESET_ZONES
+ * This state works only for the zoned mode. We scan the unused block group
+ * list, reset their zones, and reuse the block groups.
+ *
* ALLOC_CHUNK_FORCE
* For data we start with alloc chunk force, however we could have been full
* before, and then the transaction commit could have freed new block groups,
@@ -1295,6 +1314,7 @@ static const enum btrfs_flush_state data_flush_states[] = {
FLUSH_DELALLOC_FULL,
RUN_DELAYED_IPUTS,
COMMIT_TRANS,
+ RESET_ZONES,
ALLOC_CHUNK_FORCE,
};
@@ -1386,6 +1406,7 @@ void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
static const enum btrfs_flush_state priority_flush_states[] = {
FLUSH_DELAYED_ITEMS_NR,
FLUSH_DELAYED_ITEMS,
+ RESET_ZONES,
ALLOC_CHUNK,
};
@@ -1399,6 +1420,7 @@ static const enum btrfs_flush_state evict_flush_states[] = {
FLUSH_DELALLOC_FULL,
ALLOC_CHUNK,
COMMIT_TRANS,
+ RESET_ZONES,
};
static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
@@ -1488,8 +1510,7 @@ static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
spin_unlock(&space_info->lock);
}
-static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
+static void wait_reserve_ticket(struct btrfs_space_info *space_info,
struct reserve_ticket *ticket)
{
@@ -1547,7 +1568,7 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
case BTRFS_RESERVE_FLUSH_DATA:
case BTRFS_RESERVE_FLUSH_ALL:
case BTRFS_RESERVE_FLUSH_ALL_STEAL:
- wait_reserve_ticket(fs_info, space_info, ticket);
+ wait_reserve_ticket(space_info, ticket);
break;
case BTRFS_RESERVE_FLUSH_LIMIT:
priority_reclaim_metadata_space(fs_info, space_info, ticket,
@@ -1691,8 +1712,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
if (!pending_tickets &&
((used + orig_bytes <= space_info->total_bytes) ||
btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
- btrfs_space_info_update_bytes_may_use(fs_info, space_info,
- orig_bytes);
+ btrfs_space_info_update_bytes_may_use(space_info, orig_bytes);
ret = 0;
}
@@ -1704,8 +1724,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
used = btrfs_space_info_used(space_info, false);
if (used + orig_bytes <= space_info->total_bytes) {
- btrfs_space_info_update_bytes_may_use(fs_info, space_info,
- orig_bytes);
+ btrfs_space_info_update_bytes_may_use(space_info, orig_bytes);
ret = 0;
}
}
@@ -1984,8 +2003,7 @@ static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
return unalloc < data_chunk_size;
}
-static void do_reclaim_sweep(const struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info, int raid)
+static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
{
struct btrfs_block_group *bg;
int thresh_pct;
@@ -2081,6 +2099,35 @@ void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
if (!btrfs_should_periodic_reclaim(space_info))
continue;
for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++)
- do_reclaim_sweep(fs_info, space_info, raid);
+ do_reclaim_sweep(space_info, raid);
+ }
+}
+
+void btrfs_return_free_space(struct btrfs_space_info *space_info, u64 len)
+{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+
+ lockdep_assert_held(&space_info->lock);
+
+ /* Prioritize the global reservation to receive the freed space. */
+ if (global_rsv->space_info != space_info)
+ goto grant;
+
+ spin_lock(&global_rsv->lock);
+ if (!global_rsv->full) {
+ u64 to_add = min(len, global_rsv->size - global_rsv->reserved);
+
+ global_rsv->reserved += to_add;
+ btrfs_space_info_update_bytes_may_use(space_info, to_add);
+ if (global_rsv->reserved >= global_rsv->size)
+ global_rsv->full = 1;
+ len -= to_add;
}
+ spin_unlock(&global_rsv->lock);
+
+grant:
+ /* Add to any tickets we may have. */
+ if (len)
+ btrfs_try_granting_tickets(fs_info, space_info);
}
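The new btrfs_return_free_space() above tops up the global block reserve first and only then grants whatever is left to waiting tickets, and its lockdep_assert_held() requires the caller to hold space_info->lock. A minimal, hypothetical caller sketch (not part of this patch) showing the locking the helper expects:

	/* Hypothetical caller: return freed reservation under the space_info lock. */
	spin_lock(&space_info->lock);
	btrfs_return_free_space(space_info, len);
	spin_unlock(&space_info->lock);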
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index efbecc0c5258..a96efdb5e681 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -79,6 +79,10 @@ enum btrfs_reserve_flush_enum {
BTRFS_RESERVE_FLUSH_EMERGENCY,
};
+/*
+ * Note that the order of the enum values is the order in which the flush
+ * states are attempted by the reclaim process in btrfs_async_reclaim_metadata_space().
+ */
enum btrfs_flush_state {
FLUSH_DELAYED_ITEMS_NR = 1,
FLUSH_DELAYED_ITEMS = 2,
@@ -91,6 +95,7 @@ enum btrfs_flush_state {
ALLOC_CHUNK_FORCE = 9,
RUN_DELAYED_IPUTS = 10,
COMMIT_TRANS = 11,
+ RESET_ZONES = 12,
};
struct btrfs_space_info {
@@ -229,10 +234,10 @@ static inline bool btrfs_mixed_space_info(const struct btrfs_space_info *space_i
*/
#define DECLARE_SPACE_INFO_UPDATE(name, trace_name) \
static inline void \
-btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \
- struct btrfs_space_info *sinfo, \
+btrfs_space_info_update_##name(struct btrfs_space_info *sinfo, \
s64 bytes) \
{ \
+ struct btrfs_fs_info *fs_info = sinfo->fs_info; \
const u64 abs_bytes = (bytes < 0) ? -bytes : bytes; \
lockdep_assert_held(&sinfo->lock); \
trace_update_##name(fs_info, sinfo, sinfo->name, bytes); \
@@ -275,13 +280,12 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush);
static inline void btrfs_space_info_free_bytes_may_use(
- struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
u64 num_bytes)
{
spin_lock(&space_info->lock);
- btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes);
- btrfs_try_granting_tickets(fs_info, space_info);
+ btrfs_space_info_update_bytes_may_use(space_info, -num_bytes);
+ btrfs_try_granting_tickets(space_info->fs_info, space_info);
spin_unlock(&space_info->lock);
}
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
@@ -295,5 +299,6 @@ void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool
bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info);
int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info);
void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info);
+void btrfs_return_free_space(struct btrfs_space_info *space_info, u64 len);
#endif /* BTRFS_SPACE_INFO_H */
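The DECLARE_SPACE_INFO_UPDATE() change above drops the explicit fs_info argument from the generated helpers and derives it from sinfo->fs_info instead, which is why every call site in space-info.c shrinks by one parameter. An illustrative before/after of one call site, taken from the hunks in this patch:

	/* Before: fs_info passed explicitly. */
	btrfs_space_info_update_bytes_may_use(fs_info, space_info, ticket->bytes);

	/* After: the helper reads space_info->fs_info itself. */
	btrfs_space_info_update_bytes_may_use(space_info, ticket->bytes);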
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index fe4d719d506b..722acf768396 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -140,12 +140,10 @@ struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
return ERR_PTR(-ENOMEM);
spin_lock_init(&ret->lock);
- if (type == BTRFS_SUBPAGE_METADATA) {
+ if (type == BTRFS_SUBPAGE_METADATA)
atomic_set(&ret->eb_refs, 0);
- } else {
- atomic_set(&ret->readers, 0);
- atomic_set(&ret->writers, 0);
- }
+ else
+ atomic_set(&ret->nr_locked, 0);
return ret;
}
@@ -221,62 +219,6 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
__start_bit; \
})
-void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 start, u32 len)
-{
- struct btrfs_subpage *subpage = folio_get_private(folio);
- const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
- const int nbits = len >> fs_info->sectorsize_bits;
- unsigned long flags;
-
-
- btrfs_subpage_assert(fs_info, folio, start, len);
-
- spin_lock_irqsave(&subpage->lock, flags);
- /*
- * Even though it's just for reading the page, no one should have
- * locked the subpage range.
- */
- ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
- bitmap_set(subpage->bitmaps, start_bit, nbits);
- atomic_add(nbits, &subpage->readers);
- spin_unlock_irqrestore(&subpage->lock, flags);
-}
-
-void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 start, u32 len)
-{
- struct btrfs_subpage *subpage = folio_get_private(folio);
- const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
- const int nbits = len >> fs_info->sectorsize_bits;
- unsigned long flags;
- bool is_data;
- bool last;
-
- btrfs_subpage_assert(fs_info, folio, start, len);
- is_data = is_data_inode(BTRFS_I(folio->mapping->host));
-
- spin_lock_irqsave(&subpage->lock, flags);
-
- /* The range should have already been locked. */
- ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
- ASSERT(atomic_read(&subpage->readers) >= nbits);
-
- bitmap_clear(subpage->bitmaps, start_bit, nbits);
- last = atomic_sub_and_test(nbits, &subpage->readers);
-
- /*
- * For data we need to unlock the page if the last read has finished.
- *
- * And please don't replace @last with atomic_sub_and_test() call
- * inside if () condition.
- * As we want the atomic_sub_and_test() to be always executed.
- */
- if (is_data && last)
- folio_unlock(folio);
- spin_unlock_irqrestore(&subpage->lock, flags);
-}
-
static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
u64 orig_start = *start;
@@ -295,28 +237,8 @@ static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
orig_start + orig_len) - *start;
}
-static void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 start, u32 len)
-{
- struct btrfs_subpage *subpage = folio_get_private(folio);
- const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
- const int nbits = (len >> fs_info->sectorsize_bits);
- unsigned long flags;
- int ret;
-
- btrfs_subpage_assert(fs_info, folio, start, len);
-
- spin_lock_irqsave(&subpage->lock, flags);
- ASSERT(atomic_read(&subpage->readers) == 0);
- ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
- bitmap_set(subpage->bitmaps, start_bit, nbits);
- ret = atomic_add_return(nbits, &subpage->writers);
- ASSERT(ret == nbits);
- spin_unlock_irqrestore(&subpage->lock, flags);
-}
-
-static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 start, u32 len)
+static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len)
{
struct btrfs_subpage *subpage = folio_get_private(folio);
const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
@@ -334,9 +256,9 @@ static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_inf
* extent_clear_unlock_delalloc() for compression path.
*
* This @locked_page is locked by plain lock_page(), thus its
- * subpage::writers is 0. Handle them in a special way.
+ * subpage::locked is 0. Handle them in a special way.
*/
- if (atomic_read(&subpage->writers) == 0) {
+ if (atomic_read(&subpage->nr_locked) == 0) {
spin_unlock_irqrestore(&subpage->lock, flags);
return true;
}
@@ -345,40 +267,13 @@ static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_inf
clear_bit(bit, subpage->bitmaps);
cleared++;
}
- ASSERT(atomic_read(&subpage->writers) >= cleared);
- last = atomic_sub_and_test(cleared, &subpage->writers);
+ ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
+ last = atomic_sub_and_test(cleared, &subpage->nr_locked);
spin_unlock_irqrestore(&subpage->lock, flags);
return last;
}
/*
- * Lock a folio for delalloc page writeback.
- *
- * Return -EAGAIN if the page is not properly initialized.
- * Return 0 with the page locked, and writer counter updated.
- *
- * Even with 0 returned, the page still need extra check to make sure
- * it's really the correct page, as the caller is using
- * filemap_get_folios_contig(), which can race with page invalidating.
- */
-int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 start, u32 len)
-{
- if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
- folio_lock(folio);
- return 0;
- }
- folio_lock(folio);
- if (!folio_test_private(folio) || !folio_get_private(folio)) {
- folio_unlock(folio);
- return -EAGAIN;
- }
- btrfs_subpage_clamp_range(folio, &start, &len);
- btrfs_subpage_start_writer(fs_info, folio, start, len);
- return 0;
-}
-
-/*
* Handle different locked folios:
*
* - Non-subpage folio
@@ -394,8 +289,8 @@ int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
* bitmap, reduce the writer lock number, and unlock the page if that's
* the last locked range.
*/
-void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 start, u32 len)
+void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len)
{
struct btrfs_subpage *subpage = folio_get_private(folio);
@@ -408,24 +303,24 @@ void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
/*
* For subpage case, there are two types of locked page. With or
- * without writers number.
+ * without locked number.
*
- * Since we own the page lock, no one else could touch subpage::writers
+ * Since we own the page lock, no one else could touch subpage::locked
* and we are safe to do several atomic operations without spinlock.
*/
- if (atomic_read(&subpage->writers) == 0) {
- /* No writers, locked by plain lock_page(). */
+ if (atomic_read(&subpage->nr_locked) == 0) {
+ /* No subpage lock, locked by plain lock_page(). */
folio_unlock(folio);
return;
}
btrfs_subpage_clamp_range(folio, &start, &len);
- if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
+ if (btrfs_subpage_end_and_test_lock(fs_info, folio, start, len))
folio_unlock(folio);
}
-void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
- struct folio *folio, unsigned long bitmap)
+void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, unsigned long bitmap)
{
struct btrfs_subpage *subpage = folio_get_private(folio);
const int start_bit = fs_info->sectors_per_page * btrfs_bitmap_nr_locked;
@@ -434,13 +329,13 @@ void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
int cleared = 0;
int bit;
- if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
+ if (!btrfs_is_subpage(fs_info, folio->mapping)) {
folio_unlock(folio);
return;
}
- if (atomic_read(&subpage->writers) == 0) {
- /* No writers, locked by plain lock_page(). */
+ if (atomic_read(&subpage->nr_locked) == 0) {
+ /* No subpage lock, locked by plain lock_page(). */
folio_unlock(folio);
return;
}
@@ -450,8 +345,8 @@ void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
if (test_and_clear_bit(bit + start_bit, subpage->bitmaps))
cleared++;
}
- ASSERT(atomic_read(&subpage->writers) >= cleared);
- last = atomic_sub_and_test(cleared, &subpage->writers);
+ ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
+ last = atomic_sub_and_test(cleared, &subpage->nr_locked);
spin_unlock_irqrestore(&subpage->lock, flags);
if (last)
folio_unlock(folio);
@@ -740,6 +635,28 @@ IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
folio_test_checked);
+#define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst) \
+{ \
+ const int sectors_per_page = fs_info->sectors_per_page; \
+ \
+ ASSERT(sectors_per_page < BITS_PER_LONG); \
+ *dst = bitmap_read(subpage->bitmaps, \
+ sectors_per_page * btrfs_bitmap_nr_##name, \
+ sectors_per_page); \
+}
+
+#define SUBPAGE_DUMP_BITMAP(fs_info, folio, name, start, len) \
+{ \
+ const struct btrfs_subpage *subpage = folio_get_private(folio); \
+ unsigned long bitmap; \
+ \
+ GET_SUBPAGE_BITMAP(subpage, fs_info, name, &bitmap); \
+ btrfs_warn(fs_info, \
+ "dumpping bitmap start=%llu len=%u folio=%llu " #name "_bitmap=%*pbl", \
+ start, len, folio_pos(folio), \
+ fs_info->sectors_per_page, &bitmap); \
+}
+
/*
* Make sure not only the page dirty bit is cleared, but also subpage dirty bit
* is cleared.
@@ -765,6 +682,10 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
subpage = folio_get_private(folio);
ASSERT(subpage);
spin_lock_irqsave(&subpage->lock, flags);
+ if (unlikely(!bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits))) {
+ SUBPAGE_DUMP_BITMAP(fs_info, folio, dirty, start, len);
+ ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
+ }
ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
spin_unlock_irqrestore(&subpage->lock, flags);
}
@@ -776,8 +697,8 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
* This populates the involved subpage ranges so that subpage helpers can
* properly unlock them.
*/
-void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 start, u32 len)
+void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len)
{
struct btrfs_subpage *subpage;
unsigned long flags;
@@ -794,70 +715,16 @@ void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
nbits = len >> fs_info->sectorsize_bits;
spin_lock_irqsave(&subpage->lock, flags);
/* Target range should not yet be locked. */
- ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
+ if (unlikely(!bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits))) {
+ SUBPAGE_DUMP_BITMAP(fs_info, folio, locked, start, len);
+ ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
+ }
bitmap_set(subpage->bitmaps, start_bit, nbits);
- ret = atomic_add_return(nbits, &subpage->writers);
+ ret = atomic_add_return(nbits, &subpage->nr_locked);
ASSERT(ret <= fs_info->sectors_per_page);
spin_unlock_irqrestore(&subpage->lock, flags);
}
-/*
- * Find any subpage writer locked range inside @folio, starting at file offset
- * @search_start. The caller should ensure the folio is locked.
- *
- * Return true and update @found_start_ret and @found_len_ret to the first
- * writer locked range.
- * Return false if there is no writer locked range.
- */
-bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 search_start,
- u64 *found_start_ret, u32 *found_len_ret)
-{
- struct btrfs_subpage *subpage = folio_get_private(folio);
- const u32 sectors_per_page = fs_info->sectors_per_page;
- const unsigned int len = PAGE_SIZE - offset_in_page(search_start);
- const unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
- locked, search_start, len);
- const unsigned int locked_bitmap_start = sectors_per_page * btrfs_bitmap_nr_locked;
- const unsigned int locked_bitmap_end = locked_bitmap_start + sectors_per_page;
- unsigned long flags;
- int first_zero;
- int first_set;
- bool found = false;
-
- ASSERT(folio_test_locked(folio));
- spin_lock_irqsave(&subpage->lock, flags);
- first_set = find_next_bit(subpage->bitmaps, locked_bitmap_end, start_bit);
- if (first_set >= locked_bitmap_end)
- goto out;
-
- found = true;
-
- *found_start_ret = folio_pos(folio) +
- ((first_set - locked_bitmap_start) << fs_info->sectorsize_bits);
- /*
- * Since @first_set is ensured to be smaller than locked_bitmap_end
- * here, @found_start_ret should be inside the folio.
- */
- ASSERT(*found_start_ret < folio_pos(folio) + PAGE_SIZE);
-
- first_zero = find_next_zero_bit(subpage->bitmaps, locked_bitmap_end, first_set);
- *found_len_ret = (first_zero - first_set) << fs_info->sectorsize_bits;
-out:
- spin_unlock_irqrestore(&subpage->lock, flags);
- return found;
-}
-
-#define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst) \
-{ \
- const int sectors_per_page = fs_info->sectors_per_page; \
- \
- ASSERT(sectors_per_page < BITS_PER_LONG); \
- *dst = bitmap_read(subpage->bitmaps, \
- sectors_per_page * btrfs_bitmap_nr_##name, \
- sectors_per_page); \
-}
-
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
@@ -868,6 +735,7 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
unsigned long writeback_bitmap;
unsigned long ordered_bitmap;
unsigned long checked_bitmap;
+ unsigned long locked_bitmap;
unsigned long flags;
ASSERT(folio_test_private(folio) && folio_get_private(folio));
@@ -880,15 +748,16 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
GET_SUBPAGE_BITMAP(subpage, fs_info, writeback, &writeback_bitmap);
GET_SUBPAGE_BITMAP(subpage, fs_info, ordered, &ordered_bitmap);
GET_SUBPAGE_BITMAP(subpage, fs_info, checked, &checked_bitmap);
- GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &checked_bitmap);
+ GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &locked_bitmap);
spin_unlock_irqrestore(&subpage->lock, flags);
dump_page(folio_page(folio, 0), "btrfs subpage dump");
btrfs_warn(fs_info,
-"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
+"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
start, len, folio_pos(folio),
sectors_per_page, &uptodate_bitmap,
sectors_per_page, &dirty_bitmap,
+ sectors_per_page, &locked_bitmap,
sectors_per_page, &writeback_bitmap,
sectors_per_page, &ordered_bitmap,
sectors_per_page, &checked_bitmap);
diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
index 4b85d91d0e18..44fff1f4eac4 100644
--- a/fs/btrfs/subpage.h
+++ b/fs/btrfs/subpage.h
@@ -45,14 +45,6 @@ enum {
struct btrfs_subpage {
/* Common members for both data and metadata pages */
spinlock_t lock;
- /*
- * Both data and metadata needs to track how many readers are for the
- * page.
- * Data relies on @readers to unlock the page when last reader finished.
- * While metadata doesn't need page unlock, it needs to prevent
- * page::private get cleared before the last end_page_read().
- */
- atomic_t readers;
union {
/*
* Structures only used by metadata
@@ -62,8 +54,12 @@ struct btrfs_subpage {
*/
atomic_t eb_refs;
- /* Structures only used by data */
- atomic_t writers;
+ /*
+ * Structures only used by data.
+ *
+ * How many sectors inside the page are locked.
+ */
+ atomic_t nr_locked;
};
unsigned long bitmaps[];
};
@@ -95,23 +91,12 @@ void btrfs_free_subpage(struct btrfs_subpage *subpage);
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
-void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 start, u32 len);
-void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 start, u32 len);
-
-int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 start, u32 len);
-void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 start, u32 len);
-void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 start, u32 len);
-void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
- struct folio *folio, unsigned long bitmap);
-bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 search_start,
- u64 *found_start_ret, u32 *found_len_ret);
-
+void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len);
+void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, u64 start, u32 len);
+void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, unsigned long bitmap);
/*
* Template for subpage related operations.
*
@@ -152,6 +137,19 @@ DECLARE_BTRFS_SUBPAGE_OPS(writeback);
DECLARE_BTRFS_SUBPAGE_OPS(ordered);
DECLARE_BTRFS_SUBPAGE_OPS(checked);
+/*
+ * Helper for error cleanup, where a folio will have its dirty flag cleared,
+ * with writeback started and finished.
+ */
+static inline void btrfs_folio_clamp_finish_io(struct btrfs_fs_info *fs_info,
+ struct folio *locked_folio,
+ u64 start, u32 len)
+{
+ btrfs_folio_clamp_clear_dirty(fs_info, locked_folio, start, len);
+ btrfs_folio_clamp_set_writeback(fs_info, locked_folio, start, len);
+ btrfs_folio_clamp_clear_writeback(fs_info, locked_folio, start, len);
+}
+
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len);
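With the separate reader/writer counters folded into the single nr_locked counter, data folios now use one lock/unlock pair for subpage ranges. A hedged sketch (not taken from the patch) of the expected pairing of the helpers declared above:

	/* Record the range as locked; nr_locked grows by one per sector. */
	btrfs_folio_set_lock(fs_info, folio, start, len);

	/* ... submit IO for [start, start + len) ... */

	/* Release the range; the folio is unlocked once nr_locked drops to zero. */
	btrfs_folio_end_lock(fs_info, folio, start, len);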
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 98fa0f382480..f809c3200c21 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -28,7 +28,6 @@
#include <linux/btrfs.h>
#include <linux/security.h>
#include <linux/fs_parser.h>
-#include <linux/swap.h>
#include "messages.h"
#include "delayed-inode.h"
#include "ctree.h"
@@ -340,6 +339,15 @@ static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
fallthrough;
case Opt_compress:
case Opt_compress_type:
+ /*
+ * Provide the same semantics as older kernels that don't use the fs
+ * context: specifying the "compress" option clears "force-compress"
+ * without the need to pass "compress-force=[no|none]" before
+ * specifying "compress".
+ */
+ if (opt != Opt_compress_force && opt != Opt_compress_force_type)
+ btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS);
+
if (opt == Opt_compress || opt == Opt_compress_force) {
ctx->compress_type = BTRFS_COMPRESS_ZLIB;
ctx->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
@@ -937,8 +945,7 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec
}
static int btrfs_fill_super(struct super_block *sb,
- struct btrfs_fs_devices *fs_devices,
- void *data)
+ struct btrfs_fs_devices *fs_devices)
{
struct inode *inode;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
@@ -962,9 +969,9 @@ static int btrfs_fill_super(struct super_block *sb,
return err;
}
- err = open_ctree(sb, fs_devices, (char *)data);
+ err = open_ctree(sb, fs_devices);
if (err) {
- btrfs_err(fs_info, "open_ctree failed");
+ btrfs_err(fs_info, "open_ctree failed: %d", err);
return err;
}
@@ -1498,8 +1505,7 @@ static int btrfs_reconfigure(struct fs_context *fc)
sync_filesystem(sb);
set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
- if (!mount_reconfigure &&
- !btrfs_check_options(fs_info, &ctx->mount_opt, fc->sb_flags))
+ if (!btrfs_check_options(fs_info, &ctx->mount_opt, fc->sb_flags))
return -EINVAL;
ret = btrfs_check_features(fs_info, !(fc->sb_flags & SB_RDONLY));
@@ -1879,18 +1885,21 @@ static int btrfs_get_tree_super(struct fs_context *fc)
if (sb->s_root) {
btrfs_close_devices(fs_devices);
- if ((fc->sb_flags ^ sb->s_flags) & SB_RDONLY)
- ret = -EBUSY;
+ /*
+ * At this stage we may have an RO flag mismatch between fc->sb_flags
+ * and sb->s_flags. The caller should detect such a mismatch and
+ * reconfigure with the sb->s_umount rwsem held if needed.
+ */
} else {
snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
shrinker_debugfs_rename(sb->s_shrink, "sb-btrfs:%s", sb->s_id);
btrfs_sb(sb)->bdev_holder = &btrfs_fs_type;
- ret = btrfs_fill_super(sb, fs_devices, NULL);
- }
-
- if (ret) {
- deactivate_locked_super(sb);
- return ret;
+ ret = btrfs_fill_super(sb, fs_devices);
+ if (ret) {
+ deactivate_locked_super(sb);
+ return ret;
+ }
}
btrfs_clear_oneshot_options(fs_info);
@@ -1971,59 +1980,23 @@ error:
* fsconfig(FSCONFIG_SET_FLAG, "ro"). This option is seen by the filesystem
* in fc->sb_flags.
*
- * This disambiguation has rather positive consequences. Mounting a subvolume
- * ro will not also turn the superblock ro. Only the mount for the subvolume
- * will become ro.
- *
- * So, if the superblock creation request comes from the new mount API the
- * caller must have explicitly done:
- *
- * fsconfig(FSCONFIG_SET_FLAG, "ro")
- * fsmount/mount_setattr(MOUNT_ATTR_RDONLY)
- *
- * IOW, at some point the caller must have explicitly turned the whole
- * superblock ro and we shouldn't just undo it like we did for the old mount
- * API. In any case, it lets us avoid the hack in the new mount API.
- *
- * Consequently, the remounting hack must only be used for requests originating
- * from the old mount API and should be marked for full deprecation so it can be
- * turned off in a couple of years.
- *
- * The new mount API has no reason to support this hack.
+ * However, the util-linux mount command already uses the new mount API and
+ * still sets fsconfig(FSCONFIG_SET_FLAG, "ro") regardless of whether it's
+ * btrfs, which makes the whole super block RO. To make per-subvolume mounts
+ * with different options work we need to keep backward compatibility.
*/
-static struct vfsmount *btrfs_reconfigure_for_mount(struct fs_context *fc)
+static int btrfs_reconfigure_for_mount(struct fs_context *fc, struct vfsmount *mnt)
{
- struct vfsmount *mnt;
- int ret;
- const bool ro2rw = !(fc->sb_flags & SB_RDONLY);
-
- /*
- * We got an EBUSY because our SB_RDONLY flag didn't match the existing
- * super block, so invert our setting here and retry the mount so we
- * can get our vfsmount.
- */
- if (ro2rw)
- fc->sb_flags |= SB_RDONLY;
- else
- fc->sb_flags &= ~SB_RDONLY;
-
- mnt = fc_mount(fc);
- if (IS_ERR(mnt))
- return mnt;
+ int ret = 0;
- if (!fc->oldapi || !ro2rw)
- return mnt;
+ if (fc->sb_flags & SB_RDONLY)
+ return ret;
- /* We need to convert to rw, call reconfigure. */
- fc->sb_flags &= ~SB_RDONLY;
down_write(&mnt->mnt_sb->s_umount);
- ret = btrfs_reconfigure(fc);
+ if (!(fc->sb_flags & SB_RDONLY) && (mnt->mnt_sb->s_flags & SB_RDONLY))
+ ret = btrfs_reconfigure(fc);
up_write(&mnt->mnt_sb->s_umount);
- if (ret) {
- mntput(mnt);
- return ERR_PTR(ret);
- }
- return mnt;
+ return ret;
}
static int btrfs_get_tree_subvol(struct fs_context *fc)
@@ -2033,6 +2006,7 @@ static int btrfs_get_tree_subvol(struct fs_context *fc)
struct fs_context *dup_fc;
struct dentry *dentry;
struct vfsmount *mnt;
+ int ret = 0;
/*
* Setup a dummy root and fs_info for test/set super. This is because
@@ -2075,11 +2049,16 @@ static int btrfs_get_tree_subvol(struct fs_context *fc)
fc->security = NULL;
mnt = fc_mount(dup_fc);
- if (PTR_ERR_OR_ZERO(mnt) == -EBUSY)
- mnt = btrfs_reconfigure_for_mount(dup_fc);
- put_fs_context(dup_fc);
- if (IS_ERR(mnt))
+ if (IS_ERR(mnt)) {
+ put_fs_context(dup_fc);
return PTR_ERR(mnt);
+ }
+ ret = btrfs_reconfigure_for_mount(dup_fc, mnt);
+ put_fs_context(dup_fc);
+ if (ret) {
+ mntput(mnt);
+ return ret;
+ }
/*
* This frees ->subvol_name, because if it isn't set we have to
@@ -2198,7 +2177,8 @@ static struct file_system_type btrfs_fs_type = {
.init_fs_context = btrfs_init_fs_context,
.parameters = btrfs_fs_parameters,
.kill_sb = btrfs_kill_super,
- .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA | FS_ALLOW_IDMAP,
+ .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA |
+ FS_ALLOW_IDMAP | FS_MGTIME,
};
MODULE_ALIAS_FS("btrfs");
@@ -2263,7 +2243,10 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false);
if (IS_ERR_OR_NULL(device)) {
mutex_unlock(&uuid_mutex);
- ret = PTR_ERR(device);
+ if (IS_ERR(device))
+ ret = PTR_ERR(device);
+ else
+ ret = 0;
break;
}
ret = !(device->fs_devices->num_devices ==
@@ -2402,13 +2385,7 @@ static long btrfs_nr_cached_objects(struct super_block *sb, struct shrink_contro
trace_btrfs_extent_map_shrinker_count(fs_info, nr);
- /*
- * Only report the real number for DEBUG builds, as there are reports of
- * serious performance degradation caused by too frequent shrinks.
- */
- if (IS_ENABLED(CONFIG_BTRFS_DEBUG))
- return nr;
- return 0;
+ return nr;
}
static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_control *sc)
@@ -2416,16 +2393,10 @@ static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_cont
const long nr_to_scan = min_t(unsigned long, LONG_MAX, sc->nr_to_scan);
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
- /*
- * We may be called from any task trying to allocate memory and we don't
- * want to slow it down with scanning and dropping extent maps. It would
- * also cause heavy lock contention if many tasks concurrently enter
- * here. Therefore only allow kswapd tasks to scan and drop extent maps.
- */
- if (!current_is_kswapd())
- return 0;
+ btrfs_free_extent_maps(fs_info, nr_to_scan);
- return btrfs_free_extent_maps(fs_info, nr_to_scan);
+ /* The extent map shrinker runs asynchronously, so always return 0. */
+ return 0;
}
static const struct super_operations btrfs_super_ops = {
@@ -2475,6 +2446,9 @@ static __cold void btrfs_interface_exit(void)
static int __init btrfs_print_mod_info(void)
{
static const char options[] = ""
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ ", experimental=on"
+#endif
#ifdef CONFIG_BTRFS_DEBUG
", debug=on"
#endif
@@ -2495,7 +2469,17 @@ static int __init btrfs_print_mod_info(void)
", fsverity=no"
#endif
;
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ if (btrfs_get_mod_read_policy() == NULL)
+ pr_info("Btrfs loaded%s\n", options);
+ else
+ pr_info("Btrfs loaded%s, read_policy=%s\n",
+ options, btrfs_get_mod_read_policy());
+#else
pr_info("Btrfs loaded%s\n", options);
+#endif
+
return 0;
}
@@ -2553,6 +2537,11 @@ static const struct init_sequence mod_init_seq[] = {
}, {
.init_func = extent_map_init,
.exit_func = extent_map_exit,
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ }, {
+ .init_func = btrfs_read_policy_init,
+ .exit_func = NULL,
+#endif
}, {
.init_func = ordered_data_init,
.exit_func = ordered_data_exit,
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 03926ad467c9..53b846d99ece 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -295,7 +295,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(simple_quota, SIMPLE_QUOTA);
#ifdef CONFIG_BLK_DEV_ZONED
BTRFS_FEAT_ATTR_INCOMPAT(zoned, ZONED);
#endif
-#ifdef CONFIG_BTRFS_DEBUG
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
/* Remove once support for extent tree v2 is feature complete */
BTRFS_FEAT_ATTR_INCOMPAT(extent_tree_v2, EXTENT_TREE_V2);
/* Remove once support for raid stripe tree is feature complete. */
@@ -329,7 +329,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
#ifdef CONFIG_BLK_DEV_ZONED
BTRFS_FEAT_ATTR_PTR(zoned),
#endif
-#ifdef CONFIG_BTRFS_DEBUG
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
BTRFS_FEAT_ATTR_PTR(extent_tree_v2),
BTRFS_FEAT_ATTR_PTR(raid_stripe_tree),
#endif
@@ -1118,7 +1118,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->nodesize);
+ return sysfs_emit(buf, "%u\n", fs_info->nodesize);
}
BTRFS_ATTR(, nodesize, btrfs_nodesize_show);
@@ -1128,7 +1128,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize);
+ return sysfs_emit(buf, "%u\n", fs_info->sectorsize);
}
BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);
@@ -1180,7 +1180,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize);
+ return sysfs_emit(buf, "%u\n", fs_info->sectorsize);
}
BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show);
@@ -1305,7 +1305,73 @@ static ssize_t btrfs_temp_fsid_show(struct kobject *kobj,
}
BTRFS_ATTR(, temp_fsid, btrfs_temp_fsid_show);
-static const char * const btrfs_read_policy_name[] = { "pid" };
+static const char *btrfs_read_policy_name[] = {
+ "pid",
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ "round-robin",
+ "devid",
+#endif
+};
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+
+/* Global module configuration parameters. */
+static char *read_policy;
+char *btrfs_get_mod_read_policy(void)
+{
+ return read_policy;
+}
+
+/* Set perms to 0, disable /sys/module/btrfs/parameter/read_policy interface. */
+module_param(read_policy, charp, 0);
+MODULE_PARM_DESC(read_policy,
+"Global read policy: pid (default), round-robin[:<min_contig_read>], devid[:<devid>]");
+#endif
+
+int btrfs_read_policy_to_enum(const char *str, s64 *value_ret)
+{
+ char param[32] = { 0 };
+ char __maybe_unused *value_str;
+
+ if (!str || strlen(str) == 0)
+ return 0;
+
+ strncpy(param, str, sizeof(param) - 1);
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /* Separate value from input in policy:value format. */
+ value_str = strchr(param, ':');
+ if (value_str) {
+ int ret;
+
+ *value_str = 0;
+ value_str++;
+ if (!value_ret)
+ return -EINVAL;
+ ret = kstrtos64(value_str, 10, value_ret);
+ if (ret)
+ return -EINVAL;
+ if (*value_ret < 0)
+ return -ERANGE;
+ }
+#endif
+
+ return sysfs_match_string(btrfs_read_policy_name, param);
+}
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+int __init btrfs_read_policy_init(void)
+{
+ s64 value;
+
+ if (btrfs_read_policy_to_enum(read_policy, &value) == -EINVAL) {
+ btrfs_err(NULL, "invalid read policy or value %s", read_policy);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
static ssize_t btrfs_read_policy_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
@@ -1316,14 +1382,25 @@ static ssize_t btrfs_read_policy_show(struct kobject *kobj,
int i;
for (i = 0; i < BTRFS_NR_READ_POLICY; i++) {
- if (policy == i)
- ret += sysfs_emit_at(buf, ret, "%s[%s]",
- (ret == 0 ? "" : " "),
- btrfs_read_policy_name[i]);
- else
- ret += sysfs_emit_at(buf, ret, "%s%s",
- (ret == 0 ? "" : " "),
- btrfs_read_policy_name[i]);
+ if (ret != 0)
+ ret += sysfs_emit_at(buf, ret, " ");
+
+ if (i == policy)
+ ret += sysfs_emit_at(buf, ret, "[");
+
+ ret += sysfs_emit_at(buf, ret, "%s", btrfs_read_policy_name[i]);
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ if (i == BTRFS_READ_POLICY_RR)
+ ret += sysfs_emit_at(buf, ret, ":%u",
+ READ_ONCE(fs_devices->rr_min_contig_read));
+
+ if (i == BTRFS_READ_POLICY_DEVID)
+ ret += sysfs_emit_at(buf, ret, ":%llu",
+ READ_ONCE(fs_devices->read_devid));
+#endif
+ if (i == policy)
+ ret += sysfs_emit_at(buf, ret, "]");
}
ret += sysfs_emit_at(buf, ret, "\n");
@@ -1336,21 +1413,80 @@ static ssize_t btrfs_read_policy_store(struct kobject *kobj,
const char *buf, size_t len)
{
struct btrfs_fs_devices *fs_devices = to_fs_devs(kobj);
- int i;
+ int index;
+ s64 value = -1;
- for (i = 0; i < BTRFS_NR_READ_POLICY; i++) {
- if (sysfs_streq(buf, btrfs_read_policy_name[i])) {
- if (i != READ_ONCE(fs_devices->read_policy)) {
- WRITE_ONCE(fs_devices->read_policy, i);
- btrfs_info(fs_devices->fs_info,
- "read policy set to '%s'",
- btrfs_read_policy_name[i]);
+ index = btrfs_read_policy_to_enum(buf, &value);
+ if (index < 0)
+ return -EINVAL;
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /* If moving from RR then disable collecting fs stats. */
+ if (fs_devices->read_policy == BTRFS_READ_POLICY_RR && index != BTRFS_READ_POLICY_RR)
+ fs_devices->collect_fs_stats = false;
+
+ if (index == BTRFS_READ_POLICY_RR) {
+ if (value != -1) {
+ const u32 sectorsize = fs_devices->fs_info->sectorsize;
+
+ if (!IS_ALIGNED(value, sectorsize)) {
+ u64 temp_value = round_up(value, sectorsize);
+
+ btrfs_debug(fs_devices->fs_info,
+"read_policy: min contig read %lld should be multiple of sectorsize %u, rounded to %llu",
+ value, sectorsize, temp_value);
+ value = temp_value;
}
- return len;
+ } else {
+ value = BTRFS_DEFAULT_RR_MIN_CONTIG_READ;
}
+
+ if (index != READ_ONCE(fs_devices->read_policy) ||
+ value != READ_ONCE(fs_devices->rr_min_contig_read)) {
+ WRITE_ONCE(fs_devices->read_policy, index);
+ WRITE_ONCE(fs_devices->rr_min_contig_read, value);
+
+ btrfs_info(fs_devices->fs_info, "read policy set to '%s:%lld'",
+ btrfs_read_policy_name[index], value);
+ }
+
+ fs_devices->collect_fs_stats = true;
+
+ return len;
}
- return -EINVAL;
+ if (index == BTRFS_READ_POLICY_DEVID) {
+ if (value != -1) {
+ BTRFS_DEV_LOOKUP_ARGS(args);
+
+ /* Validate input devid. */
+ args.devid = value;
+ if (btrfs_find_device(fs_devices, &args) == NULL)
+ return -EINVAL;
+ } else {
+ /* Set default devid to the devid of the latest device. */
+ value = fs_devices->latest_dev->devid;
+ }
+
+ if (index != READ_ONCE(fs_devices->read_policy) ||
+ value != READ_ONCE(fs_devices->read_devid)) {
+ WRITE_ONCE(fs_devices->read_policy, index);
+ WRITE_ONCE(fs_devices->read_devid, value);
+
+ btrfs_info(fs_devices->fs_info, "read policy set to '%s:%llu'",
+ btrfs_read_policy_name[index], value);
+ }
+
+ return len;
+ }
+#endif
+ if (index != READ_ONCE(fs_devices->read_policy)) {
+ WRITE_ONCE(fs_devices->read_policy, index);
+ btrfs_info(fs_devices->fs_info, "read policy set to '%s'",
+ btrfs_read_policy_name[index]);
+ }
+
+ return len;
}
BTRFS_ATTR_RW(, read_policy, btrfs_read_policy_show, btrfs_read_policy_store);
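btrfs_read_policy_to_enum() splits an optional ":<value>" suffix from the policy name before matching it, so with CONFIG_BTRFS_EXPERIMENTAL a string such as "round-robin:65536" yields both an index and a value, while a plain "pid" leaves the value untouched. A hedged sketch of the expected behaviour, based on the function above (the literal inputs are examples, not values from the patch):

	s64 value = -1;
	int index;

	/* "round-robin:65536": index == BTRFS_READ_POLICY_RR, value == 65536. */
	index = btrfs_read_policy_to_enum("round-robin:65536", &value);

	/* "pid": index is the pid policy, value stays -1; unknown names return -EINVAL. */
	index = btrfs_read_policy_to_enum("pid", &value);

The sysfs store handler above uses the same parser, so writing "devid:2" to the read_policy attribute validates the devid via btrfs_find_device() before switching policies, and the read_policy module parameter accepts the same format.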
@@ -1390,7 +1526,7 @@ static ssize_t btrfs_bg_reclaim_threshold_store(struct kobject *kobj,
BTRFS_ATTR_RW(, bg_reclaim_threshold, btrfs_bg_reclaim_threshold_show,
btrfs_bg_reclaim_threshold_store);
-#ifdef CONFIG_BTRFS_DEBUG
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
static ssize_t btrfs_offload_csum_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
{
@@ -1450,7 +1586,7 @@ static const struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(, bg_reclaim_threshold),
BTRFS_ATTR_PTR(, commit_stats),
BTRFS_ATTR_PTR(, temp_fsid),
-#ifdef CONFIG_BTRFS_DEBUG
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
BTRFS_ATTR_PTR(, offload_csum),
#endif
NULL,
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
index e6a284c59809..3fc5c6f90dc4 100644
--- a/fs/btrfs/sysfs.h
+++ b/fs/btrfs/sysfs.h
@@ -47,5 +47,11 @@ void btrfs_sysfs_del_qgroups(struct btrfs_fs_info *fs_info);
int btrfs_sysfs_add_qgroups(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_del_one_qgroup(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *qgroup);
+int btrfs_read_policy_to_enum(const char *str, s64 *value);
+
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+int __init btrfs_read_policy_init(void);
+char *btrfs_get_mod_read_policy(void);
+#endif
#endif
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index ce50847e1e01..5eff8d7d2360 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -29,6 +29,8 @@ const char *test_error[] = {
[TEST_ALLOC_BLOCK_GROUP] = "cannot allocate block group",
[TEST_ALLOC_EXTENT_MAP] = "cannot allocate extent map",
[TEST_ALLOC_CHUNK_MAP] = "cannot allocate chunk map",
+ [TEST_ALLOC_IO_CONTEXT] = "cannot allocate io context",
+ [TEST_ALLOC_TRANSACTION] = "cannot allocate transaction",
};
static const struct super_operations btrfs_test_super_ops = {
@@ -141,6 +143,11 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
fs_info->nodesize = nodesize;
fs_info->sectorsize = sectorsize;
fs_info->sectorsize_bits = ilog2(sectorsize);
+
+ /* CRC32C csum size. */
+ fs_info->csum_size = 4;
+ fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) /
+ fs_info->csum_size;
set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
test_mnt->mnt_sb->s_fs_info = fs_info;
@@ -246,6 +253,15 @@ void btrfs_free_dummy_block_group(struct btrfs_block_group *cache)
kfree(cache);
}
+void btrfs_init_dummy_transaction(struct btrfs_transaction *trans, struct btrfs_fs_info *fs_info)
+{
+ memset(trans, 0, sizeof(*trans));
+ trans->fs_info = fs_info;
+ xa_init(&trans->delayed_refs.head_refs);
+ xa_init(&trans->delayed_refs.dirty_extents);
+ spin_lock_init(&trans->delayed_refs.lock);
+}
+
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
@@ -291,6 +307,12 @@ int btrfs_run_sanity_tests(void)
ret = btrfs_test_free_space_tree(sectorsize, nodesize);
if (ret)
goto out;
+ ret = btrfs_test_raid_stripe_tree(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_delayed_refs(sectorsize, nodesize);
+ if (ret)
+ goto out;
}
}
ret = btrfs_test_extent_map();
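btrfs_init_dummy_transaction() initializes just enough of a struct btrfs_transaction (the delayed-ref xarrays and lock) for the new delayed-ref tests. A hedged sketch of how a test could wire it to a dummy trans handle; the exact wiring used by delayed-refs-tests.c is assumed here, not quoted from the patch:

	struct btrfs_transaction transaction;
	struct btrfs_trans_handle trans;

	btrfs_init_dummy_transaction(&transaction, fs_info);
	btrfs_init_dummy_trans(&trans, fs_info);
	trans.transaction = &transaction;	/* assumed linkage for the tests */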
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index dc2f2ab15fa5..4307bdaa6749 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -6,6 +6,8 @@
#ifndef BTRFS_TESTS_H
#define BTRFS_TESTS_H
+#include <linux/types.h>
+
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_run_sanity_tests(void);
@@ -24,12 +26,15 @@ enum {
TEST_ALLOC_BLOCK_GROUP,
TEST_ALLOC_EXTENT_MAP,
TEST_ALLOC_CHUNK_MAP,
+ TEST_ALLOC_IO_CONTEXT,
+ TEST_ALLOC_TRANSACTION,
};
extern const char *test_error[];
struct btrfs_root;
struct btrfs_trans_handle;
+struct btrfs_transaction;
int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
@@ -37,7 +42,9 @@ int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
+int btrfs_test_raid_stripe_tree(u32 sectorsize, u32 nodesize);
int btrfs_test_extent_map(void);
+int btrfs_test_delayed_refs(u32 sectorsize, u32 nodesize);
struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize);
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
@@ -47,6 +54,7 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long lengt
void btrfs_free_dummy_block_group(struct btrfs_block_group *cache);
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
+void btrfs_init_dummy_transaction(struct btrfs_transaction *trans, struct btrfs_fs_info *fs_info);
struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info);
#else
static inline int btrfs_run_sanity_tests(void)
diff --git a/fs/btrfs/tests/delayed-refs-tests.c b/fs/btrfs/tests/delayed-refs-tests.c
new file mode 100644
index 000000000000..6558508c2ddf
--- /dev/null
+++ b/fs/btrfs/tests/delayed-refs-tests.c
@@ -0,0 +1,1015 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/sizes.h>
+#include "btrfs-tests.h"
+#include "../transaction.h"
+#include "../delayed-ref.h"
+#include "../extent-tree.h"
+
+#define FAKE_ROOT_OBJECTID 256
+#define FAKE_BYTENR 0
+#define FAKE_LEVEL 1
+#define FAKE_INO 256
+#define FAKE_FILE_OFFSET 0
+#define FAKE_PARENT SZ_1M
+
+struct ref_head_check {
+ u64 bytenr;
+ u64 num_bytes;
+ int ref_mod;
+ int total_ref_mod;
+ int must_insert;
+};
+
+struct ref_node_check {
+ u64 bytenr;
+ u64 num_bytes;
+ int ref_mod;
+ enum btrfs_delayed_ref_action action;
+ u8 type;
+ u64 parent;
+ u64 root;
+ u64 owner;
+ u64 offset;
+};
+
+static enum btrfs_ref_type ref_type_from_disk_ref_type(u8 type)
+{
+ if ((type == BTRFS_TREE_BLOCK_REF_KEY) ||
+ (type == BTRFS_SHARED_BLOCK_REF_KEY))
+ return BTRFS_REF_METADATA;
+ return BTRFS_REF_DATA;
+}
+
+static void delete_delayed_ref_head(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_root *delayed_refs =
+ &trans->transaction->delayed_refs;
+
+ spin_lock(&delayed_refs->lock);
+ spin_lock(&head->lock);
+ btrfs_delete_ref_head(fs_info, delayed_refs, head);
+ spin_unlock(&head->lock);
+ spin_unlock(&delayed_refs->lock);
+
+ btrfs_delayed_ref_unlock(head);
+ btrfs_put_delayed_ref_head(head);
+}
+
+static void delete_delayed_ref_node(struct btrfs_delayed_ref_head *head,
+ struct btrfs_delayed_ref_node *node)
+{
+ rb_erase_cached(&node->ref_node, &head->ref_tree);
+ RB_CLEAR_NODE(&node->ref_node);
+ if (!list_empty(&node->add_list))
+ list_del_init(&node->add_list);
+ btrfs_put_delayed_ref(node);
+}
+
+static int validate_ref_head(struct btrfs_delayed_ref_head *head,
+ struct ref_head_check *check)
+{
+ if (head->bytenr != check->bytenr) {
+ test_err("invalid bytenr have: %llu want: %llu", head->bytenr,
+ check->bytenr);
+ return -EINVAL;
+ }
+
+ if (head->num_bytes != check->num_bytes) {
+ test_err("invalid num_bytes have: %llu want: %llu",
+ head->num_bytes, check->num_bytes);
+ return -EINVAL;
+ }
+
+ if (head->ref_mod != check->ref_mod) {
+ test_err("invalid ref_mod have: %d want: %d", head->ref_mod,
+ check->ref_mod);
+ return -EINVAL;
+ }
+
+ if (head->total_ref_mod != check->total_ref_mod) {
+ test_err("invalid total_ref_mod have: %d want: %d",
+ head->total_ref_mod, check->total_ref_mod);
+ return -EINVAL;
+ }
+
+ if (head->must_insert_reserved != check->must_insert) {
+ test_err("invalid must_insert have: %d want: %d",
+ head->must_insert_reserved, check->must_insert);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_ref_node(struct btrfs_delayed_ref_node *node,
+ struct ref_node_check *check)
+{
+ if (node->bytenr != check->bytenr) {
+ test_err("invalid bytenr have: %llu want: %llu", node->bytenr,
+ check->bytenr);
+ return -EINVAL;
+ }
+
+ if (node->num_bytes != check->num_bytes) {
+ test_err("invalid num_bytes have: %llu want: %llu",
+ node->num_bytes, check->num_bytes);
+ return -EINVAL;
+ }
+
+ if (node->ref_mod != check->ref_mod) {
+ test_err("invalid ref_mod have: %d want: %d", node->ref_mod,
+ check->ref_mod);
+ return -EINVAL;
+ }
+
+ if (node->action != check->action) {
+ test_err("invalid action have: %d want: %d", node->action,
+ check->action);
+ return -EINVAL;
+ }
+
+ if (node->parent != check->parent) {
+ test_err("invalid parent have: %llu want: %llu", node->parent,
+ check->parent);
+ return -EINVAL;
+ }
+
+ if (node->ref_root != check->root) {
+ test_err("invalid root have: %llu want: %llu", node->ref_root,
+ check->root);
+ return -EINVAL;
+ }
+
+ if (node->type != check->type) {
+ test_err("invalid type have: %d want: %d", node->type,
+ check->type);
+ return -EINVAL;
+ }
+
+ if (btrfs_delayed_ref_owner(node) != check->owner) {
+ test_err("invalid owner have: %llu want: %llu",
+ btrfs_delayed_ref_owner(node), check->owner);
+ return -EINVAL;
+ }
+
+ if (btrfs_delayed_ref_offset(node) != check->offset) {
+ test_err("invalid offset have: %llu want: %llu",
+ btrfs_delayed_ref_offset(node), check->offset);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int simple_test(struct btrfs_trans_handle *trans,
+ struct ref_head_check *head_check,
+ struct ref_node_check *node_check)
+{
+ struct btrfs_delayed_ref_root *delayed_refs =
+ &trans->transaction->delayed_refs;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_head *head;
+ struct btrfs_delayed_ref_node *node;
+ struct btrfs_ref ref = {
+ .type = ref_type_from_disk_ref_type(node_check->type),
+ .action = node_check->action,
+ .parent = node_check->parent,
+ .ref_root = node_check->root,
+ .bytenr = node_check->bytenr,
+ .num_bytes = fs_info->nodesize,
+ };
+ int ret;
+
+ if (ref.type == BTRFS_REF_METADATA)
+ btrfs_init_tree_ref(&ref, node_check->owner, node_check->root,
+ false);
+ else
+ btrfs_init_data_ref(&ref, node_check->owner, node_check->offset,
+ node_check->root, true);
+
+ if (ref.type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ return ret;
+ }
+
+ head = btrfs_select_ref_head(fs_info, delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ return -EINVAL;
+ }
+
+ ret = -EINVAL;
+ if (validate_ref_head(head, head_check))
+ goto out;
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, node_check))
+ goto out;
+ ret = 0;
+out:
+ btrfs_unselect_ref_head(delayed_refs, head);
+ btrfs_destroy_delayed_refs(trans->transaction);
+ return ret;
+}
+
+/*
+ * These are simple tests: make sure that our btrfs_refs get turned into the
+ * appropriate btrfs_delayed_ref_node based on their settings and action.
+ */
+static int simple_tests(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct ref_head_check head_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 1,
+ .total_ref_mod = 1,
+ };
+ struct ref_node_check node_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 1,
+ .action = BTRFS_ADD_DELAYED_REF,
+ .type = BTRFS_TREE_BLOCK_REF_KEY,
+ .parent = 0,
+ .root = FAKE_ROOT_OBJECTID,
+ .owner = FAKE_LEVEL,
+ .offset = 0,
+ };
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single add tree block failed");
+ return -EINVAL;
+ }
+
+ node_check.type = BTRFS_EXTENT_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single add extent data failed");
+ return -EINVAL;
+ }
+
+ node_check.parent = FAKE_PARENT;
+ node_check.type = BTRFS_SHARED_BLOCK_REF_KEY;
+ node_check.owner = FAKE_LEVEL;
+ node_check.offset = 0;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single add shared block failed");
+ return -EINVAL;
+ }
+
+ node_check.type = BTRFS_SHARED_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single add shared data failed");
+ return -EINVAL;
+ }
+
+ head_check.ref_mod = -1;
+ head_check.total_ref_mod = -1;
+ node_check.action = BTRFS_DROP_DELAYED_REF;
+ node_check.type = BTRFS_TREE_BLOCK_REF_KEY;
+ node_check.owner = FAKE_LEVEL;
+ node_check.offset = 0;
+ node_check.parent = 0;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single drop tree block failed");
+ return -EINVAL;
+ }
+
+ node_check.type = BTRFS_EXTENT_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single drop extent data failed");
+ return -EINVAL;
+ }
+
+ node_check.parent = FAKE_PARENT;
+ node_check.type = BTRFS_SHARED_BLOCK_REF_KEY;
+ node_check.owner = FAKE_LEVEL;
+ node_check.offset = 0;
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single drop shared block failed");
+ return -EINVAL;
+ }
+
+ node_check.type = BTRFS_SHARED_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+ if (simple_test(trans, &head_check, &node_check)) {
+ test_err("single drop shared data failed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Merge tests: validate that delayed ref merging works properly, that the ref
+ * counts end up correct, and that delayed refs are deleted once they're no
+ * longer needed.
+ */
+static int merge_tests(struct btrfs_trans_handle *trans,
+ enum btrfs_ref_type type)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_head *head = NULL;
+ struct btrfs_delayed_ref_node *node;
+ struct btrfs_ref ref = {
+ .type = type,
+ .action = BTRFS_ADD_DELAYED_REF,
+ .parent = 0,
+ .ref_root = FAKE_ROOT_OBJECTID,
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ };
+ struct ref_head_check head_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 0,
+ .total_ref_mod = 0,
+ };
+ struct ref_node_check node_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 2,
+ .action = BTRFS_ADD_DELAYED_REF,
+ .parent = 0,
+ .root = FAKE_ROOT_OBJECTID,
+ };
+ int ret;
+
+ /*
+ * First add a ref and then drop it, make sure we get a head ref with a
+ * 0 total ref mod and no nodes.
+ */
+ if (type == BTRFS_REF_METADATA) {
+ node_check.type = BTRFS_TREE_BLOCK_REF_KEY;
+ node_check.owner = FAKE_LEVEL;
+ btrfs_init_tree_ref(&ref, FAKE_LEVEL, FAKE_ROOT_OBJECTID, false);
+ } else {
+ node_check.type = BTRFS_EXTENT_DATA_REF_KEY;
+ node_check.owner = FAKE_INO;
+ node_check.offset = FAKE_FILE_OFFSET;
+ btrfs_init_data_ref(&ref, FAKE_INO, FAKE_FILE_OFFSET,
+ FAKE_ROOT_OBJECTID, true);
+ }
+
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ return ret;
+ }
+
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ goto out;
+ }
+
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("single add and drop failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /*
+ * Add a ref, then add another ref, make sure we get a head ref with a
+ * 2 total ref mod and 1 node.
+ */
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ goto out;
+ }
+
+ head_check.ref_mod = 2;
+ head_check.total_ref_mod = 2;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("double add failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /* Add two drop refs, make sure they are merged properly. */
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ goto out;
+ }
+
+ head_check.ref_mod = -2;
+ head_check.total_ref_mod = -2;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("double drop failed");
+ goto out;
+ }
+
+ node_check.action = BTRFS_DROP_DELAYED_REF;
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /* Add multiple refs, then drop until we go negative again. */
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ for (int i = 0; i < 10; i++) {
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ for (int i = 0; i < 12; i++) {
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ head_check.ref_mod = -2;
+ head_check.total_ref_mod = -2;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("double drop failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /* Drop multiple refs, then add until we go positive again. */
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ for (int i = 0; i < 10; i++) {
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ for (int i = 0; i < 12; i++) {
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ head_check.ref_mod = 2;
+ head_check.total_ref_mod = 2;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("add and drop to positive failed");
+ goto out;
+ }
+
+ node_check.action = BTRFS_ADD_DELAYED_REF;
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /*
+ * Add a bunch of refs with different roots and parents, then drop them
+ * all, make sure everything is properly merged.
+ */
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ for (int i = 0; i < 50; i++) {
+ if (!(i % 2)) {
+ ref.parent = 0;
+ ref.ref_root = FAKE_ROOT_OBJECTID + i;
+ } else {
+ ref.parent = FAKE_PARENT + (i * fs_info->nodesize);
+ }
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ for (int i = 0; i < 50; i++) {
+ if (!(i % 2)) {
+ ref.parent = 0;
+ ref.ref_root = FAKE_ROOT_OBJECTID + i;
+ } else {
+ ref.parent = FAKE_PARENT + (i * fs_info->nodesize);
+ }
+ if (type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ else
+ ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+ }
+
+ head = btrfs_select_ref_head(fs_info, &trans->transaction->delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ head_check.ref_mod = 0;
+ head_check.total_ref_mod = 0;
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("add and drop multiple failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (node) {
+ test_err("found node when none should exist");
+ goto out;
+ }
+ ret = 0;
+out:
+ if (!IS_ERR_OR_NULL(head))
+ btrfs_unselect_ref_head(&trans->transaction->delayed_refs, head);
+ btrfs_destroy_delayed_refs(trans->transaction);
+ return ret;
+}
+
+/*
+ * Basic test to validate that we always get the add operations first, followed
+ * by any drop operations.
+ */
+static int select_delayed_refs_test(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_delayed_ref_root *delayed_refs =
+ &trans->transaction->delayed_refs;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_head *head = NULL;
+ struct btrfs_delayed_ref_node *node;
+ struct btrfs_ref ref = {
+ .type = BTRFS_REF_METADATA,
+ .action = BTRFS_DROP_DELAYED_REF,
+ .parent = 0,
+ .ref_root = FAKE_ROOT_OBJECTID,
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ };
+ struct ref_head_check head_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 0,
+ .total_ref_mod = 0,
+ };
+ struct ref_node_check node_check = {
+ .bytenr = FAKE_BYTENR,
+ .num_bytes = fs_info->nodesize,
+ .ref_mod = 1,
+ .action = BTRFS_ADD_DELAYED_REF,
+ .type = BTRFS_TREE_BLOCK_REF_KEY,
+ .parent = 0,
+ .owner = FAKE_LEVEL,
+ .offset = 0,
+ };
+ int ret;
+
+ /* Add the drop first. */
+ btrfs_init_tree_ref(&ref, FAKE_LEVEL, FAKE_ROOT_OBJECTID, false);
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ return ret;
+ }
+
+ /*
+ * Now add the add, and make it a different root so it's logically later
+ * in the rb tree.
+ */
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.ref_root = FAKE_ROOT_OBJECTID + 1;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ head = NULL;
+ goto out;
+ }
+
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("head check failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ node_check.root = FAKE_ROOT_OBJECTID + 1;
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ node_check.action = BTRFS_DROP_DELAYED_REF;
+ node_check.root = FAKE_ROOT_OBJECTID;
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+ delete_delayed_ref_node(head, node);
+ delete_delayed_ref_head(trans, head);
+ head = NULL;
+
+ /*
+ * Now we're going to do the same thing, but we're going to have an add
+ * that gets deleted because of a merge, and make sure we still have
+ * another add in place.
+ */
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ ref.ref_root = FAKE_ROOT_OBJECTID;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.ref_root = FAKE_ROOT_OBJECTID + 1;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.ref_root = FAKE_ROOT_OBJECTID + 2;
+ ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
+ if (ret) {
+ test_err("failed ref action %d", ret);
+ goto out;
+ }
+
+ head = btrfs_select_ref_head(fs_info, delayed_refs);
+ if (IS_ERR_OR_NULL(head)) {
+ if (IS_ERR(head))
+ test_err("failed to select delayed ref head: %ld",
+ PTR_ERR(head));
+ else
+ test_err("failed to find delayed ref head");
+ ret = -EINVAL;
+ head = NULL;
+ goto out;
+ }
+
+ ret = -EINVAL;
+ if (validate_ref_head(head, &head_check)) {
+ test_err("head check failed");
+ goto out;
+ }
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ node_check.action = BTRFS_ADD_DELAYED_REF;
+ node_check.root = FAKE_ROOT_OBJECTID + 2;
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+ delete_delayed_ref_node(head, node);
+
+ spin_lock(&head->lock);
+ node = btrfs_select_delayed_ref(head);
+ spin_unlock(&head->lock);
+ if (!node) {
+ test_err("failed to select delayed ref");
+ goto out;
+ }
+
+ node_check.action = BTRFS_DROP_DELAYED_REF;
+ node_check.root = FAKE_ROOT_OBJECTID;
+ if (validate_ref_node(node, &node_check)) {
+ test_err("node check failed");
+ goto out;
+ }
+ delete_delayed_ref_node(head, node);
+ ret = 0;
+out:
+ if (head)
+ btrfs_unselect_ref_head(delayed_refs, head);
+ btrfs_destroy_delayed_refs(trans->transaction);
+ return ret;
+}
+
+int btrfs_test_delayed_refs(u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_transaction *transaction;
+ struct btrfs_trans_handle trans;
+ struct btrfs_fs_info *fs_info;
+ int ret;
+
+ test_msg("running delayed refs tests");
+
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ return -ENOMEM;
+ }
+ transaction = kmalloc(sizeof(*transaction), GFP_KERNEL);
+ if (!transaction) {
+ test_std_err(TEST_ALLOC_TRANSACTION);
+ ret = -ENOMEM;
+ goto out_free_fs_info;
+ }
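+ /* Wire up a dummy handle and transaction so delayed refs can be queued. */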
+ btrfs_init_dummy_trans(&trans, fs_info);
+ btrfs_init_dummy_transaction(transaction, fs_info);
+ trans.transaction = transaction;
+
+ ret = simple_tests(&trans);
+ if (!ret) {
+ test_msg("running delayed refs merg tests on metadata refs");
+ ret = merge_tests(&trans, BTRFS_REF_METADATA);
+ }
+
+ if (!ret) {
+ test_msg("running delayed refs merg tests on data refs");
+ ret = merge_tests(&trans, BTRFS_REF_DATA);
+ }
+
+ if (!ret)
+ ret = select_delayed_refs_test(&trans);
+
+out_free_fs_info:
+ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+}
diff --git a/fs/btrfs/tests/raid-stripe-tree-tests.c b/fs/btrfs/tests/raid-stripe-tree-tests.c
new file mode 100644
index 000000000000..a7bc58a5c1e2
--- /dev/null
+++ b/fs/btrfs/tests/raid-stripe-tree-tests.c
@@ -0,0 +1,1161 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/sizes.h>
+#include "../fs.h"
+#include "../disk-io.h"
+#include "../transaction.h"
+#include "../volumes.h"
+#include "../raid-stripe-tree.h"
+#include "btrfs-tests.h"
+
+#define RST_TEST_NUM_DEVICES (2)
+#define RST_TEST_RAID1_TYPE (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1)
+
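+/* Not provided by linux/sizes.h, used by the partial delete tests below. */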
+#define SZ_48K (SZ_32K + SZ_16K)
+
+typedef int (*test_func_t)(struct btrfs_trans_handle *trans);
+
+static struct btrfs_device *btrfs_device_by_devid(struct btrfs_fs_devices *fs_devices,
+ u64 devid)
+{
+ struct btrfs_device *dev;
+
+ list_for_each_entry(dev, &fs_devices->devices, dev_list) {
+ if (dev->devid == devid)
+ return dev;
+ }
+
+ return NULL;
+}
+
+/*
+ * Test creating a range of three extents and then punching a hole in the
+ * middle, deleting the whole middle extent and partially deleting the "book
+ * ends".
+ */
+static int test_punch_hole_3extents(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical1 = SZ_1M;
+ u64 len1 = SZ_1M;
+ u64 logical2 = logical1 + len1;
+ u64 len2 = SZ_1M;
+ u64 logical3 = logical2 + len2;
+ u64 len3 = SZ_1M;
+ u64 hole_start = logical1 + SZ_256K;
+ u64 hole_len = SZ_2M;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical1, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+
+ /* Prepare for the test, 1st create 3 x 1M extents. */
+ bioc->map_type = map_type;
+ bioc->size = len1;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical1 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical2;
+ bioc->size = len2;
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical2 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical3;
+ bioc->size = len3;
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical3 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ /*
+ * Delete a range starting at logical1 + 256K and 2M in length. Extent
+ * 1 is truncated to 256K length, extent 2 is completely dropped and
+ * extent 3 is moved 256K to the right.
+ */
+ ret = btrfs_delete_raid_extent(trans, hole_start, hole_len);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ hole_start, hole_start + hole_len);
+ goto out;
+ }
+
+ /* Get the first extent and check its size. */
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len1, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical1, logical1 + len1);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical1) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical1, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len1 != SZ_256K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_256K, len1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Get the second extent and check it's absent. */
+ ret = btrfs_get_raid_extent_offset(fs_info, logical2, &len2, map_type,
+ 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID extent [%llu, %llu] succeeded should fail",
+ logical2, logical2 + len2);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Get the third extent and check its size. */
+ logical3 += SZ_256K;
+ ret = btrfs_get_raid_extent_offset(fs_info, logical3, &len3, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical3, logical3 + len3);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical3) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical3, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len3 != SZ_1M - SZ_256K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_1M - SZ_256K, len3);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical1, len1);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ logical1, logical1 + len1);
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical3, len3);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ logical3, logical3 + len3);
+ goto out;
+ }
+
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+static int test_delete_two_extents(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical1 = SZ_1M;
+ u64 len1 = SZ_1M;
+ u64 logical2 = logical1 + len1;
+ u64 len2 = SZ_1M;
+ u64 logical3 = logical2 + len2;
+ u64 len3 = SZ_1M;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical1, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+
+ /* Prepare for the test, 1st create 3 x 1M extents. */
+ bioc->map_type = map_type;
+ bioc->size = len1;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical1 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical2;
+ bioc->size = len2;
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical2 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical3;
+ bioc->size = len3;
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical3 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ /*
+ * Delete a range starting at logical1 and 2M in length. Extents 1
+ * and 2 are dropped and extent 3 is kept as is.
+ */
+ ret = btrfs_delete_raid_extent(trans, logical1, len1 + len2);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ logical1, logical1 + len1 + len2);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len1, map_type,
+ 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID extent [%llu, %llu] succeeded, should fail",
+ logical1, len1);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical2, &len2, map_type,
+ 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID extent [%llu, %llu] succeeded, should fail",
+ logical2, len2);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical3, &len3, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical3, len3);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical3) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical3, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len3 != SZ_1M) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_1M, len3);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical3, len3);
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+/* Test punching a hole into a single RAID stripe-extent. */
+static int test_punch_hole(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical1 = SZ_1M;
+ u64 hole_start = logical1 + SZ_32K;
+ u64 hole_len = SZ_64K;
+ u64 logical2 = hole_start + hole_len;
+ u64 len = SZ_1M;
+ u64 len1 = SZ_32K;
+ u64 len2 = len - len1 - hole_len;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical1, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ bioc->map_type = map_type;
+ bioc->size = len;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical1 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len, map_type, 0,
+ &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical1,
+ logical1 + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical1) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical1, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_1M) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_1M, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, hole_start, hole_len);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ hole_start, hole_start + hole_len);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len1, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical1, logical1 + len1);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical1) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical1, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len1 != SZ_32K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_32K, len1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical2, &len2, map_type,
+ 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical2,
+ logical2 + len2);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical2) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical2, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len2 != len - len1 - hole_len) {
+ test_err("invalid length, expected %llu, got %llu",
+ len - len1 - hole_len, len2);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Check for the absence of the hole. */
+ ret = btrfs_get_raid_extent_offset(fs_info, hole_start, &hole_len,
+ map_type, 0, &io_stripe);
+ if (ret != -ENODATA) {
+ ret = -EINVAL;
+ test_err("lookup of RAID extent [%llu, %llu] succeeded, should fail",
+ hole_start, hole_start + SZ_64K);
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical1, len1);
+ if (ret)
+ goto out;
+
+ ret = btrfs_delete_raid_extent(trans, logical2, len2);
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+/*
+ * Test a 1M RST write that spans two adjacent RST items on disk and then
+ * delete a portion starting in the first item and spanning into the second
+ * item. This is similar to test_front_delete(), but spanning multiple items.
+ */
+static int test_front_delete_prev_item(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical1 = SZ_1M;
+ u64 logical2 = SZ_2M;
+ u64 len = SZ_1M;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical1, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ bioc->map_type = map_type;
+ bioc->size = len;
+
+ /* Insert RAID extent 1. */
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical1 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ bioc->logical = logical2;
+ /* Insert RAID extent 2, directly adjacent to it. */
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical2 + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical1 + SZ_512K, SZ_1M);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ logical1 + SZ_512K, (u64)SZ_1M);
+ goto out;
+ }
+
+ /* Verify item 1 is truncated to 512K. */
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len, map_type, 0,
+ &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical1,
+ logical1 + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical1) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical1, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_512K) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_512K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Verify item 2's start is moved by 512K. */
+ ret = btrfs_get_raid_extent_offset(fs_info, logical2 + SZ_512K, &len,
+ map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical2 + SZ_512K, logical2 + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical2 + SZ_512K) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical2 + SZ_512K, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_512K) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_512K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Verify there's a hole at [1M+512K, 2M+512K]. */
+ len = SZ_1M;
+ ret = btrfs_get_raid_extent_offset(fs_info, logical1 + SZ_512K, &len,
+ map_type, 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID [%llu, %llu] succeeded, should fail",
+ logical1 + SZ_512K, logical1 + SZ_512K + len);
+ goto out;
+ }
+
+ /* Clean up after us. */
+ ret = btrfs_delete_raid_extent(trans, logical1, SZ_512K);
+ if (ret)
+ goto out;
+
+ ret = btrfs_delete_raid_extent(trans, logical2 + SZ_512K, SZ_512K);
+
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+/*
+ * Test a 64K RST write on a 2 disk RAID1 at a logical address of 1M and then
+ * delete the first 16K, making the new start address 1M+16K.
+ */
+static int test_front_delete(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical = SZ_1M;
+ u64 len = SZ_64K;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ bioc->map_type = map_type;
+ bioc->size = len;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_64K) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_64K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical, SZ_16K);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed", logical,
+ logical + SZ_16K);
+ goto out;
+ }
+
+ len -= SZ_16K;
+ ret = btrfs_get_raid_extent_offset(fs_info, logical + SZ_16K, &len,
+ map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed",
+ logical + SZ_16K, logical + SZ_64K);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical + SZ_16K) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical + SZ_16K, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_48K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_48K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
+ if (ret != -ENODATA) {
+ ret = -EINVAL;
+ test_err("lookup of RAID extent [%llu, %llu] succeeded, should fail",
+ logical, logical + SZ_16K);
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical + SZ_16K, SZ_48K);
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+/*
+ * Test a 64K RST write on a 2 disk RAID1 at a logical address of 1M and then
+ * truncate the stripe extent down to 48K.
+ */
+static int test_tail_delete(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical = SZ_1M;
+ u64 len = SZ_64K;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ bioc->map_type = map_type;
+ bioc->size = len;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ if (!io_stripe.dev) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_64K) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_64K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical + SZ_48K, SZ_16K);
+ if (ret) {
+ test_err("deleting RAID extent [%llu, %llu] failed",
+ logical + SZ_48K, logical + SZ_64K);
+ goto out;
+ }
+
+ len = SZ_48K;
+ ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_48K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_48K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ len = SZ_16K;
+ ret = btrfs_get_raid_extent_offset(fs_info, logical + SZ_48K, &len,
+ map_type, 0, &io_stripe);
+ if (ret != -ENODATA) {
+ test_err("lookup of RAID extent [%llu, %llu] succeeded should fail",
+ logical + SZ_48K, logical + SZ_64K);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical, len);
+ if (ret)
+ test_err("deleting RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+/*
+ * Test a 64K RST write on a 2 disk RAID1 at a logical address of 1M and then
+ * overwrite the whole range, giving it a new physical address at an offset of 1G.
+ * The intent of this test is to exercise the 'update_raid_extent_item()'
+ * function called by btrfs_insert_one_raid_extent().
+ */
+static int test_create_update_delete(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical = SZ_1M;
+ u64 len = SZ_64K;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ bioc->map_type = map_type;
+ bioc->size = len;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ if (!io_stripe.dev) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_64K) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_64K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = SZ_1G + logical + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("updating RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical + SZ_1G) {
+ test_err("invalid physical address, expected %llu, got %llu",
+ logical + SZ_1G, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_64K) {
+ test_err("invalid stripe length, expected %llu, got %llu",
+ (u64)SZ_64K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical, len);
+ if (ret)
+ test_err("deleting RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
+/*
+ * Test a simple 64K RST write on a 2 disk RAID1 at a logical address of 1M.
+ * The "physical" copy on device 0 is at 1M, on device 1 it is at 1G+1M.
+ */
+static int test_simple_create_delete(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_io_context *bioc;
+ struct btrfs_io_stripe io_stripe = { 0 };
+ u64 map_type = RST_TEST_RAID1_TYPE;
+ u64 logical = SZ_1M;
+ u64 len = SZ_64K;
+ int ret;
+
+ bioc = alloc_btrfs_io_context(fs_info, logical, RST_TEST_NUM_DEVICES);
+ if (!bioc) {
+ test_std_err(TEST_ALLOC_IO_CONTEXT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ bioc->map_type = map_type;
+ bioc->size = SZ_64K;
+
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_io_stripe *stripe = &bioc->stripes[i];
+
+ stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);
+ if (!stripe->dev) {
+ test_err("cannot find device with devid %d", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stripe->physical = logical + i * SZ_1G;
+ }
+
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret) {
+ test_err("inserting RAID extent failed: %d", ret);
+ goto out;
+ }
+
+ io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);
+ if (!io_stripe.dev) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_get_raid_extent_offset(fs_info, logical, &len, map_type, 0, &io_stripe);
+ if (ret) {
+ test_err("lookup of RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+ goto out;
+ }
+
+ if (io_stripe.physical != logical) {
+ test_err("invalid physical address, expected %llu got %llu",
+ logical, io_stripe.physical);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len != SZ_64K) {
+ test_err("invalid stripe length, expected %llu got %llu",
+ (u64)SZ_64K, len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_delete_raid_extent(trans, logical, len);
+ if (ret)
+ test_err("deleting RAID extent [%llu, %llu] failed", logical,
+ logical + len);
+
+out:
+ btrfs_put_bioc(bioc);
+ return ret;
+}
+
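+/* Test cases are run in this order by btrfs_test_raid_stripe_tree(). */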
+static const test_func_t tests[] = {
+ test_simple_create_delete,
+ test_create_update_delete,
+ test_tail_delete,
+ test_front_delete,
+ test_front_delete_prev_item,
+ test_punch_hole,
+ test_punch_hole_3extents,
+ test_delete_two_extents,
+};
+
+static int run_test(test_func_t test, u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_trans_handle trans;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_root *root = NULL;
+ int ret;
+
+ fs_info = btrfs_alloc_dummy_fs_info(sectorsize, nodesize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ root = btrfs_alloc_dummy_root(fs_info);
+ if (IS_ERR(root)) {
+ test_std_err(TEST_ALLOC_ROOT);
+ ret = PTR_ERR(root);
+ goto out;
+ }
+ btrfs_set_super_incompat_flags(root->fs_info->super_copy,
+ BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE);
+ root->root_key.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
+ root->root_key.type = BTRFS_ROOT_ITEM_KEY;
+ root->root_key.offset = 0;
+ fs_info->stripe_root = root;
+ root->fs_info->tree_root = root;
+
+ root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
+ if (IS_ERR(root->node)) {
+ test_std_err(TEST_ALLOC_EXTENT_BUFFER);
+ ret = PTR_ERR(root->node);
+ goto out;
+ }
+ btrfs_set_header_level(root->node, 0);
+ btrfs_set_header_nritems(root->node, 0);
+ root->alloc_bytenr += 2 * nodesize;
+
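+ /*
+ * Create RST_TEST_NUM_DEVICES dummy devices with devids 0..N-1 so the
+ * test cases can look them up via btrfs_device_by_devid().
+ */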
+ for (int i = 0; i < RST_TEST_NUM_DEVICES; i++) {
+ struct btrfs_device *dev;
+
+ dev = btrfs_alloc_dummy_device(fs_info);
+ if (IS_ERR(dev)) {
+ test_err("cannot allocate device");
+ ret = PTR_ERR(dev);
+ goto out;
+ }
+ dev->devid = i;
+ }
+
+ btrfs_init_dummy_trans(&trans, root->fs_info);
+ ret = test(&trans);
+ if (ret)
+ goto out;
+
+out:
+ btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
+
+ return ret;
+}
+
+int btrfs_test_raid_stripe_tree(u32 sectorsize, u32 nodesize)
+{
+ int ret = 0;
+
+ test_msg("running raid-stripe-tree tests");
+ for (int i = 0; i < ARRAY_SIZE(tests); i++) {
+ ret = run_test(tests[i], sectorsize, nodesize);
+ if (ret) {
+ test_err("test-case %ps failed with %d\n", tests[i], ret);
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 0fc873af891f..15312013f2a3 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -141,8 +141,7 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
WARN_ON(refcount_read(&transaction->use_count) == 0);
if (refcount_dec_and_test(&transaction->use_count)) {
BUG_ON(!list_empty(&transaction->list));
- WARN_ON(!RB_EMPTY_ROOT(
- &transaction->delayed_refs.href_root.rb_root));
+ WARN_ON(!xa_empty(&transaction->delayed_refs.head_refs));
WARN_ON(!xa_empty(&transaction->delayed_refs.dirty_extents));
if (transaction->delayed_refs.pending_csums)
btrfs_err(transaction->fs_info,
@@ -349,9 +348,8 @@ loop:
memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
- cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
+ xa_init(&cur_trans->delayed_refs.head_refs);
xa_init(&cur_trans->delayed_refs.dirty_extents);
- atomic_set(&cur_trans->delayed_refs.num_entries, 0);
/*
* although the tree mod log is per file system and not per transaction,
@@ -797,8 +795,7 @@ alloc_fail:
if (num_bytes)
btrfs_block_rsv_release(fs_info, trans_rsv, num_bytes, NULL);
if (delayed_refs_bytes)
- btrfs_space_info_free_bytes_may_use(fs_info, trans_rsv->space_info,
- delayed_refs_bytes);
+ btrfs_space_info_free_bytes_may_use(trans_rsv->space_info, delayed_refs_bytes);
reserve_fail:
btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
return ERR_PTR(ret);
@@ -2052,7 +2049,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
spin_unlock(&fs_info->trans_lock);
- btrfs_cleanup_one_transaction(trans->transaction, fs_info);
+ btrfs_cleanup_one_transaction(trans->transaction);
spin_lock(&fs_info->trans_lock);
if (cur_trans == fs_info->running_transaction)
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index dd9ce9b9f69e..9f7c777af635 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -33,7 +33,7 @@ struct btrfs_path;
*/
#define BTRFS_TRANS_DIO_WRITE_STUB ((void *) 1)
-/* Radix-tree tag for roots that are part of the trasaction. */
+/* Radix-tree tag for roots that are part of the transaction. */
#define BTRFS_ROOT_TRANS_TAG 0
enum btrfs_trans_state {
@@ -227,7 +227,21 @@ static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
delayed_refs->qgroup_to_skip = 0;
}
-bool __cold abort_should_print_stack(int error);
+/*
+ * We want the transaction abort to print stack trace only for errors where the
+ * cause could be a bug, eg. due to ENOSPC, and not for common errors that are
+ * caused by external factors.
+ */
+static inline bool btrfs_abort_should_print_stack(int error)
+{
+ switch (error) {
+ case -EIO:
+ case -EROFS:
+ case -ENOMEM:
+ return false;
+ }
+ return true;
+}
/*
* Call btrfs_abort_transaction as early as possible when an error condition is
@@ -240,7 +254,7 @@ do { \
if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
&((trans)->fs_info->fs_state))) { \
__first = true; \
- if (WARN(abort_should_print_stack(error), \
+ if (WARN(btrfs_abort_should_print_stack(error), \
KERN_ERR \
"BTRFS: Transaction aborted (error %d)\n", \
(error))) { \
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 7b50263723bc..43979891f7c8 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -764,22 +764,19 @@ static int check_block_group_item(struct extent_buffer *leaf,
return 0;
}
-__printf(4, 5)
+__printf(5, 6)
__cold
-static void chunk_err(const struct extent_buffer *leaf,
+static void chunk_err(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *leaf,
const struct btrfs_chunk *chunk, u64 logical,
const char *fmt, ...)
{
- const struct btrfs_fs_info *fs_info = leaf->fs_info;
- bool is_sb;
+ bool is_sb = !leaf;
struct va_format vaf;
va_list args;
int i;
int slot = -1;
- /* Only superblock eb is able to have such small offset */
- is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);
-
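+ /* A NULL @leaf means @chunk comes from the superblock sys_chunk array. */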
if (!is_sb) {
/*
* Get the slot number by iterating through all slots, this
@@ -812,13 +809,17 @@ static void chunk_err(const struct extent_buffer *leaf,
/*
* The common chunk check which could also work on super block sys chunk array.
*
+ * If @leaf is NULL, then @chunk must be an on-stack chunk item (this is the
+ * case for the superblock sys_chunk array, where fs_info->sectorsize is
+ * unreliable).
+ *
* Return -EUCLEAN if anything is corrupted.
* Return 0 if everything is OK.
*/
-int btrfs_check_chunk_valid(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk, u64 logical)
+int btrfs_check_chunk_valid(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *leaf,
+ const struct btrfs_chunk *chunk, u64 logical,
+ u32 sectorsize)
{
- struct btrfs_fs_info *fs_info = leaf->fs_info;
u64 length;
u64 chunk_end;
u64 stripe_len;
@@ -826,63 +827,73 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
u16 sub_stripes;
u64 type;
u64 features;
+ u32 chunk_sector_size;
bool mixed = false;
int raid_index;
int nparity;
int ncopies;
- length = btrfs_chunk_length(leaf, chunk);
- stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
- num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
- sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
- type = btrfs_chunk_type(leaf, chunk);
+ if (leaf) {
+ length = btrfs_chunk_length(leaf, chunk);
+ stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+ num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+ sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+ type = btrfs_chunk_type(leaf, chunk);
+ chunk_sector_size = btrfs_chunk_sector_size(leaf, chunk);
+ } else {
+ length = btrfs_stack_chunk_length(chunk);
+ stripe_len = btrfs_stack_chunk_stripe_len(chunk);
+ num_stripes = btrfs_stack_chunk_num_stripes(chunk);
+ sub_stripes = btrfs_stack_chunk_sub_stripes(chunk);
+ type = btrfs_stack_chunk_type(chunk);
+ chunk_sector_size = btrfs_stack_chunk_sector_size(chunk);
+ }
raid_index = btrfs_bg_flags_to_raid_index(type);
ncopies = btrfs_raid_array[raid_index].ncopies;
nparity = btrfs_raid_array[raid_index].nparity;
if (unlikely(!num_stripes)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk num_stripes, have %u", num_stripes);
return -EUCLEAN;
}
if (unlikely(num_stripes < ncopies)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk num_stripes < ncopies, have %u < %d",
num_stripes, ncopies);
return -EUCLEAN;
}
if (unlikely(nparity && num_stripes == nparity)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk num_stripes == nparity, have %u == %d",
num_stripes, nparity);
return -EUCLEAN;
}
- if (unlikely(!IS_ALIGNED(logical, fs_info->sectorsize))) {
- chunk_err(leaf, chunk, logical,
+ if (unlikely(!IS_ALIGNED(logical, sectorsize))) {
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk logical, have %llu should aligned to %u",
- logical, fs_info->sectorsize);
+ logical, sectorsize);
return -EUCLEAN;
}
- if (unlikely(btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize)) {
- chunk_err(leaf, chunk, logical,
+ if (unlikely(chunk_sector_size != sectorsize)) {
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk sectorsize, have %u expect %u",
- btrfs_chunk_sector_size(leaf, chunk),
- fs_info->sectorsize);
+ chunk_sector_size, sectorsize);
return -EUCLEAN;
}
- if (unlikely(!length || !IS_ALIGNED(length, fs_info->sectorsize))) {
- chunk_err(leaf, chunk, logical,
+ if (unlikely(!length || !IS_ALIGNED(length, sectorsize))) {
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk length, have %llu", length);
return -EUCLEAN;
}
if (unlikely(check_add_overflow(logical, length, &chunk_end))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk logical start and length, have logical start %llu length %llu",
logical, length);
return -EUCLEAN;
}
if (unlikely(!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk stripe length: %llu",
stripe_len);
return -EUCLEAN;
@@ -896,30 +907,29 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
* Thus it should be a good way to catch obvious bitflips.
*/
if (unlikely(length >= btrfs_stripe_nr_to_offset(U32_MAX))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"chunk length too large: have %llu limit %llu",
length, btrfs_stripe_nr_to_offset(U32_MAX));
return -EUCLEAN;
}
if (unlikely(type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
BTRFS_BLOCK_GROUP_PROFILE_MASK))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"unrecognized chunk type: 0x%llx",
~(BTRFS_BLOCK_GROUP_TYPE_MASK |
- BTRFS_BLOCK_GROUP_PROFILE_MASK) &
- btrfs_chunk_type(leaf, chunk));
+ BTRFS_BLOCK_GROUP_PROFILE_MASK) & type);
return -EUCLEAN;
}
if (unlikely(!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
return -EUCLEAN;
}
if (unlikely((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0)) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"missing chunk type flag, have 0x%llx one bit must be set in 0x%llx",
type, BTRFS_BLOCK_GROUP_TYPE_MASK);
return -EUCLEAN;
@@ -928,7 +938,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
if (unlikely((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
(type & (BTRFS_BLOCK_GROUP_METADATA |
BTRFS_BLOCK_GROUP_DATA)))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"system chunk with data or metadata type: 0x%llx",
type);
return -EUCLEAN;
@@ -941,7 +951,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
if (!mixed) {
if (unlikely((type & BTRFS_BLOCK_GROUP_METADATA) &&
(type & BTRFS_BLOCK_GROUP_DATA))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"mixed chunk type in non-mixed mode: 0x%llx", type);
return -EUCLEAN;
}
@@ -963,7 +973,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
num_stripes != btrfs_raid_array[BTRFS_RAID_DUP].dev_stripes) ||
((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
num_stripes != btrfs_raid_array[BTRFS_RAID_SINGLE].dev_stripes))) {
- chunk_err(leaf, chunk, logical,
+ chunk_err(fs_info, leaf, chunk, logical,
"invalid num_stripes:sub_stripes %u:%u for profile %llu",
num_stripes, sub_stripes,
type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
@@ -983,14 +993,15 @@ static int check_leaf_chunk_item(struct extent_buffer *leaf,
struct btrfs_chunk *chunk,
struct btrfs_key *key, int slot)
{
+ struct btrfs_fs_info *fs_info = leaf->fs_info;
int num_stripes;
if (unlikely(btrfs_item_size(leaf, slot) < sizeof(struct btrfs_chunk))) {
- chunk_err(leaf, chunk, key->offset,
+ chunk_err(fs_info, leaf, chunk, key->offset,
"invalid chunk item size: have %u expect [%zu, %u)",
btrfs_item_size(leaf, slot),
sizeof(struct btrfs_chunk),
- BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
+ BTRFS_LEAF_DATA_SIZE(fs_info));
return -EUCLEAN;
}
@@ -1001,14 +1012,15 @@ static int check_leaf_chunk_item(struct extent_buffer *leaf,
if (unlikely(btrfs_chunk_item_size(num_stripes) !=
btrfs_item_size(leaf, slot))) {
- chunk_err(leaf, chunk, key->offset,
+ chunk_err(fs_info, leaf, chunk, key->offset,
"invalid chunk item size: have %u expect %lu",
btrfs_item_size(leaf, slot),
btrfs_chunk_item_size(num_stripes));
return -EUCLEAN;
}
out:
- return btrfs_check_chunk_valid(leaf, chunk, key->offset);
+ return btrfs_check_chunk_valid(fs_info, leaf, chunk, key->offset,
+ fs_info->sectorsize);
}
__printf(3, 4)
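
A minimal sketch of the new NULL-leaf calling convention: validating a chunk item copied out of the superblock sys_chunk_array, where no extent buffer exists and the caller passes the sector size explicitly. demo_check_sys_chunk and its arguments are illustrative only, not part of the patch:

static int demo_check_sys_chunk(const struct btrfs_fs_info *fs_info,
				const struct btrfs_chunk *sys_chunk,
				u64 logical, u32 sectorsize)
{
	/* A NULL leaf makes the checker read the chunk via the btrfs_stack_*() accessors. */
	return btrfs_check_chunk_valid(fs_info, NULL, sys_chunk, logical, sectorsize);
}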
@@ -1527,6 +1539,11 @@ static int check_extent_item(struct extent_buffer *leaf,
dref_offset, fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_extent_data_ref_count(leaf, dref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid data ref count, should have non-zero value");
+ return -EUCLEAN;
+ }
inline_refs += btrfs_extent_data_ref_count(leaf, dref);
break;
/* Contains parent bytenr and ref count */
@@ -1539,6 +1556,11 @@ static int check_extent_item(struct extent_buffer *leaf,
inline_offset, fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_shared_data_ref_count(leaf, sref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid shared data ref count, should have non-zero value");
+ return -EUCLEAN;
+ }
inline_refs += btrfs_shared_data_ref_count(leaf, sref);
break;
case BTRFS_EXTENT_OWNER_REF_KEY:
@@ -1611,8 +1633,18 @@ static int check_simple_keyed_refs(struct extent_buffer *leaf,
{
u32 expect_item_size = 0;
- if (key->type == BTRFS_SHARED_DATA_REF_KEY)
+ if (key->type == BTRFS_SHARED_DATA_REF_KEY) {
+ struct btrfs_shared_data_ref *sref;
+
+ sref = btrfs_item_ptr(leaf, slot, struct btrfs_shared_data_ref);
+ if (unlikely(btrfs_shared_data_ref_count(leaf, sref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid shared data backref count, should have non-zero value");
+ return -EUCLEAN;
+ }
+
expect_item_size = sizeof(struct btrfs_shared_data_ref);
+ }
if (unlikely(btrfs_item_size(leaf, slot) != expect_item_size)) {
generic_err(leaf, slot,
@@ -1689,6 +1721,11 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
offset, leaf->fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_extent_data_ref_count(leaf, dref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid extent data backref count, should have non-zero value");
+ return -EUCLEAN;
+ }
}
return 0;
}
@@ -2183,8 +2220,8 @@ int btrfs_check_eb_owner(const struct extent_buffer *eb, u64 root_owner)
return 0;
}
-int btrfs_verify_level_key(struct extent_buffer *eb, int level,
- struct btrfs_key *first_key, u64 parent_transid)
+int btrfs_verify_level_key(struct extent_buffer *eb,
+ const struct btrfs_tree_parent_check *check)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
int found_level;
@@ -2192,16 +2229,16 @@ int btrfs_verify_level_key(struct extent_buffer *eb, int level,
int ret;
found_level = btrfs_header_level(eb);
- if (found_level != level) {
+ if (found_level != check->level) {
WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
KERN_ERR "BTRFS: tree level check failed\n");
btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
- eb->start, level, found_level);
+ eb->start, check->level, found_level);
return -EIO;
}
- if (!first_key)
+ if (!check->has_first_key)
return 0;
/*
@@ -2226,15 +2263,15 @@ int btrfs_verify_level_key(struct extent_buffer *eb, int level,
btrfs_node_key_to_cpu(eb, &found_key, 0);
else
btrfs_item_key_to_cpu(eb, &found_key, 0);
- ret = btrfs_comp_cpu_keys(first_key, &found_key);
+ ret = btrfs_comp_cpu_keys(&check->first_key, &found_key);
if (ret) {
WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
KERN_ERR "BTRFS: tree first key check failed\n");
btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
- eb->start, parent_transid, first_key->objectid,
- first_key->type, first_key->offset,
+ eb->start, check->transid, check->first_key.objectid,
+ check->first_key.type, check->first_key.offset,
found_key.objectid, found_key.type,
found_key.offset);
}
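
A minimal sketch of the new calling convention, assuming a caller that already has the parent node and child slot at hand; parent, slot and child_eb are placeholder names, not taken from this patch:

struct btrfs_tree_parent_check check = { 0 };
int ret;

check.level = btrfs_header_level(parent) - 1;
check.transid = btrfs_node_ptr_generation(parent, slot);
check.has_first_key = true;
btrfs_node_key_to_cpu(parent, &check.first_key, slot);

ret = btrfs_verify_level_key(child_eb, &check);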
diff --git a/fs/btrfs/tree-checker.h b/fs/btrfs/tree-checker.h
index 01669cfa6578..eb201f4ec3c7 100644
--- a/fs/btrfs/tree-checker.h
+++ b/fs/btrfs/tree-checker.h
@@ -10,6 +10,7 @@
#include <uapi/linux/btrfs_tree.h>
struct extent_buffer;
+struct btrfs_fs_info;
struct btrfs_chunk;
struct btrfs_key;
@@ -66,10 +67,12 @@ enum btrfs_tree_block_status __btrfs_check_node(struct extent_buffer *node);
int btrfs_check_leaf(struct extent_buffer *leaf);
int btrfs_check_node(struct extent_buffer *node);
-int btrfs_check_chunk_valid(struct extent_buffer *leaf,
- struct btrfs_chunk *chunk, u64 logical);
+int btrfs_check_chunk_valid(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *leaf,
+ const struct btrfs_chunk *chunk, u64 logical,
+ u32 sectorsize);
int btrfs_check_eb_owner(const struct extent_buffer *eb, u64 root_owner);
-int btrfs_verify_level_key(struct extent_buffer *eb, int level,
- struct btrfs_key *first_key, u64 parent_transid);
+int btrfs_verify_level_key(struct extent_buffer *eb,
+ const struct btrfs_tree_parent_check *check);
#endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index e2ed2a791f8f..955d1677e865 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -590,7 +590,6 @@ insert:
}
}
no_copy:
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
return 0;
}
@@ -1374,7 +1373,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
struct inode *inode = NULL;
unsigned long ref_ptr;
unsigned long ref_end;
- struct fscrypt_str name;
+ struct fscrypt_str name = { 0 };
int ret;
int log_ref_ver = 0;
u64 parent_objectid;
@@ -1845,7 +1844,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
struct btrfs_dir_item *di,
struct btrfs_key *key)
{
- struct fscrypt_str name;
+ struct fscrypt_str name = { 0 };
struct btrfs_dir_item *dir_dst_di;
struct btrfs_dir_item *index_dst_di;
bool dir_dst_matches = false;
@@ -2125,7 +2124,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
struct extent_buffer *eb;
int slot;
struct btrfs_dir_item *di;
- struct fscrypt_str name;
+ struct fscrypt_str name = { 0 };
struct inode *inode = NULL;
struct btrfs_key location;
@@ -3588,7 +3587,6 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
last_offset = max(last_offset, curr_end);
}
btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
- btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
return 0;
}
@@ -4566,7 +4564,6 @@ copy_item:
dst_index++;
}
- btrfs_mark_buffer_dirty(trans, dst_path->nodes[0]);
btrfs_release_path(dst_path);
out:
kfree(ins_data);
@@ -4776,7 +4773,6 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
write_extent_buffer(leaf, &fi,
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(fi));
- btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
@@ -6204,7 +6200,6 @@ static int log_delayed_deletions_full(struct btrfs_trans_handle *trans,
static int batch_delete_dir_index_items(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
struct btrfs_path *path,
- struct btrfs_log_ctx *ctx,
const struct list_head *delayed_del_list,
const struct btrfs_delayed_item *first,
const struct btrfs_delayed_item **last_ret)
@@ -6265,7 +6260,7 @@ static int log_delayed_deletions_incremental(struct btrfs_trans_handle *trans,
if (ret < 0) {
return ret;
} else if (ret == 0) {
- ret = batch_delete_dir_index_items(trans, inode, path, ctx,
+ ret = batch_delete_dir_index_items(trans, inode, path,
delayed_del_list, curr,
&last);
if (ret)
diff --git a/fs/btrfs/tree-mod-log.c b/fs/btrfs/tree-mod-log.c
index b382a4c443d4..1ac2678fc4ca 100644
--- a/fs/btrfs/tree-mod-log.c
+++ b/fs/btrfs/tree-mod-log.c
@@ -909,7 +909,6 @@ static void tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
* is freed (its refcount is decremented).
*/
struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
struct extent_buffer *eb,
u64 time_seq)
{
diff --git a/fs/btrfs/tree-mod-log.h b/fs/btrfs/tree-mod-log.h
index 6308c577a4a4..1c12566040db 100644
--- a/fs/btrfs/tree-mod-log.h
+++ b/fs/btrfs/tree-mod-log.h
@@ -41,7 +41,6 @@ int btrfs_tree_mod_log_insert_key(const struct extent_buffer *eb, int slot,
enum btrfs_mod_log_op op);
int btrfs_tree_mod_log_free_eb(struct extent_buffer *eb);
struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
struct extent_buffer *eb,
u64 time_seq);
struct extent_buffer *btrfs_get_old_root(struct btrfs_root *root, u64 time_seq);
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index aca2861f2187..17b5e81123a1 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -140,8 +140,6 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, const u8 *uuid, u8 typ
ret = 0;
subid_le = cpu_to_le64(subid_cpu);
write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
- btrfs_mark_buffer_dirty(trans, eb);
-
out:
btrfs_free_path(path);
return ret;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 8f340ad1d938..0a0776489055 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -13,8 +13,8 @@
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
-#include "ctree.h"
#include "disk-io.h"
+#include "extent-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
@@ -48,6 +48,7 @@ struct btrfs_io_geometry {
u64 raid56_full_stripe_start;
int max_errors;
enum btrfs_map_op op;
+ bool use_rst;
};
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
@@ -733,6 +734,118 @@ const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb)
}
/*
+ * We can have very weird soft links passed in.
+ * One example is "/proc/self/fd/<fd>", which can be a soft link to
+ * a block device.
+ *
+ * But it's never a good idea to use those weird names.
+ * Here we check if the path (not following symlinks) is a good one inside
+ * "/dev/".
+ */
+static bool is_good_dev_path(const char *dev_path)
+{
+ struct path path = { .mnt = NULL, .dentry = NULL };
+ char *path_buf = NULL;
+ char *resolved_path;
+ bool is_good = false;
+ int ret;
+
+ if (!dev_path)
+ goto out;
+
+ path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!path_buf)
+ goto out;
+
+ /*
+ * Do not follow soft link, just check if the original path is inside
+ * "/dev/".
+ */
+ ret = kern_path(dev_path, 0, &path);
+ if (ret)
+ goto out;
+ resolved_path = d_path(&path, path_buf, PATH_MAX);
+ if (IS_ERR(resolved_path))
+ goto out;
+ if (strncmp(resolved_path, "/dev/", strlen("/dev/")))
+ goto out;
+ is_good = true;
+out:
+ kfree(path_buf);
+ path_put(&path);
+ return is_good;
+}
+
+static int get_canonical_dev_path(const char *dev_path, char *canonical)
+{
+ struct path path = { .mnt = NULL, .dentry = NULL };
+ char *path_buf = NULL;
+ char *resolved_path;
+ int ret;
+
+ if (!dev_path) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!path_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = kern_path(dev_path, LOOKUP_FOLLOW, &path);
+ if (ret)
+ goto out;
+ resolved_path = d_path(&path, path_buf, PATH_MAX);
+ if (IS_ERR(resolved_path)) {
+ ret = PTR_ERR(resolved_path);
+ goto out;
+ }
+ ret = strscpy(canonical, resolved_path, PATH_MAX);
+out:
+ kfree(path_buf);
+ path_put(&path);
+ return ret;
+}
+
+static bool is_same_device(struct btrfs_device *device, const char *new_path)
+{
+ struct path old = { .mnt = NULL, .dentry = NULL };
+ struct path new = { .mnt = NULL, .dentry = NULL };
+ char *old_path = NULL;
+ bool is_same = false;
+ int ret;
+
+ if (!device->name)
+ goto out;
+
+ old_path = kzalloc(PATH_MAX, GFP_NOFS);
+ if (!old_path)
+ goto out;
+
+ rcu_read_lock();
+ ret = strscpy(old_path, rcu_str_deref(device->name), PATH_MAX);
+ rcu_read_unlock();
+ if (ret < 0)
+ goto out;
+
+ ret = kern_path(old_path, LOOKUP_FOLLOW, &old);
+ if (ret)
+ goto out;
+ ret = kern_path(new_path, LOOKUP_FOLLOW, &new);
+ if (ret)
+ goto out;
+ if (path_equal(&old, &new))
+ is_same = true;
+out:
+ kfree(old_path);
+ path_put(&old);
+ path_put(&new);
+ return is_same;
+}
+
+/*
* Add new device to list of registered devices
*
* Returns:
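
A minimal sketch of how these helpers combine, mirroring the fallback done in btrfs_scan_one_device() further below; user_path, register_path and buf are placeholder names:

char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
const char *register_path = user_path;

/* Paths such as "/proc/self/fd/3" do not live under /dev/, so canonicalize them first. */
if (buf && !is_good_dev_path(user_path) &&
    get_canonical_dev_path(user_path, buf) >= 0)
	register_path = buf;	/* e.g. resolves to "/dev/sdb" */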
@@ -852,7 +965,7 @@ static noinline struct btrfs_device *device_list_add(const char *path,
MAJOR(path_devt), MINOR(path_devt),
current->comm, task_pid_nr(current));
- } else if (!device->name || strcmp(device->name->str, path)) {
+ } else if (!device->name || !is_same_device(device, path)) {
/*
* When FS is already mounted.
* 1. If you are here and if the device->name is NULL that
@@ -1105,6 +1218,7 @@ static void btrfs_close_one_device(struct btrfs_device *device)
if (device->bdev) {
fs_devices->open_devices--;
device->bdev = NULL;
+ device->bdev_file = NULL;
}
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
btrfs_destroy_dev_zone_info(device);
@@ -1189,6 +1303,7 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
struct btrfs_device *device;
struct btrfs_device *latest_dev = NULL;
struct btrfs_device *tmp_device;
+ s64 __maybe_unused value = 0;
int ret = 0;
list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
@@ -1218,7 +1333,23 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
fs_devices->latest_dev = latest_dev;
fs_devices->total_rw_bytes = 0;
fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ fs_devices->rr_min_contig_read = BTRFS_DEFAULT_RR_MIN_CONTIG_READ;
+ fs_devices->read_devid = latest_dev->devid;
+ fs_devices->read_policy = btrfs_read_policy_to_enum(btrfs_get_mod_read_policy(),
+ &value);
+ if (fs_devices->read_policy == BTRFS_READ_POLICY_RR)
+ fs_devices->collect_fs_stats = true;
+
+ if (value) {
+ if (fs_devices->read_policy == BTRFS_READ_POLICY_RR)
+ fs_devices->rr_min_contig_read = value;
+ if (fs_devices->read_policy == BTRFS_READ_POLICY_DEVID)
+ fs_devices->read_devid = value;
+ }
+#else
fs_devices->read_policy = BTRFS_READ_POLICY_PID;
+#endif
return 0;
}
@@ -1382,12 +1513,23 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
bool new_device_added = false;
struct btrfs_device *device = NULL;
struct file *bdev_file;
+ char *canonical_path = NULL;
u64 bytenr;
dev_t devt;
int ret;
lockdep_assert_held(&uuid_mutex);
+ if (!is_good_dev_path(path)) {
+ canonical_path = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (canonical_path) {
+ ret = get_canonical_dev_path(path, canonical_path);
+ if (ret < 0) {
+ kfree(canonical_path);
+ canonical_path = NULL;
+ }
+ }
+ }
/*
* Avoid an exclusive open here, as the systemd-udev may initiate the
* device scan which may race with the user's mount or mkfs command,
@@ -1432,7 +1574,8 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
goto free_disk_super;
}
- device = device_list_add(path, disk_super, &new_device_added);
+ device = device_list_add(canonical_path ? : path, disk_super,
+ &new_device_added);
if (!IS_ERR(device) && new_device_added)
btrfs_free_stale_devices(device->devt, device);
@@ -1441,6 +1584,7 @@ free_disk_super:
error_bdev_put:
fput(bdev_file);
+ kfree(canonical_path);
return device;
}
@@ -1923,7 +2067,6 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
ptr = btrfs_device_fsid(dev_item);
write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
ptr, BTRFS_FSID_SIZE);
- btrfs_mark_buffer_dirty(trans, leaf);
ret = 0;
out:
@@ -2619,11 +2762,9 @@ next_slot:
device = btrfs_find_device(fs_info->fs_devices, &args);
BUG_ON(!device); /* Logic error */
- if (device->fs_devices->seeding) {
+ if (device->fs_devices->seeding)
btrfs_set_device_generation(leaf, dev_item,
device->generation);
- btrfs_mark_buffer_dirty(trans, leaf);
- }
path->slots[0]++;
goto next_slot;
@@ -2720,8 +2861,6 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
set_blocksize(device->bdev_file, BTRFS_BDEV_BLOCKSIZE);
if (seeding_dev) {
- btrfs_clear_sb_rdonly(sb);
-
/* GFP_KERNEL allocation must not be under device_list_mutex */
seed_devices = btrfs_init_sprout(fs_info);
if (IS_ERR(seed_devices)) {
@@ -2864,8 +3003,6 @@ error_sysfs:
mutex_unlock(&fs_info->chunk_mutex);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
error_trans:
- if (seeding_dev)
- btrfs_set_sb_rdonly(sb);
if (trans)
btrfs_end_transaction(trans);
error_free_zone:
@@ -2920,8 +3057,6 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
btrfs_device_get_disk_total_bytes(device));
btrfs_set_device_bytes_used(leaf, dev_item,
btrfs_device_get_bytes_used(device));
- btrfs_mark_buffer_dirty(trans, leaf);
-
out:
btrfs_free_path(path);
return ret;
@@ -3630,10 +3765,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info,
btrfs_set_balance_meta(leaf, item, &disk_bargs);
btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
btrfs_set_balance_sys(leaf, item, &disk_bargs);
-
btrfs_set_balance_flags(leaf, item, bctl->flags);
-
- btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
err = btrfs_commit_transaction(trans);
@@ -5309,7 +5441,7 @@ static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
- /* stripe_size is fixed in zoned filesysmte. Reduce ndevs instead. */
+ /* stripe_size is fixed in zoned filesystem. Reduce ndevs instead. */
if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
ctl->stripe_size) + ctl->nparity,
@@ -5395,33 +5527,34 @@ void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_ma
btrfs_free_chunk_map(map);
}
+static int btrfs_chunk_map_cmp(const struct rb_node *new,
+ const struct rb_node *exist)
+{
+ const struct btrfs_chunk_map *new_map =
+ rb_entry(new, struct btrfs_chunk_map, rb_node);
+ const struct btrfs_chunk_map *exist_map =
+ rb_entry(exist, struct btrfs_chunk_map, rb_node);
+
+ if (new_map->start == exist_map->start)
+ return 0;
+ if (new_map->start < exist_map->start)
+ return -1;
+ return 1;
+}
+
EXPORT_FOR_TESTS
int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
{
- struct rb_node **p;
- struct rb_node *parent = NULL;
- bool leftmost = true;
+ struct rb_node *exist;
write_lock(&fs_info->mapping_tree_lock);
- p = &fs_info->mapping_tree.rb_root.rb_node;
- while (*p) {
- struct btrfs_chunk_map *entry;
-
- parent = *p;
- entry = rb_entry(parent, struct btrfs_chunk_map, rb_node);
-
- if (map->start < entry->start) {
- p = &(*p)->rb_left;
- } else if (map->start > entry->start) {
- p = &(*p)->rb_right;
- leftmost = false;
- } else {
- write_unlock(&fs_info->mapping_tree_lock);
- return -EEXIST;
- }
+ exist = rb_find_add_cached(&map->rb_node, &fs_info->mapping_tree,
+ btrfs_chunk_map_cmp);
+
+ if (exist) {
+ write_unlock(&fs_info->mapping_tree_lock);
+ return -EEXIST;
}
- rb_link_node(&map->rb_node, parent, p);
- rb_insert_color_cached(&map->rb_node, &fs_info->mapping_tree, leftmost);
chunk_map_device_set_bits(map, CHUNK_ALLOCATED);
chunk_map_device_clear_bits(map, CHUNK_TRIMMED);
write_unlock(&fs_info->mapping_tree_lock);
@@ -5841,23 +5974,75 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
return len;
}
-int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+static int btrfs_read_preferred(struct btrfs_chunk_map *map, int first, int num_stripes)
{
- struct btrfs_chunk_map *map;
- int ret = 0;
+ for (int index = first; index < first + num_stripes; index++) {
+ const struct btrfs_device *device = map->stripes[index].dev;
- if (!btrfs_fs_incompat(fs_info, RAID56))
- return 0;
+ if (device->devid == READ_ONCE(device->fs_devices->read_devid))
+ return index;
+ }
- map = btrfs_get_chunk_map(fs_info, logical, len);
+ /* If no read-preferred device is set use the first stripe. */
+ return first;
+}
- if (!WARN_ON(IS_ERR(map))) {
- if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
- ret = 1;
- btrfs_free_chunk_map(map);
+struct stripe_mirror {
+ u64 devid;
+ int num;
+};
+
+static int btrfs_cmp_devid(const void *a, const void *b)
+{
+ const struct stripe_mirror *s1 = (const struct stripe_mirror *)a;
+ const struct stripe_mirror *s2 = (const struct stripe_mirror *)b;
+
+ if (s1->devid < s2->devid)
+ return -1;
+ if (s1->devid > s2->devid)
+ return 1;
+ return 0;
+}
+
+/*
+ * Select a stripe for reading using the round-robin algorithm.
+ *
+ * 1. Compute the read cycle as the total sectors read divided by the minimum
+ * sectors per device.
+ * 2. Determine the stripe number for the current read by taking the modulus
+ * of the read cycle with the total number of stripes:
+ *
+ * stripe index = (total sectors / min sectors per dev) % num stripes
+ *
+ * The calculated stripe index is then used to select the corresponding device
+ * from the list of devices, which is ordered by devid.
+ */
+static int btrfs_read_rr(const struct btrfs_chunk_map *map, int first, int num_stripes)
+{
+ struct stripe_mirror stripes[BTRFS_RAID1_MAX_MIRRORS] = { 0 };
+ struct btrfs_device *device = map->stripes[first].dev;
+ struct btrfs_fs_info *fs_info = device->fs_devices->fs_info;
+ unsigned int read_cycle;
+ unsigned int total_reads;
+ unsigned int min_reads_per_dev;
+
+ total_reads = percpu_counter_sum(&fs_info->stats_read_blocks);
+ min_reads_per_dev = READ_ONCE(fs_info->fs_devices->rr_min_contig_read) >>
+ fs_info->sectorsize_bits;
+
+ for (int index = 0, i = first; i < first + num_stripes; i++) {
+ stripes[index].devid = map->stripes[i].dev->devid;
+ stripes[index].num = i;
+ index++;
}
- return ret;
+ sort(stripes, num_stripes, sizeof(struct stripe_mirror),
+ btrfs_cmp_devid, NULL);
+
+ read_cycle = total_reads / min_reads_per_dev;
+ return stripes[read_cycle % num_stripes].num;
}
+#endif
static int find_live_mirror(struct btrfs_fs_info *fs_info,
struct btrfs_chunk_map *map, int first,
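
The index math from the comment above, pulled out as a stand-alone sketch; rr_stripe_index is illustrative only, and the real code additionally sorts the candidate stripes by devid before applying the result:

static unsigned int rr_stripe_index(unsigned int total_reads,
				    unsigned int min_reads_per_dev,
				    unsigned int num_stripes)
{
	/* stripe index = (total sectors read / min sectors per device) % num stripes */
	unsigned int read_cycle = total_reads / min_reads_per_dev;

	return read_cycle % num_stripes;
}

For example, with the default 256K minimum contiguous read and a 4K sector size (64 reads per cycle) on a two-stripe RAID1, reads 0-63 go to stripe 0, reads 64-127 to stripe 1, and so on.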
@@ -5888,6 +6073,14 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
case BTRFS_READ_POLICY_PID:
preferred_mirror = first + (current->pid % num_stripes);
break;
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ case BTRFS_READ_POLICY_RR:
+ preferred_mirror = btrfs_read_rr(map, first, num_stripes);
+ break;
+ case BTRFS_READ_POLICY_DEVID:
+ preferred_mirror = btrfs_read_preferred(map, first, num_stripes);
+ break;
+#endif
}
if (dev_replace_is_ongoing &&
@@ -5919,9 +6112,9 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
return preferred_mirror;
}
-static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
- u64 logical,
- u16 total_stripes)
+EXPORT_FOR_TESTS
+struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
+ u64 logical, u16 total_stripes)
{
struct btrfs_io_context *bioc;
@@ -6246,8 +6439,7 @@ static int set_io_stripe(struct btrfs_fs_info *fs_info, u64 logical,
{
dst->dev = map->stripes[io_geom->stripe_index].dev;
- if (io_geom->op == BTRFS_MAP_READ &&
- btrfs_need_stripe_tree_update(fs_info, map->type))
+ if (io_geom->op == BTRFS_MAP_READ && io_geom->use_rst)
return btrfs_get_raid_extent_offset(fs_info, logical, length,
map->type,
io_geom->stripe_index, dst);
@@ -6262,7 +6454,7 @@ static bool is_single_device_io(struct btrfs_fs_info *fs_info,
const struct btrfs_io_stripe *smap,
const struct btrfs_chunk_map *map,
int num_alloc_stripes,
- enum btrfs_map_op op, int mirror_num)
+ struct btrfs_io_geometry *io_geom)
{
if (!smap)
return false;
@@ -6270,10 +6462,10 @@ static bool is_single_device_io(struct btrfs_fs_info *fs_info,
if (num_alloc_stripes != 1)
return false;
- if (btrfs_need_stripe_tree_update(fs_info, map->type) && op != BTRFS_MAP_READ)
+ if (io_geom->use_rst && io_geom->op != BTRFS_MAP_READ)
return false;
- if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1)
+ if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && io_geom->mirror_num > 1)
return false;
return true;
@@ -6479,14 +6671,17 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
io_geom.raid56_full_stripe_start = (u64)-1;
max_len = btrfs_max_io_len(map, map_offset, &io_geom);
*length = min_t(u64, map->chunk_len - map_offset, max_len);
+ io_geom.use_rst = btrfs_need_stripe_tree_update(fs_info, map->type);
+
+ if (dev_replace->replace_task != current)
+ down_read(&dev_replace->rwsem);
- down_read(&dev_replace->rwsem);
dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
/*
* Hold the semaphore for read during the whole operation, write is
* requested at commit time but must wait.
*/
- if (!dev_replace_is_ongoing)
+ if (!dev_replace_is_ongoing && dev_replace->replace_task != current)
up_read(&dev_replace->rwsem);
switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
@@ -6545,8 +6740,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
* physical block information on the stack instead of allocating an
* I/O context structure.
*/
- if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, op,
- io_geom.mirror_num)) {
+ if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, &io_geom)) {
ret = set_io_stripe(fs_info, logical, length, smap, map, &io_geom);
if (mirror_num_ret)
*mirror_num_ret = io_geom.mirror_num;
@@ -6560,6 +6754,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
goto out;
}
bioc->map_type = map->type;
+ bioc->use_rst = io_geom.use_rst;
/*
* For RAID56 full map, we need to make sure the stripes[] follows the
@@ -6626,7 +6821,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
bioc->mirror_num = io_geom.mirror_num;
out:
- if (dev_replace_is_ongoing) {
+ if (dev_replace_is_ongoing && dev_replace->replace_task != current) {
lockdep_assert_held(&dev_replace->rwsem);
/* Unlock and let waiting writers proceed */
up_read(&dev_replace->rwsem);
@@ -6900,16 +7095,6 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
warn_32bit_meta_chunk(fs_info, logical, length, type);
#endif
- /*
- * Only need to verify chunk item if we're reading from sys chunk array,
- * as chunk item in tree block is already verified by tree-checker.
- */
- if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
- ret = btrfs_check_chunk_valid(leaf, chunk, logical);
- if (ret)
- return ret;
- }
-
map = btrfs_find_chunk_map(fs_info, logical, 1);
/* already mapped? */
@@ -7167,16 +7352,11 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
struct btrfs_super_block *super_copy = fs_info->super_copy;
struct extent_buffer *sb;
- struct btrfs_disk_key *disk_key;
- struct btrfs_chunk *chunk;
u8 *array_ptr;
unsigned long sb_array_offset;
int ret = 0;
- u32 num_stripes;
u32 array_size;
- u32 len = 0;
u32 cur_offset;
- u64 type;
struct btrfs_key key;
ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
@@ -7199,10 +7379,15 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
cur_offset = 0;
while (cur_offset < array_size) {
- disk_key = (struct btrfs_disk_key *)array_ptr;
- len = sizeof(*disk_key);
- if (cur_offset + len > array_size)
- goto out_short_read;
+ struct btrfs_chunk *chunk;
+ struct btrfs_disk_key *disk_key = (struct btrfs_disk_key *)array_ptr;
+ u32 len = sizeof(*disk_key);
+
+ /*
+ * The sys_chunk_array has been already verified at super block
+ * read time. Only do ASSERT()s for basic checks.
+ */
+ ASSERT(cur_offset + len <= array_size);
btrfs_disk_key_to_cpu(&key, disk_key);
@@ -7210,44 +7395,14 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
sb_array_offset += len;
cur_offset += len;
- if (key.type != BTRFS_CHUNK_ITEM_KEY) {
- btrfs_err(fs_info,
- "unexpected item type %u in sys_array at offset %u",
- (u32)key.type, cur_offset);
- ret = -EIO;
- break;
- }
+ ASSERT(key.type == BTRFS_CHUNK_ITEM_KEY);
chunk = (struct btrfs_chunk *)sb_array_offset;
- /*
- * At least one btrfs_chunk with one stripe must be present,
- * exact stripe count check comes afterwards
- */
- len = btrfs_chunk_item_size(1);
- if (cur_offset + len > array_size)
- goto out_short_read;
-
- num_stripes = btrfs_chunk_num_stripes(sb, chunk);
- if (!num_stripes) {
- btrfs_err(fs_info,
- "invalid number of stripes %u in sys_array at offset %u",
- num_stripes, cur_offset);
- ret = -EIO;
- break;
- }
+ ASSERT(btrfs_chunk_type(sb, chunk) & BTRFS_BLOCK_GROUP_SYSTEM);
- type = btrfs_chunk_type(sb, chunk);
- if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
- btrfs_err(fs_info,
- "invalid chunk type %llu in sys_array at offset %u",
- type, cur_offset);
- ret = -EIO;
- break;
- }
+ len = btrfs_chunk_item_size(btrfs_chunk_num_stripes(sb, chunk));
- len = btrfs_chunk_item_size(num_stripes);
- if (cur_offset + len > array_size)
- goto out_short_read;
+ ASSERT(cur_offset + len <= array_size);
ret = read_one_chunk(&key, sb, chunk);
if (ret)
@@ -7260,13 +7415,6 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
clear_extent_buffer_uptodate(sb);
free_extent_buffer_stale(sb);
return ret;
-
-out_short_read:
- btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
- len, cur_offset);
- clear_extent_buffer_uptodate(sb);
- free_extent_buffer_stale(sb);
- return -EIO;
}
/*
@@ -7466,8 +7614,6 @@ int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
struct btrfs_device *device;
int ret = 0;
- fs_devices->fs_info = fs_info;
-
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry(device, &fs_devices->devices, dev_list)
device->fs_info = fs_info;
@@ -7643,8 +7789,6 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
btrfs_set_dev_stats_value(eb, ptr, i,
btrfs_dev_stat_read(device, i));
- btrfs_mark_buffer_dirty(trans, eb);
-
out:
btrfs_free_path(path);
return ret;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 03d2d60afe0c..120f65e21eeb 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -30,6 +30,12 @@ struct btrfs_zoned_device_info;
#define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G)
+/*
+ * Arbitrary maximum size of one discard request, to limit the potentially long time
+ * spent in blkdev_issue_discard().
+ */
+#define BTRFS_MAX_DISCARD_CHUNK_SIZE (SZ_1G)
+
extern struct mutex uuid_mutex;
#define BTRFS_STRIPE_LEN SZ_64K
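
A minimal sketch of how such a cap is typically applied, splitting one large discard into BTRFS_MAX_DISCARD_CHUNK_SIZE pieces; discard_in_chunks is a hypothetical helper, not the code this patch adds elsewhere:

static int discard_in_chunks(struct block_device *bdev, u64 start, u64 len)
{
	while (len) {
		u64 size = min_t(u64, len, BTRFS_MAX_DISCARD_CHUNK_SIZE);
		int ret;

		ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					   size >> SECTOR_SHIFT, GFP_NOFS);
		if (ret)
			return ret;
		start += size;
		len -= size;
	}
	return 0;
}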
@@ -290,6 +296,9 @@ enum btrfs_chunk_allocation_policy {
BTRFS_CHUNK_ALLOC_ZONED,
};
+#define BTRFS_DEFAULT_RR_MIN_CONTIG_READ (SZ_256K)
+/* Keep in sync with raid_attr table, current maximum is RAID1C4. */
+#define BTRFS_RAID1_MAX_MIRRORS (4)
/*
* Read policies for mirrored block group profiles, read picks the stripe based
* on these policies.
@@ -297,10 +306,16 @@ enum btrfs_chunk_allocation_policy {
enum btrfs_read_policy {
/* Use process PID to choose the stripe */
BTRFS_READ_POLICY_PID,
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /* Balancing RAID1 reads across all striped devices (round-robin). */
+ BTRFS_READ_POLICY_RR,
+ /* Read from a specific device. */
+ BTRFS_READ_POLICY_DEVID,
+#endif
BTRFS_NR_READ_POLICY,
};
-#ifdef CONFIG_BTRFS_DEBUG
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
/*
* Checksum mode - offload it to workqueues or do it synchronously in
* btrfs_submit_chunk().
@@ -411,6 +426,8 @@ struct btrfs_fs_devices {
bool seeding;
/* The mount needs to use a randomly generated fsid. */
bool temp_fsid;
+ /* Enable/disable the filesystem stats tracking. */
+ bool collect_fs_stats;
struct btrfs_fs_info *fs_info;
/* sysfs kobjects */
@@ -424,7 +441,16 @@ struct btrfs_fs_devices {
/* Policy used to read the mirrored stripes. */
enum btrfs_read_policy read_policy;
-#ifdef CONFIG_BTRFS_DEBUG
+#ifdef CONFIG_BTRFS_EXPERIMENTAL
+ /*
+ * Minimum contiguous reads before switching to next device, the unit
+ * is one block/sectorsize.
+ */
+ u32 rr_min_contig_read;
+
+ /* Device to be used for reading in case of RAID1. */
+ u64 read_devid;
+
/* Checksum mode - offload it or do it synchronously. */
enum btrfs_offload_csum_mode offload_csum_mode;
#endif
@@ -479,6 +505,7 @@ struct btrfs_io_context {
struct bio *orig_bio;
atomic_t error;
u16 max_errors;
+ bool use_rst;
u64 logical;
u64 size;
@@ -735,8 +762,6 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev);
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
-int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
- u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
u64 logical);
u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map);
@@ -834,4 +859,9 @@ bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical);
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb);
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
+ u64 logical, u16 total_stripes);
+#endif
+
#endif
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index ce464cd8e0ac..3e0edbcf73e1 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -85,7 +85,6 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
{
struct btrfs_dir_item *di = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
size_t name_len = strlen(name);
int ret = 0;
@@ -143,14 +142,14 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
*/
ret = 0;
btrfs_assert_tree_write_locked(path->nodes[0]);
- di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
+ di = btrfs_match_dir_item_name(path, name, name_len);
if (!di && !(flags & XATTR_REPLACE)) {
ret = -ENOSPC;
goto out;
}
} else if (ret == -EEXIST) {
ret = 0;
- di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
+ di = btrfs_match_dir_item_name(path, name, name_len);
ASSERT(di); /* logic error */
} else if (ret) {
goto out;
@@ -205,7 +204,6 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
btrfs_set_dir_data_len(leaf, di, size);
data_ptr = ((unsigned long)(di + 1)) + name_len;
write_extent_buffer(leaf, value, data_ptr, size);
- btrfs_mark_buffer_dirty(trans, leaf);
} else {
/*
* Insert, and we had space for the xattr, so path->slots[0] is
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 100abc00b794..c9e92c6941ec 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -174,10 +174,10 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
copy_page(workspace->buf + i * PAGE_SIZE,
data_in);
start += PAGE_SIZE;
- workspace->strm.avail_in =
- (in_buf_folios << PAGE_SHIFT);
}
workspace->strm.next_in = workspace->buf;
+ workspace->strm.avail_in = min(bytes_left,
+ in_buf_folios << PAGE_SHIFT);
} else {
unsigned int pg_off;
unsigned int cur_len;
@@ -194,7 +194,7 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
pg_off = offset_in_page(start);
cur_len = btrfs_calc_input_length(orig_end, start);
data_in = kmap_local_folio(in_folio, pg_off);
- start += PAGE_SIZE;
+ start += cur_len;
workspace->strm.next_in = data_in;
workspace->strm.avail_in = cur_len;
}
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 7fa2920632ba..73e0aa9fc08a 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -707,11 +707,14 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
* zoned mode. In this case, we don't have a valid max zone
* append size.
*/
- if (bdev_is_zoned(device->bdev)) {
- blk_stack_limits(lim,
- &bdev_get_queue(device->bdev)->limits,
- 0);
- }
+ if (bdev_is_zoned(device->bdev))
+ blk_stack_limits(lim, bdev_limits(device->bdev), 0);
+ }
+
+ ret = blk_validate_limits(lim);
+ if (ret) {
+ btrfs_err(fs_info, "zoned: failed to validate queue limits");
+ return ret;
}
/*
@@ -745,8 +748,9 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
(u64)lim->max_segments << PAGE_SHIFT),
fs_info->sectorsize);
fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
- if (fs_info->max_zone_append_size < fs_info->max_extent_size)
- fs_info->max_extent_size = fs_info->max_zone_append_size;
+
+ fs_info->max_extent_size = min_not_zero(fs_info->max_extent_size,
+ fs_info->max_zone_append_size);
/*
* Check mount options here, because we might change fs_info->zoned
@@ -1340,7 +1344,7 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
switch (zone.cond) {
case BLK_ZONE_COND_OFFLINE:
case BLK_ZONE_COND_READONLY:
- btrfs_err(fs_info,
+ btrfs_err_in_rcu(fs_info,
"zoned: offline/readonly zone %llu on device %s (devid %llu)",
(info->physical >> device->zone_info->zone_size_shift),
rcu_str_deref(device->name), device->devid);
@@ -1739,7 +1743,7 @@ bool btrfs_use_zone_append(struct btrfs_bio *bbio)
return false;
/*
- * Using REQ_OP_ZONE_APPNED for relocation can break assumptions on the
+ * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
* extent layout the relocation code has.
* Furthermore we have set aside own block-group from which only the
* relocation "process" can allocate and make sure only one process at a
@@ -1973,7 +1977,7 @@ int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
if (block_group->meta_write_pointer > eb->start)
return -EBUSY;
- /* If for_sync, this hole will be filled with trasnsaction commit. */
+ /* If for_sync, this hole will be filled with transaction commit. */
if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
return -EAGAIN;
return -EBUSY;
@@ -2648,3 +2652,127 @@ void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
}
spin_unlock(&fs_info->zone_active_bgs_lock);
}
+
+/*
+ * Reset the zones of unused block groups from @space_info->bytes_zone_unusable.
+ *
+ * @space_info: the space to work on
+ * @num_bytes: targeting reclaim bytes
+ *
+ * This one resets the zones of a block group, so we can reuse the region
+ * without removing the block group. On the other hand, btrfs_delete_unused_bgs()
+ * just removes a block group and frees up the underlying zones. So, we still
+ * need to allocate a new block group to reuse the zones.
+ *
+ * Resetting is faster than deleting/recreating a block group. It is similar
+ * to freeing the logical space in regular (non-zoned) mode. However, we cannot change
+ * the block group's profile with this operation.
+ */
+int btrfs_reset_unused_block_groups(struct btrfs_space_info *space_info, u64 num_bytes)
+{
+ struct btrfs_fs_info *fs_info = space_info->fs_info;
+ const sector_t zone_size_sectors = fs_info->zone_size >> SECTOR_SHIFT;
+
+ if (!btrfs_is_zoned(fs_info))
+ return 0;
+
+ while (num_bytes > 0) {
+ struct btrfs_chunk_map *map;
+ struct btrfs_block_group *bg = NULL;
+ bool found = false;
+ u64 reclaimed = 0;
+
+ /*
+ * Here, we choose a fully zone_unusable block group. It's
+ * technically possible to reset a partly zone_unusable block
+ * group, which still has some free space left. However,
+ * handling that needs to cope with the allocation side, which
+ * makes the logic more complex. So, let's handle the easy case
+ * for now.
+ */
+ spin_lock(&fs_info->unused_bgs_lock);
+ list_for_each_entry(bg, &fs_info->unused_bgs, bg_list) {
+ if ((bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != space_info->flags)
+ continue;
+
+ /*
+ * Use trylock to avoid locking order violation. In
+ * btrfs_reclaim_bgs_work(), the lock order is
+ * &bg->lock -> &fs_info->unused_bgs_lock. We skip a
+ * block group if we cannot take its lock.
+ */
+ if (!spin_trylock(&bg->lock))
+ continue;
+ if (btrfs_is_block_group_used(bg) || bg->zone_unusable < bg->length) {
+ spin_unlock(&bg->lock);
+ continue;
+ }
+ spin_unlock(&bg->lock);
+ found = true;
+ break;
+ }
+ if (!found) {
+ spin_unlock(&fs_info->unused_bgs_lock);
+ return 0;
+ }
+
+ list_del_init(&bg->bg_list);
+ btrfs_put_block_group(bg);
+ spin_unlock(&fs_info->unused_bgs_lock);
+
+ /*
+ * Since the block group is fully zone_unusable and we cannot
+ * allocate from this block group anymore, we don't need to set
+ * this block group read-only.
+ */
+
+ down_read(&fs_info->dev_replace.rwsem);
+ map = bg->physical_map;
+ for (int i = 0; i < map->num_stripes; i++) {
+ struct btrfs_io_stripe *stripe = &map->stripes[i];
+ unsigned int nofs_flags;
+ int ret;
+
+ nofs_flags = memalloc_nofs_save();
+ ret = blkdev_zone_mgmt(stripe->dev->bdev, REQ_OP_ZONE_RESET,
+ stripe->physical >> SECTOR_SHIFT,
+ zone_size_sectors);
+ memalloc_nofs_restore(nofs_flags);
+
+ if (ret) {
+ up_read(&fs_info->dev_replace.rwsem);
+ return ret;
+ }
+ }
+ up_read(&fs_info->dev_replace.rwsem);
+
+ spin_lock(&space_info->lock);
+ spin_lock(&bg->lock);
+ ASSERT(!btrfs_is_block_group_used(bg));
+ if (bg->ro) {
+ spin_unlock(&bg->lock);
+ spin_unlock(&space_info->lock);
+ continue;
+ }
+
+ reclaimed = bg->alloc_offset;
+ bg->zone_unusable = bg->length - bg->zone_capacity;
+ bg->alloc_offset = 0;
+ /*
+ * This holds because we currently only reset block groups that were
+ * fully used and then freed.
+ */
+ ASSERT(reclaimed == bg->zone_capacity);
+ bg->free_space_ctl->free_space += reclaimed;
+ space_info->bytes_zone_unusable -= reclaimed;
+ spin_unlock(&bg->lock);
+ btrfs_return_free_space(space_info, reclaimed);
+ spin_unlock(&space_info->lock);
+
+ if (num_bytes <= reclaimed)
+ break;
+ num_bytes -= reclaimed;
+ }
+
+ return 0;
+}
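
A minimal usage sketch (hypothetical caller, not part of this patch): asking for 1 GiB of zone_unusable space back before falling through to other reclaim paths:

	ret = btrfs_reset_unused_block_groups(space_info, SZ_1G);
	if (ret < 0)
		btrfs_warn(space_info->fs_info,
			   "failed to reset unused zoned block groups: %d", ret);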
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 7612e6572605..9672bf4c3335 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -96,6 +96,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info);
int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, bool do_finish);
void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info);
+int btrfs_reset_unused_block_groups(struct btrfs_space_info *space_info, u64 num_bytes);
#else /* CONFIG_BLK_DEV_ZONED */
static inline int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
@@ -265,6 +266,12 @@ static inline int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
static inline void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info) { }
+static inline int btrfs_reset_unused_block_groups(struct btrfs_space_info *space_info,
+ u64 num_bytes)
+{
+ return 0;
+}
+
#endif
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 866607fd3e58..5232b56d5892 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -111,6 +111,8 @@ static void zstd_reclaim_timer_fn(struct timer_list *timer)
unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
struct list_head *pos, *next;
+ ASSERT(timer == &wsm.timer);
+
spin_lock(&wsm.lock);
if (list_empty(&wsm.lru_list)) {
@@ -495,7 +497,7 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
/* Check if we need more input */
if (workspace->in_buf.pos == workspace->in_buf.size) {
- tot_in += PAGE_SIZE;
+ tot_in += workspace->in_buf.size;
kunmap_local(workspace->in_buf.src);
workspace->in_buf.src = NULL;
folio_put(in_folio);