| author | Jakub Kicinski <kuba@kernel.org> | 2025-09-11 17:37:09 -0700 | 
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2025-09-11 17:40:13 -0700 | 
| commit | fc3a2810412c163b5df1b377d332e048860f45db (patch) | |
| tree | 9eeb81c7f965176a32ca3062aefcc3532c637b01 /fs/btrfs | |
| parent | 5f790208d68fe1526c751dc2af366c7b552b8631 (diff) | |
| parent | db87bd2ad1f736c2f7ab231f9b40c885934f6b2c (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR (net-6.17-rc6).
Conflicts:
net/netfilter/nft_set_pipapo.c
net/netfilter/nft_set_pipapo_avx2.c
  c4eaca2e1052 ("netfilter: nft_set_pipapo: don't check genbit from packetpath lookups")
  84c1da7b38d9 ("netfilter: nft_set_pipapo: use avx2 algorithm for insertions too")
Only trivial adjacent changes (in a doc and a Makefile).
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'fs/btrfs')
| -rw-r--r-- | fs/btrfs/extent_io.c | 40 |
| -rw-r--r-- | fs/btrfs/inode.c | 12 |
| -rw-r--r-- | fs/btrfs/qgroup.c | 6 |
| -rw-r--r-- | fs/btrfs/super.c | 9 |
| -rw-r--r-- | fs/btrfs/volumes.c | 5 |

5 files changed, 56 insertions, 16 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c953297aa89a..b21cb72835cc 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -111,6 +111,24 @@ struct btrfs_bio_ctrl {
 	 */
 	unsigned long submit_bitmap;
 	struct readahead_control *ractl;
+
+	/*
+	 * The start offset of the last used extent map by a read operation.
+	 *
+	 * This is for proper compressed read merge.
+	 * U64_MAX means we are starting the read and have made no progress yet.
+	 *
+	 * The current btrfs_bio_is_contig() only uses disk_bytenr as
+	 * the condition to check if the read can be merged with previous
+	 * bio, which is not correct. E.g. two file extents pointing to the
+	 * same extent but with different offset.
+	 *
+	 * So here we need to do extra checks to only merge reads that are
+	 * covered by the same extent map.
+	 * Just extent_map::start will be enough, as they are unique
+	 * inside the same inode.
+	 */
+	u64 last_em_start;
 };
 
 static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
@@ -909,7 +927,7 @@ static void btrfs_readahead_expand(struct readahead_control *ractl,
  * return 0 on success, otherwise return error
  */
 static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
-		      struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
+			     struct btrfs_bio_ctrl *bio_ctrl)
 {
 	struct inode *inode = folio->mapping->host;
 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
@@ -1019,12 +1037,11 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
 		 * non-optimal behavior (submitting 2 bios for the same extent).
 		 */
 		if (compress_type != BTRFS_COMPRESS_NONE &&
-		    prev_em_start && *prev_em_start != (u64)-1 &&
-		    *prev_em_start != em->start)
+		    bio_ctrl->last_em_start != U64_MAX &&
+		    bio_ctrl->last_em_start != em->start)
 			force_bio_submit = true;
 
-		if (prev_em_start)
-			*prev_em_start = em->start;
+		bio_ctrl->last_em_start = em->start;
 
 		btrfs_free_extent_map(em);
 		em = NULL;
@@ -1238,12 +1255,15 @@ int btrfs_read_folio(struct file *file, struct folio *folio)
 	const u64 start = folio_pos(folio);
 	const u64 end = start + folio_size(folio) - 1;
 	struct extent_state *cached_state = NULL;
-	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
+	struct btrfs_bio_ctrl bio_ctrl = {
+		.opf = REQ_OP_READ,
+		.last_em_start = U64_MAX,
+	};
 	struct extent_map *em_cached = NULL;
 	int ret;
 
 	lock_extents_for_read(inode, start, end, &cached_state);
-	ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
+	ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
 	btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
 
 	btrfs_free_extent_map(em_cached);
@@ -2583,7 +2603,8 @@ void btrfs_readahead(struct readahead_control *rac)
 {
 	struct btrfs_bio_ctrl bio_ctrl = {
 		.opf = REQ_OP_READ | REQ_RAHEAD,
-		.ractl = rac
+		.ractl = rac,
+		.last_em_start = U64_MAX,
 	};
 	struct folio *folio;
 	struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
@@ -2591,12 +2612,11 @@ void btrfs_readahead(struct readahead_control *rac)
 	const u64 end = start + readahead_length(rac) - 1;
 	struct extent_state *cached_state = NULL;
 	struct extent_map *em_cached = NULL;
-	u64 prev_em_start = (u64)-1;
 
 	lock_extents_for_read(inode, start, end, &cached_state);
 
 	while ((folio = readahead_folio(rac)) != NULL)
-		btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
+		btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
 
 	btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
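The extent_io.c change folds the old per-call `prev_em_start` cursor into `struct btrfs_bio_ctrl` as `last_em_start`, so both btrfs_read_folio() and btrfs_readahead() share the same merge bookkeeping. A minimal stand-alone sketch of the decision this field drives; plain C with illustrative names, not the kernel code itself:

```c
/*
 * Illustrative sketch (not kernel code): a compressed read may only be
 * merged into the current bio when it comes from the same extent map.
 * disk_bytenr alone is not enough, since two file extents can point at
 * the same on-disk extent with different offsets, so the extent map
 * start offset is the merge key. UINT64_MAX means "nothing read yet".
 */
#include <stdbool.h>
#include <stdint.h>

#define EM_START_NONE UINT64_MAX

struct read_ctrl {
	uint64_t last_em_start;	/* start of the last extent map used */
};

static bool must_submit_current_bio(struct read_ctrl *ctrl,
				    uint64_t em_start, bool compressed)
{
	bool force = compressed &&
		     ctrl->last_em_start != EM_START_NONE &&
		     ctrl->last_em_start != em_start;

	ctrl->last_em_start = em_start;	/* remember for the next folio */
	return force;
}
```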
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index dd82dcc7b2b7..e7218e78bff4 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5696,7 +5696,17 @@ static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
 	bool empty = false;
 
 	xa_lock(&root->inodes);
-	entry = __xa_erase(&root->inodes, btrfs_ino(inode));
+	/*
+	 * This btrfs_inode is being freed and has already been unhashed at this
+	 * point. It's possible that another btrfs_inode has already been
+	 * allocated for the same inode and inserted itself into the root, so
+	 * don't delete it in that case.
+	 *
+	 * Note that this shouldn't need to allocate memory, so the gfp flags
+	 * don't really matter.
+	 */
+	entry = __xa_cmpxchg(&root->inodes, btrfs_ino(inode), inode, NULL,
+			     GFP_ATOMIC);
 	if (entry == inode)
 		empty = xa_empty(&root->inodes);
 	xa_unlock(&root->inodes);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index ccaa9a3cf1ce..da102da169fd 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1455,6 +1455,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
 	struct btrfs_qgroup *qgroup;
 	LIST_HEAD(qgroup_list);
 	u64 num_bytes = src->excl;
+	u64 num_bytes_cmpr = src->excl_cmpr;
 	int ret = 0;
 
 	qgroup = find_qgroup_rb(fs_info, ref_root);
@@ -1466,11 +1467,12 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
 		struct btrfs_qgroup_list *glist;
 
 		qgroup->rfer += sign * num_bytes;
-		qgroup->rfer_cmpr += sign * num_bytes;
+		qgroup->rfer_cmpr += sign * num_bytes_cmpr;
 
 		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
+		WARN_ON(sign < 0 && qgroup->excl_cmpr < num_bytes_cmpr);
 		qgroup->excl += sign * num_bytes;
-		qgroup->excl_cmpr += sign * num_bytes;
+		qgroup->excl_cmpr += sign * num_bytes_cmpr;
 
 		if (sign > 0)
 			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index a262b494a89f..df1f6cc3fe21 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -299,9 +299,12 @@ static int btrfs_parse_compress(struct btrfs_fs_context *ctx,
 		btrfs_set_opt(ctx->mount_opt, COMPRESS);
 		btrfs_clear_opt(ctx->mount_opt, NODATACOW);
 		btrfs_clear_opt(ctx->mount_opt, NODATASUM);
-	} else if (btrfs_match_compress_type(string, "lzo", false)) {
+	} else if (btrfs_match_compress_type(string, "lzo", true)) {
 		ctx->compress_type = BTRFS_COMPRESS_LZO;
-		ctx->compress_level = 0;
+		ctx->compress_level = btrfs_compress_str2level(BTRFS_COMPRESS_LZO,
+							       string + 3);
+		if (string[3] == ':' && string[4])
+			btrfs_warn(NULL, "Compression level ignored for LZO");
 		btrfs_set_opt(ctx->mount_opt, COMPRESS);
 		btrfs_clear_opt(ctx->mount_opt, NODATACOW);
 		btrfs_clear_opt(ctx->mount_opt, NODATASUM);
@@ -1079,7 +1082,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
 			seq_printf(seq, ",compress-force=%s", compress_type);
 		else
 			seq_printf(seq, ",compress=%s", compress_type);
-		if (info->compress_level)
+		if (info->compress_level && info->compress_type != BTRFS_COMPRESS_LZO)
 			seq_printf(seq, ":%d", info->compress_level);
 	}
 	if (btrfs_test_opt(info, NOSSD))
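The inode.c hunk replaces an unconditional `__xa_erase()` with `__xa_cmpxchg()`, so a btrfs_inode being freed cannot knock out a newer btrfs_inode that has already been inserted at the same index. A rough sketch of that conditional-removal pattern against the generic xarray API; the helper name below is made up for illustration:

```c
/*
 * Sketch (kernel-style, illustrative): erase an xarray slot only if it
 * still points at this object. If a newer object already took over the
 * index, leave it in place. Storing NULL erases the entry, and since no
 * allocation is needed the GFP flags are effectively irrelevant.
 */
#include <linux/xarray.h>

static bool remove_if_still_ours(struct xarray *xa, unsigned long index,
				 void *me)
{
	void *entry;

	xa_lock(xa);
	entry = __xa_cmpxchg(xa, index, me, NULL, GFP_ATOMIC);
	xa_unlock(xa);

	return entry == me;	/* true only if we removed our own entry */
}
```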
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index fa7a929a0461..c6e3efd6f602 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2722,6 +2722,11 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
 		goto error;
 	}
 
+	if (bdev_nr_bytes(file_bdev(bdev_file)) <= BTRFS_DEVICE_RANGE_RESERVED) {
+		ret = -EINVAL;
+		goto error;
+	}
+
 	if (fs_devices->seeding) {
 		seeding_dev = true;
 		down_write(&sb->s_umount);
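The volumes.c hunk rejects adding a device that is not larger than the range btrfs keeps reserved at the start of every device. A trivial userspace sketch of that early check; the 1 MiB value is an assumption about BTRFS_DEVICE_RANGE_RESERVED, not taken from this diff:

```c
/*
 * Illustrative sketch: a new device must be strictly larger than the
 * reserved range at the start of every btrfs device, otherwise there is
 * no allocatable space at all and the add is refused up front.
 * The 1 MiB reserved size below is an assumption for the example.
 */
#include <errno.h>
#include <stdint.h>

#define DEVICE_RANGE_RESERVED	(1024ULL * 1024ULL)	/* assumed 1 MiB */

static int check_new_device_size(uint64_t dev_bytes)
{
	if (dev_bytes <= DEVICE_RANGE_RESERVED)
		return -EINVAL;	/* too small to hold any usable chunk */
	return 0;
}
```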
