From 544fe4a903ce71fb8ecbc159db6f245ef3f691fe Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Feb 2023 08:48:34 +0100 Subject: btrfs: embed a btrfs_bio into struct compressed_bio Embed a btrfs_bio into struct compressed_bio. This avoids potential (so far theoretical) deadlocks due to nesting of btrfs_bioset allocations for the original read bio and the compressed bio, and avoids an extra memory allocation in the I/O path. Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 146 +++++++++++++++++++++++-------------------------- 1 file changed, 68 insertions(+), 78 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index f42f31f22d13..cd0cfa8fdb8c 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -37,6 +37,8 @@ #include "file-item.h" #include "super.h" +struct bio_set btrfs_compressed_bioset; + static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" }; const char* btrfs_compress_type2str(enum btrfs_compression_type type) @@ -54,6 +56,24 @@ const char* btrfs_compress_type2str(enum btrfs_compression_type type) return NULL; } +static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio) +{ + return container_of(bbio, struct compressed_bio, bbio); +} + +static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode, + u64 start, blk_opf_t op, + btrfs_bio_end_io_t end_io) +{ + struct btrfs_bio *bbio; + + bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op, + GFP_NOFS, &btrfs_compressed_bioset)); + btrfs_bio_init(bbio, inode, end_io, NULL); + bbio->file_offset = start; + return to_compressed_bio(bbio); +} + bool btrfs_compress_is_valid_type(const char *str, size_t len) { int i; @@ -143,14 +163,13 @@ static int btrfs_decompress_bio(struct compressed_bio *cb); static void end_compressed_bio_read(struct btrfs_bio *bbio) { - struct compressed_bio *cb = bbio->private; + struct compressed_bio *cb = to_compressed_bio(bbio); + blk_status_t status = bbio->bio.bi_status; unsigned int index; struct page *page; - if (bbio->bio.bi_status) - cb->status = bbio->bio.bi_status; - else - cb->status = errno_to_blk_status(btrfs_decompress_bio(cb)); + if (!status) + status = errno_to_blk_status(btrfs_decompress_bio(cb)); /* Release the compressed pages */ for (index = 0; index < cb->nr_pages; index++) { @@ -160,11 +179,10 @@ static void end_compressed_bio_read(struct btrfs_bio *bbio) } /* Do io completion on the original bio */ - btrfs_bio_end_io(btrfs_bio(cb->orig_bio), cb->status); + btrfs_bio_end_io(btrfs_bio(cb->orig_bio), status); /* Finally free the cb struct */ kfree(cb->compressed_pages); - kfree(cb); bio_put(&bbio->bio); } @@ -172,14 +190,14 @@ static void end_compressed_bio_read(struct btrfs_bio *bbio) * Clear the writeback bits on all of the file * pages for a compressed write */ -static noinline void end_compressed_writeback(struct inode *inode, - const struct compressed_bio *cb) +static noinline void end_compressed_writeback(const struct compressed_bio *cb) { + struct inode *inode = &cb->bbio.inode->vfs_inode; struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); unsigned long index = cb->start >> PAGE_SHIFT; unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT; struct folio_batch fbatch; - const int errno = blk_status_to_errno(cb->status); + const int errno = blk_status_to_errno(cb->bbio.bio.bi_status); int i; int ret; @@ -209,19 
+227,18 @@ static noinline void end_compressed_writeback(struct inode *inode, static void finish_compressed_bio_write(struct compressed_bio *cb) { - struct inode *inode = cb->inode; unsigned int index; /* * Ok, we're the last bio for this extent, step one is to call back * into the FS and do all the end_io operations. */ - btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL, + btrfs_writepage_endio_finish_ordered(cb->bbio.inode, NULL, cb->start, cb->start + cb->len - 1, - cb->status == BLK_STS_OK); + cb->bbio.bio.bi_status == BLK_STS_OK); if (cb->writeback) - end_compressed_writeback(inode, cb); + end_compressed_writeback(cb); /* Note, our inode could be gone now */ /* @@ -237,7 +254,7 @@ static void finish_compressed_bio_write(struct compressed_bio *cb) /* Finally free the cb struct */ kfree(cb->compressed_pages); - kfree(cb); + bio_put(&cb->bbio.bio); } static void btrfs_finish_compressed_write_work(struct work_struct *work) @@ -257,13 +274,10 @@ static void btrfs_finish_compressed_write_work(struct work_struct *work) */ static void end_compressed_bio_write(struct btrfs_bio *bbio) { - struct compressed_bio *cb = bbio->private; - struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb); + struct compressed_bio *cb = to_compressed_bio(bbio); + struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info; - cb->status = bbio->bio.bi_status; queue_work(fs_info->compressed_write_workers, &cb->write_end_work); - - bio_put(&bbio->bio); } /* @@ -275,7 +289,7 @@ static void end_compressed_bio_write(struct btrfs_bio *bbio) * This also checksums the file bytes and gets things ready for * the end io hooks. */ -blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, +void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, unsigned int len, u64 disk_start, unsigned int compressed_len, struct page **compressed_pages, @@ -285,18 +299,21 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, bool writeback) { struct btrfs_fs_info *fs_info = inode->root->fs_info; - struct bio *bio = NULL; + struct bio *bio; struct compressed_bio *cb; u64 cur_disk_bytenr = disk_start; - blk_status_t ret = BLK_STS_OK; ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && IS_ALIGNED(len, fs_info->sectorsize)); - cb = kmalloc(sizeof(struct compressed_bio), GFP_NOFS); - if (!cb) - return BLK_STS_RESOURCE; - cb->status = BLK_STS_OK; - cb->inode = &inode->vfs_inode; + + if (blkcg_css) { + kthread_associate_blkcg(blkcg_css); + write_flags |= REQ_CGROUP_PUNT; + } + write_flags |= REQ_BTRFS_ONE_ORDERED; + + cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE | write_flags, + end_compressed_bio_write); cb->start = start; cb->len = len; cb->compressed_pages = compressed_pages; @@ -305,16 +322,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work); cb->nr_pages = nr_pages; - if (blkcg_css) { - kthread_associate_blkcg(blkcg_css); - write_flags |= REQ_CGROUP_PUNT; - } - - write_flags |= REQ_BTRFS_ONE_ORDERED; - bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_WRITE | write_flags, - BTRFS_I(cb->inode), end_compressed_bio_write, cb); - bio->bi_iter.bi_sector = cur_disk_bytenr >> SECTOR_SHIFT; - btrfs_bio(bio)->file_offset = start; + bio = &cb->bbio.bio; + bio->bi_iter.bi_sector = disk_start >> SECTOR_SHIFT; while (cur_disk_bytenr < disk_start + compressed_len) { u64 offset = cur_disk_bytenr - disk_start; @@ -346,7 +355,6 @@ blk_status_t btrfs_submit_compressed_write(struct 
btrfs_inode *inode, u64 start, btrfs_submit_bio(bio, 0); if (blkcg_css) kthread_associate_blkcg(NULL); - return ret; } static u64 bio_end_offset(struct bio *bio) @@ -515,11 +523,11 @@ static noinline int add_ra_bio_pages(struct inode *inode, * After the compressed pages are read, we copy the bytes into the * bio we were passed and then call the bio end_io calls */ -void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, - int mirror_num) +void btrfs_submit_compressed_read(struct bio *bio, int mirror_num) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct extent_map_tree *em_tree; + struct btrfs_inode *inode = btrfs_bio(bio)->inode; + struct btrfs_fs_info *fs_info = inode->root->fs_info; + struct extent_map_tree *em_tree = &inode->extent_tree; struct compressed_bio *cb; unsigned int compressed_len; struct bio *comp_bio; @@ -533,9 +541,6 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, int memstall = 0; blk_status_t ret; int ret2; - int i; - - em_tree = &BTRFS_I(inode)->extent_tree; file_offset = bio_first_bvec_all(bio)->bv_offset + page_offset(bio_first_page_all(bio)); @@ -551,14 +556,11 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ASSERT(em->compress_type != BTRFS_COMPRESS_NONE); compressed_len = em->block_len; - cb = kmalloc(sizeof(struct compressed_bio), GFP_NOFS); - if (!cb) { - ret = BLK_STS_RESOURCE; - goto out; - } - cb->status = BLK_STS_OK; - cb->inode = inode; + cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ, + end_compressed_bio_read); + comp_bio = &cb->bbio.bio; + comp_bio->bi_iter.bi_sector = cur_disk_byte >> SECTOR_SHIFT; cb->start = em->orig_start; em_len = em->len; @@ -576,24 +578,21 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS); if (!cb->compressed_pages) { ret = BLK_STS_RESOURCE; - goto fail; + goto out_free_bio; } ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages); if (ret2) { ret = BLK_STS_RESOURCE; - goto fail; + goto out_free_compressed_pages; } - add_ra_bio_pages(inode, em_start + em_len, cb, &memstall, &pflags); + add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall, + &pflags); /* include any pages we added in add_ra-bio_pages */ cb->len = bio->bi_iter.bi_size; - comp_bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, BTRFS_I(cb->inode), - end_compressed_bio_read, cb); - comp_bio->bi_iter.bi_sector = (cur_disk_byte >> SECTOR_SHIFT); - while (cur_disk_byte < disk_bytenr + compressed_len) { u64 offset = cur_disk_byte - disk_bytenr; unsigned int index = offset >> PAGE_SHIFT; @@ -622,31 +621,17 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, if (memstall) psi_memstall_leave(&pflags); - /* - * Stash the initial offset of this chunk, as there is no direct - * correlation between compressed pages and the original file offset. - * The field is only used for printing error messages anyway. 
- */ - btrfs_bio(comp_bio)->file_offset = file_offset; - ASSERT(comp_bio->bi_iter.bi_size); btrfs_submit_bio(comp_bio, mirror_num); return; -fail: - if (cb->compressed_pages) { - for (i = 0; i < cb->nr_pages; i++) { - if (cb->compressed_pages[i]) - __free_page(cb->compressed_pages[i]); - } - } - +out_free_compressed_pages: kfree(cb->compressed_pages); - kfree(cb); -out: +out_free_bio: + bio_put(comp_bio); free_extent_map(em); +out: btrfs_bio_end_io(btrfs_bio(bio), ret); - return; } /* @@ -1062,6 +1047,10 @@ int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page, int __init btrfs_init_compress(void) { + if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE, + offsetof(struct compressed_bio, bbio.bio), + BIOSET_NEED_BVECS)) + return -ENOMEM; btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE); btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB); btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO); @@ -1075,6 +1064,7 @@ void __cold btrfs_exit_compress(void) btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB); btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO); zstd_cleanup_workspace_manager(); + bioset_exit(&btrfs_compressed_bioset); } /* -- cgit v1.2.3 From 798c9fc74d034fca49031efb195e07e59bb926df Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Feb 2023 08:48:35 +0100 Subject: btrfs: remove redundant free_extent_map in btrfs_submit_compressed_read em can't be non-NULL after the free_extent_map label. Also remove the now pointless clearing of em to NULL after freeing it. Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index cd0cfa8fdb8c..6fd9c6efe387 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -572,7 +572,6 @@ void btrfs_submit_compressed_read(struct bio *bio, int mirror_num) cb->orig_bio = bio; free_extent_map(em); - em = NULL; cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE); cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS); @@ -629,7 +628,6 @@ out_free_compressed_pages: kfree(cb->compressed_pages); out_free_bio: bio_put(comp_bio); - free_extent_map(em); out: btrfs_bio_end_io(btrfs_bio(bio), ret); } -- cgit v1.2.3 From e7aff33e31610729f0c9c487f0e262cf96e98ebb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Feb 2023 08:48:36 +0100 Subject: btrfs: use the bbio file offset in btrfs_submit_compressed_read struct btrfs_bio now has a file_offset field set up by all submitters. Use that in btrfs_submit_compressed_read instead of recalculating the value. 
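To make the changelog concrete before the diff (this sketch is not part of the patch, and the surrounding context is simplified): the field can be trusted here because every submitter records it when it builds the read bio, roughly as alloc_new_bio() in extent_io.c does, so the compressed-read path no longer has to re-derive the offset from the first page of the bio.

    /* Submitter side, modelled on alloc_new_bio(): record the offset up front. */
    bbio = btrfs_bio_alloc(BIO_MAX_VECS, opf, inode, end_io_func, NULL);
    bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
    bbio->file_offset = file_offset;

    /* Compressed-read side after this patch: simply read it back. */
    u64 file_offset = btrfs_bio(bio)->file_offset;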
Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 6fd9c6efe387..f7b6c0baae80 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -533,7 +533,7 @@ void btrfs_submit_compressed_read(struct bio *bio, int mirror_num) struct bio *comp_bio; const u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT; u64 cur_disk_byte = disk_bytenr; - u64 file_offset; + u64 file_offset = btrfs_bio(bio)->file_offset; u64 em_len; u64 em_start; struct extent_map *em; @@ -542,9 +542,6 @@ void btrfs_submit_compressed_read(struct bio *bio, int mirror_num) blk_status_t ret; int ret2; - file_offset = bio_first_bvec_all(bio)->bv_offset + - page_offset(bio_first_page_all(bio)); - /* we need the actual starting offset of this extent in the file */ read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize); -- cgit v1.2.3 From d7294e4deeb9f3e7a41c759b3b0b2d28d80fbde2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Feb 2023 08:48:37 +0100 Subject: btrfs: use the bbio file offset in add_ra_bio_pages struct btrfs_bio now has a file_offset field set up by all submitters. Use that value combined with the bio size in add_ra_bio_pages to calculate the last offset in the bio. Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index f7b6c0baae80..6a6a6055774f 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -357,13 +357,6 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, kthread_associate_blkcg(NULL); } -static u64 bio_end_offset(struct bio *bio) -{ - struct bio_vec *last = bio_last_bvec_all(bio); - - return page_offset(last->bv_page) + last->bv_len + last->bv_offset; -} - /* * Add extra pages in the same compressed file extent so that we don't need to * re-read the same extent again and again. @@ -382,7 +375,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); unsigned long end_index; - u64 cur = bio_end_offset(cb->orig_bio); + u64 cur = btrfs_bio(cb->orig_bio)->file_offset + cb->orig_bio->bi_iter.bi_size; u64 isize = i_size_read(inode); int ret; struct page *page; -- cgit v1.2.3 From 10e924bc320a956a62db768ee6e5c49af8f0b670 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Feb 2023 08:48:38 +0100 Subject: btrfs: factor out a btrfs_add_compressed_bio_pages helper Factor out a common helper to add the compressed_bio pages to the bio that is shared by the compressed read and write path. 
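Summarizing the end state ahead of the diff (sketch only, all setup code omitted): after this refactor both submission paths prepare the compressed_bio and then share the same two final steps.

    /* Compressed write path: */
    btrfs_add_compressed_bio_pages(cb, disk_start);
    btrfs_submit_bio(&cb->bbio.bio, 0);

    /* Compressed read path: */
    btrfs_add_compressed_bio_pages(cb, disk_bytenr);
    btrfs_submit_bio(&cb->bbio.bio, mirror_num);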
Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 104 +++++++++++++++++++------------------------------ 1 file changed, 41 insertions(+), 63 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 6a6a6055774f..89c9b39e663c 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -280,6 +280,42 @@ static void end_compressed_bio_write(struct btrfs_bio *bbio) queue_work(fs_info->compressed_write_workers, &cb->write_end_work); } +static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb, + u64 disk_bytenr) +{ + struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info; + struct bio *bio = &cb->bbio.bio; + u64 cur_disk_byte = disk_bytenr; + + bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; + while (cur_disk_byte < disk_bytenr + cb->compressed_len) { + u64 offset = cur_disk_byte - disk_bytenr; + unsigned int index = offset >> PAGE_SHIFT; + unsigned int real_size; + unsigned int added; + struct page *page = cb->compressed_pages[index]; + + /* + * We have various limit on the real read size: + * - page boundary + * - compressed length boundary + */ + real_size = min_t(u64, U32_MAX, PAGE_SIZE - offset_in_page(offset)); + real_size = min_t(u64, real_size, cb->compressed_len - offset); + ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize)); + + added = bio_add_page(bio, page, real_size, offset_in_page(offset)); + /* + * Maximum compressed extent is smaller than bio size limit, + * thus bio_add_page() should always success. + */ + ASSERT(added == real_size); + cur_disk_byte += added; + } + + ASSERT(bio->bi_iter.bi_size); +} + /* * worker function to build and submit bios for previously compressed pages. * The corresponding pages in the inode should be marked for writeback @@ -299,9 +335,7 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, bool writeback) { struct btrfs_fs_info *fs_info = inode->root->fs_info; - struct bio *bio; struct compressed_bio *cb; - u64 cur_disk_bytenr = disk_start; ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && IS_ALIGNED(len, fs_info->sectorsize)); @@ -322,37 +356,9 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work); cb->nr_pages = nr_pages; - bio = &cb->bbio.bio; - bio->bi_iter.bi_sector = disk_start >> SECTOR_SHIFT; - - while (cur_disk_bytenr < disk_start + compressed_len) { - u64 offset = cur_disk_bytenr - disk_start; - unsigned int index = offset >> PAGE_SHIFT; - unsigned int real_size; - unsigned int added; - struct page *page = compressed_pages[index]; - - /* - * We have various limits on the real read size: - * - page boundary - * - compressed length boundary - */ - real_size = min_t(u64, U32_MAX, PAGE_SIZE - offset_in_page(offset)); - real_size = min_t(u64, real_size, compressed_len - offset); - ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize)); - - added = bio_add_page(bio, page, real_size, offset_in_page(offset)); - /* - * Maximum compressed extent is smaller than bio size limit, - * thus bio_add_page() should always success. - */ - ASSERT(added == real_size); - cur_disk_bytenr += added; - } + btrfs_add_compressed_bio_pages(cb, disk_start); + btrfs_submit_bio(&cb->bbio.bio, 0); - /* Finished the range. 
*/ - ASSERT(bio->bi_iter.bi_size); - btrfs_submit_bio(bio, 0); if (blkcg_css) kthread_associate_blkcg(NULL); } @@ -523,9 +529,7 @@ void btrfs_submit_compressed_read(struct bio *bio, int mirror_num) struct extent_map_tree *em_tree = &inode->extent_tree; struct compressed_bio *cb; unsigned int compressed_len; - struct bio *comp_bio; const u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT; - u64 cur_disk_byte = disk_bytenr; u64 file_offset = btrfs_bio(bio)->file_offset; u64 em_len; u64 em_start; @@ -549,8 +553,6 @@ void btrfs_submit_compressed_read(struct bio *bio, int mirror_num) cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ, end_compressed_bio_read); - comp_bio = &cb->bbio.bio; - comp_bio->bi_iter.bi_sector = cur_disk_byte >> SECTOR_SHIFT; cb->start = em->orig_start; em_len = em->len; @@ -582,42 +584,18 @@ void btrfs_submit_compressed_read(struct bio *bio, int mirror_num) /* include any pages we added in add_ra-bio_pages */ cb->len = bio->bi_iter.bi_size; - while (cur_disk_byte < disk_bytenr + compressed_len) { - u64 offset = cur_disk_byte - disk_bytenr; - unsigned int index = offset >> PAGE_SHIFT; - unsigned int real_size; - unsigned int added; - struct page *page = cb->compressed_pages[index]; - - /* - * We have various limit on the real read size: - * - page boundary - * - compressed length boundary - */ - real_size = min_t(u64, U32_MAX, PAGE_SIZE - offset_in_page(offset)); - real_size = min_t(u64, real_size, compressed_len - offset); - ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize)); - - added = bio_add_page(comp_bio, page, real_size, offset_in_page(offset)); - /* - * Maximum compressed extent is smaller than bio size limit, - * thus bio_add_page() should always success. - */ - ASSERT(added == real_size); - cur_disk_byte += added; - } + btrfs_add_compressed_bio_pages(cb, disk_bytenr); if (memstall) psi_memstall_leave(&pflags); - ASSERT(comp_bio->bi_iter.bi_size); - btrfs_submit_bio(comp_bio, mirror_num); + btrfs_submit_bio(&cb->bbio.bio, mirror_num); return; out_free_compressed_pages: kfree(cb->compressed_pages); out_free_bio: - bio_put(comp_bio); + bio_put(&cb->bbio.bio); out: btrfs_bio_end_io(btrfs_bio(bio), ret); } -- cgit v1.2.3 From 32586c5bca72bfd2875f0c66032e134ce1c68680 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Feb 2023 08:48:39 +0100 Subject: btrfs: factor out a btrfs_free_compressed_pages helper Share the code to free the compressed pages and the array to hold them into a common helper. 
Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 41 +++++++++++++---------------------------- 1 file changed, 13 insertions(+), 28 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 89c9b39e663c..5f64a775f1fd 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -159,30 +159,29 @@ static int compression_decompress(int type, struct list_head *ws, } } +static void btrfs_free_compressed_pages(struct compressed_bio *cb) +{ + for (unsigned int i = 0; i < cb->nr_pages; i++) { + struct page *page = cb->compressed_pages[i]; + + page->mapping = NULL; + put_page(page); + } + kfree(cb->compressed_pages); +} + static int btrfs_decompress_bio(struct compressed_bio *cb); static void end_compressed_bio_read(struct btrfs_bio *bbio) { struct compressed_bio *cb = to_compressed_bio(bbio); blk_status_t status = bbio->bio.bi_status; - unsigned int index; - struct page *page; if (!status) status = errno_to_blk_status(btrfs_decompress_bio(cb)); - /* Release the compressed pages */ - for (index = 0; index < cb->nr_pages; index++) { - page = cb->compressed_pages[index]; - page->mapping = NULL; - put_page(page); - } - - /* Do io completion on the original bio */ + btrfs_free_compressed_pages(cb); btrfs_bio_end_io(btrfs_bio(cb->orig_bio), status); - - /* Finally free the cb struct */ - kfree(cb->compressed_pages); bio_put(&bbio->bio); } @@ -227,8 +226,6 @@ static noinline void end_compressed_writeback(const struct compressed_bio *cb) static void finish_compressed_bio_write(struct compressed_bio *cb) { - unsigned int index; - /* * Ok, we're the last bio for this extent, step one is to call back * into the FS and do all the end_io operations. @@ -241,19 +238,7 @@ static void finish_compressed_bio_write(struct compressed_bio *cb) end_compressed_writeback(cb); /* Note, our inode could be gone now */ - /* - * Release the compressed pages, these came from alloc_page and - * are not attached to the inode at all - */ - for (index = 0; index < cb->nr_pages; index++) { - struct page *page = cb->compressed_pages[index]; - - page->mapping = NULL; - put_page(page); - } - - /* Finally free the cb struct */ - kfree(cb->compressed_pages); + btrfs_free_compressed_pages(cb); bio_put(&cb->bbio.bio); } -- cgit v1.2.3 From a959a1745d333fbce1e21895ac6d833a77213112 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Feb 2023 08:48:40 +0100 Subject: btrfs: don't clear page->mapping in btrfs_free_compressed_pages No one ever set ->mapping on these pages, so don't bother clearing it. 
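For context (sketch, not part of the patch): the pages in question are plain allocations made by the compression paths, as in btrfs_submit_compressed_read() earlier in the series, and are never attached to an address_space, so their ->mapping is NULL for their whole lifetime and clearing it before put_page() was a no-op.

    /* Allocation of the compressed pages, abridged from btrfs_submit_compressed_read(): */
    cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
    ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages);
    /* Nothing ever inserts these pages into the inode's page cache. */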
Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 5f64a775f1fd..6a23d6cc29aa 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -161,12 +161,8 @@ static int compression_decompress(int type, struct list_head *ws, static void btrfs_free_compressed_pages(struct compressed_bio *cb) { - for (unsigned int i = 0; i < cb->nr_pages; i++) { - struct page *page = cb->compressed_pages[i]; - - page->mapping = NULL; - put_page(page); - } + for (unsigned int i = 0; i < cb->nr_pages; i++) + put_page(cb->compressed_pages[i]); kfree(cb->compressed_pages); } -- cgit v1.2.3 From f9327a70c12c362b15c62b011332e22d242cf009 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Feb 2023 08:48:41 +0100 Subject: btrfs: fold finish_compressed_bio_write into btrfs_finish_compressed_write_work Fold finish_compressed_bio_write into its only caller as there is no reason to keep them separate. Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 6a23d6cc29aa..5b1de1c19991 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -220,8 +220,11 @@ static noinline void end_compressed_writeback(const struct compressed_bio *cb) /* the inode may be gone now */ } -static void finish_compressed_bio_write(struct compressed_bio *cb) +static void btrfs_finish_compressed_write_work(struct work_struct *work) { + struct compressed_bio *cb = + container_of(work, struct compressed_bio, write_end_work); + /* * Ok, we're the last bio for this extent, step one is to call back * into the FS and do all the end_io operations. @@ -238,14 +241,6 @@ static void finish_compressed_bio_write(struct compressed_bio *cb) bio_put(&cb->bbio.bio); } -static void btrfs_finish_compressed_write_work(struct work_struct *work) -{ - struct compressed_bio *cb = - container_of(work, struct compressed_bio, write_end_work); - - finish_compressed_bio_write(cb); -} - /* * Do the cleanup once all the compressed pages hit the disk. This will clear * writeback on the file pages and free the compressed pages. -- cgit v1.2.3 From 7edb9a3e72009917602f80f1c01f2337a103e7e0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 7 Mar 2023 17:39:38 +0100 Subject: btrfs: move zero filling of compressed read bios into common code All algorithms have to fill the remainder of the orig_bio with zeroes, so do it in common code. 
Reviewed-by: Anand Jain Reviewed-by: Johannes Thumshirn Reviewed-by: Qu Wenruo Signed-off-by: Christoph Hellwig Signed-off-by: David Sterba --- fs/btrfs/compression.c | 2 ++ fs/btrfs/lzo.c | 14 +++++--------- fs/btrfs/zlib.c | 2 -- fs/btrfs/zstd.c | 1 - 4 files changed, 7 insertions(+), 12 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 5b1de1c19991..64c804dc3962 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -965,6 +965,8 @@ static int btrfs_decompress_bio(struct compressed_bio *cb) ret = compression_decompress_bio(workspace, cb); put_workspace(type, workspace); + if (!ret) + zero_fill_bio(cb->orig_bio); return ret; } diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index dc66ee98989e..3a095b9c6373 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -389,8 +389,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) */ btrfs_err(fs_info, "unexpectedly large lzo segment len %u", seg_len); - ret = -EIO; - goto out; + return -EIO; } /* Copy the compressed segment payload into workspace */ @@ -401,8 +400,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) workspace->buf, &out_len); if (ret != LZO_E_OK) { btrfs_err(fs_info, "failed to decompress"); - ret = -EIO; - goto out; + return -EIO; } /* Copy the data into inode pages */ @@ -411,7 +409,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) /* All data read, exit */ if (ret == 0) - goto out; + return 0; ret = 0; /* Check if the sector has enough space for a segment header */ @@ -422,10 +420,8 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) /* Skip the padding zeros */ cur_in += sector_bytes_left; } -out: - if (!ret) - zero_fill_bio(cb->orig_bio); - return ret; + + return 0; } int lzo_decompress(struct list_head *ws, const u8 *data_in, diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index da7bb9187b68..8acb05e176c5 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -350,8 +350,6 @@ done: zlib_inflateEnd(&workspace->strm); if (data_in) kunmap_local(data_in); - if (!ret) - zero_fill_bio(cb->orig_bio); return ret; } diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index e34f1ab99d56..f798da267590 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -609,7 +609,6 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb) } } ret = 0; - zero_fill_bio(cb->orig_bio); done: if (workspace->in_buf.src) kunmap_local(workspace->in_buf.src); -- cgit v1.2.3 From ae42a154ca8972739be29f811a69bef6c4818a26 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 7 Mar 2023 17:39:39 +0100 Subject: btrfs: pass a btrfs_bio to btrfs_submit_bio btrfs_submit_bio expects the bio passed to it to be embedded into a btrfs_bio structure. Pass the btrfs_bio directly to increase type safety and make the code self-documenting. 
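The embedding that makes this type-safe is easy to demonstrate outside the kernel. The stand-alone program below illustrates the general struct-embedding plus container_of() pattern that btrfs_bio() and to_compressed_bio() rely on; every name in it is invented for the example, only the technique matches the kernel code.

#include <stddef.h>
#include <stdio.h>

/* Minimal local copy of the kernel's container_of() idea. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner_bio {			/* stands in for struct bio */
	unsigned int size;
};

struct outer_bbio {			/* stands in for struct btrfs_bio */
	unsigned long private_state;
	struct inner_bio bio;		/* kept last, like the real structures */
};

static struct outer_bbio *to_outer(struct inner_bio *bio)
{
	return container_of(bio, struct outer_bbio, bio);
}

int main(void)
{
	struct outer_bbio bbio = { .private_state = 42, .bio = { .size = 4096 } };
	struct inner_bio *bio = &bbio.bio;	/* only the inner type gets passed around */

	/* Taking the outer type directly, as btrfs_submit_bio() now does, removes
	 * this recovery step and lets the compiler reject unrelated bios. */
	printf("state=%lu size=%u\n", to_outer(bio)->private_state, bio->size);
	return 0;
}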
Reviewed-by: Anand Jain Reviewed-by: Johannes Thumshirn Reviewed-by: Qu Wenruo Signed-off-by: Christoph Hellwig Signed-off-by: David Sterba --- fs/btrfs/bio.c | 14 +++++++------- fs/btrfs/bio.h | 2 +- fs/btrfs/compression.c | 4 ++-- fs/btrfs/extent_io.c | 2 +- fs/btrfs/inode.c | 6 +++--- 5 files changed, 14 insertions(+), 14 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c index 726592868e9c..c04e103f8768 100644 --- a/fs/btrfs/bio.c +++ b/fs/btrfs/bio.c @@ -164,7 +164,7 @@ static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio, goto done; } - btrfs_submit_bio(&repair_bbio->bio, mirror); + btrfs_submit_bio(repair_bbio, mirror); return; } @@ -232,7 +232,7 @@ static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio, mirror = next_repair_mirror(fbio, failed_bbio->mirror_num); btrfs_debug(fs_info, "submitting repair read to mirror %d", mirror); - btrfs_submit_bio(repair_bio, mirror); + btrfs_submit_bio(repair_bbio, mirror); return fbio; } @@ -603,12 +603,12 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio, return true; } -static bool btrfs_submit_chunk(struct bio *bio, int mirror_num) +static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) { - struct btrfs_bio *bbio = btrfs_bio(bio); struct btrfs_inode *inode = bbio->inode; struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_bio *orig_bbio = bbio; + struct bio *bio = &bbio->bio; u64 logical = bio->bi_iter.bi_sector << 9; u64 length = bio->bi_iter.bi_size; u64 map_length = length; @@ -650,7 +650,7 @@ static bool btrfs_submit_chunk(struct bio *bio, int mirror_num) if (use_append) { bio->bi_opf &= ~REQ_OP_WRITE; bio->bi_opf |= REQ_OP_ZONE_APPEND; - ret = btrfs_extract_ordered_extent(btrfs_bio(bio)); + ret = btrfs_extract_ordered_extent(bbio); if (ret) goto fail_put_bio; } @@ -686,9 +686,9 @@ fail: return true; } -void btrfs_submit_bio(struct bio *bio, int mirror_num) +void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num) { - while (!btrfs_submit_chunk(bio, mirror_num)) + while (!btrfs_submit_chunk(bbio, mirror_num)) ; } diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h index 873ff85817f0..b4e7d5ab7d23 100644 --- a/fs/btrfs/bio.h +++ b/fs/btrfs/bio.h @@ -88,7 +88,7 @@ static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status) /* Bio only refers to one ordered extent. 
*/ #define REQ_BTRFS_ONE_ORDERED REQ_DRV -void btrfs_submit_bio(struct bio *bio, int mirror_num); +void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num); int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, u64 length, u64 logical, struct page *page, unsigned int pg_offset, int mirror_num); diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 64c804dc3962..27bea05cab1a 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -333,7 +333,7 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, cb->nr_pages = nr_pages; btrfs_add_compressed_bio_pages(cb, disk_start); - btrfs_submit_bio(&cb->bbio.bio, 0); + btrfs_submit_bio(&cb->bbio, 0); if (blkcg_css) kthread_associate_blkcg(NULL); @@ -565,7 +565,7 @@ void btrfs_submit_compressed_read(struct bio *bio, int mirror_num) if (memstall) psi_memstall_leave(&pflags); - btrfs_submit_bio(&cb->bbio.bio, mirror_num); + btrfs_submit_bio(&cb->bbio, mirror_num); return; out_free_compressed_pages: diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 924fcb6c97e8..2e594252af01 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -157,7 +157,7 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl) bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) btrfs_submit_compressed_read(bio, mirror_num); else - btrfs_submit_bio(bio, mirror_num); + btrfs_submit_bio(btrfs_bio(bio), mirror_num); /* The bio is owned by the end_io handler now */ bio_ctrl->bio = NULL; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 25b675cee216..b5a82d22dbd1 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7761,7 +7761,7 @@ static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio, dip->bytes = bio->bi_iter.bi_size; dio_data->submitted += bio->bi_iter.bi_size; - btrfs_submit_bio(bio, 0); + btrfs_submit_bio(bbio, 0); } static const struct iomap_ops btrfs_dio_iomap_ops = { @@ -9941,7 +9941,7 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, if (bio_add_page(bio, pages[i], bytes, 0) < bytes) { atomic_inc(&priv.pending); - btrfs_submit_bio(bio, 0); + btrfs_submit_bio(btrfs_bio(bio), 0); bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, btrfs_encoded_read_endio, &priv); @@ -9955,7 +9955,7 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, } while (disk_io_size); atomic_inc(&priv.pending); - btrfs_submit_bio(bio, 0); + btrfs_submit_bio(btrfs_bio(bio), 0); if (atomic_dec_return(&priv.pending)) io_wait_event(priv.wait, !atomic_read(&priv.pending)); -- cgit v1.2.3 From 690834e47cf7868a4c13e32ea2332d9fe6590073 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 7 Mar 2023 17:39:40 +0100 Subject: btrfs: pass a btrfs_bio to btrfs_submit_compressed_read btrfs_submit_compressed_read expects the bio passed to it to be embedded into a btrfs_bio structure. Pass the btrfs_bio directly to increase type safety and make the code self-documenting. 
Reviewed-by: Anand Jain Reviewed-by: Johannes Thumshirn Reviewed-by: Qu Wenruo Signed-off-by: Christoph Hellwig Signed-off-by: David Sterba --- fs/btrfs/compression.c | 16 ++++++++-------- fs/btrfs/compression.h | 2 +- fs/btrfs/extent_io.c | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 27bea05cab1a..c12e317e1336 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -498,15 +498,15 @@ static noinline int add_ra_bio_pages(struct inode *inode, * After the compressed pages are read, we copy the bytes into the * bio we were passed and then call the bio end_io calls */ -void btrfs_submit_compressed_read(struct bio *bio, int mirror_num) +void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num) { - struct btrfs_inode *inode = btrfs_bio(bio)->inode; + struct btrfs_inode *inode = bbio->inode; struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_map_tree *em_tree = &inode->extent_tree; struct compressed_bio *cb; unsigned int compressed_len; - const u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT; - u64 file_offset = btrfs_bio(bio)->file_offset; + const u64 disk_bytenr = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; + u64 file_offset = bbio->file_offset; u64 em_len; u64 em_start; struct extent_map *em; @@ -534,10 +534,10 @@ void btrfs_submit_compressed_read(struct bio *bio, int mirror_num) em_len = em->len; em_start = em->start; - cb->len = bio->bi_iter.bi_size; + cb->len = bbio->bio.bi_iter.bi_size; cb->compressed_len = compressed_len; cb->compress_type = em->compress_type; - cb->orig_bio = bio; + cb->orig_bio = &bbio->bio; free_extent_map(em); @@ -558,7 +558,7 @@ void btrfs_submit_compressed_read(struct bio *bio, int mirror_num) &pflags); /* include any pages we added in add_ra-bio_pages */ - cb->len = bio->bi_iter.bi_size; + cb->len = bbio->bio.bi_iter.bi_size; btrfs_add_compressed_bio_pages(cb, disk_bytenr); @@ -573,7 +573,7 @@ out_free_compressed_pages: out_free_bio: bio_put(&cb->bbio.bio); out: - btrfs_bio_end_io(btrfs_bio(bio), ret); + btrfs_bio_end_io(bbio, ret); } /* diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 95d2e85c6e4e..692bafa1050e 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -94,7 +94,7 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, blk_opf_t write_flags, struct cgroup_subsys_state *blkcg_css, bool writeback); -void btrfs_submit_compressed_read(struct bio *bio, int mirror_num); +void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num); unsigned int btrfs_compress_str2level(unsigned int type, const char *str); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 2e594252af01..2b9e24782b36 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -155,7 +155,7 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl) if (btrfs_op(bio) == BTRFS_MAP_READ && bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) - btrfs_submit_compressed_read(bio, mirror_num); + btrfs_submit_compressed_read(btrfs_bio(bio), mirror_num); else btrfs_submit_bio(btrfs_bio(bio), mirror_num); -- cgit v1.2.3 From b7d463a1d1252c2cd5e9f13c008eb49b8a5f75af Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 7 Mar 2023 17:39:41 +0100 Subject: btrfs: store a pointer to the original btrfs_bio in struct compressed_bio The original bio must be a btrfs_bio, so store a pointer to the btrfs_bio for better type checking. 
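For orientation, this is roughly what struct compressed_bio looks like once the series so far is applied, reconstructed from the hunks quoted in these patches; exact types, field order and any members not touched here are approximate rather than authoritative.

struct compressed_bio {
	/* Number of compressed pages in the array. */
	unsigned int nr_pages;

	/* The pages holding the compressed data. */
	struct page **compressed_pages;

	/* Starting offset in the inode for our pages. */
	u64 start;

	/* Number of bytes in the inode we're working on. */
	u32 len;

	/* Number of compressed bytes on disk. */
	u32 compressed_len;

	/* The compression algorithm for this bio. */
	u8 compress_type;

	/* Whether the file pages need writeback clearing on completion. */
	bool writeback;

	union {
		/* For reads: the (now type-checked) bio we copy the data into. */
		struct btrfs_bio *orig_bbio;
		/* For writes: deferred completion work. */
		struct work_struct write_end_work;
	};

	/* Must come last: allocated from btrfs_compressed_bioset with
	 * offsetof(struct compressed_bio, bbio.bio) as the front pad. */
	struct btrfs_bio bbio;
};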
Reviewed-by: Anand Jain Reviewed-by: Johannes Thumshirn Reviewed-by: Qu Wenruo Signed-off-by: Christoph Hellwig Signed-off-by: David Sterba --- fs/btrfs/compression.c | 15 ++++++++------- fs/btrfs/compression.h | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index c12e317e1336..c5839d04690d 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -177,7 +177,7 @@ static void end_compressed_bio_read(struct btrfs_bio *bbio) status = errno_to_blk_status(btrfs_decompress_bio(cb)); btrfs_free_compressed_pages(cb); - btrfs_bio_end_io(btrfs_bio(cb->orig_bio), status); + btrfs_bio_end_io(cb->orig_bbio, status); bio_put(&bbio->bio); } @@ -357,7 +357,8 @@ static noinline int add_ra_bio_pages(struct inode *inode, { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); unsigned long end_index; - u64 cur = btrfs_bio(cb->orig_bio)->file_offset + cb->orig_bio->bi_iter.bi_size; + struct bio *orig_bio = &cb->orig_bbio->bio; + u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size; u64 isize = i_size_read(inode); int ret; struct page *page; @@ -447,7 +448,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, */ if (!em || cur < em->start || (cur + fs_info->sectorsize > extent_map_end(em)) || - (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) { + (em->block_start >> 9) != orig_bio->bi_iter.bi_sector) { free_extent_map(em); unlock_extent(tree, cur, page_end, NULL); unlock_page(page); @@ -467,7 +468,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, } add_size = min(em->start + em->len, page_end + 1) - cur; - ret = bio_add_page(cb->orig_bio, page, add_size, offset_in_page(cur)); + ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur)); if (ret != add_size) { unlock_extent(tree, cur, page_end, NULL); unlock_page(page); @@ -537,7 +538,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num) cb->len = bbio->bio.bi_iter.bi_size; cb->compressed_len = compressed_len; cb->compress_type = em->compress_type; - cb->orig_bio = &bbio->bio; + cb->orig_bbio = bbio; free_extent_map(em); @@ -966,7 +967,7 @@ static int btrfs_decompress_bio(struct compressed_bio *cb) put_workspace(type, workspace); if (!ret) - zero_fill_bio(cb->orig_bio); + zero_fill_bio(&cb->orig_bbio->bio); return ret; } @@ -1044,7 +1045,7 @@ void __cold btrfs_exit_compress(void) int btrfs_decompress_buf2page(const char *buf, u32 buf_len, struct compressed_bio *cb, u32 decompressed) { - struct bio *orig_bio = cb->orig_bio; + struct bio *orig_bio = &cb->orig_bbio->bio; /* Offset inside the full decompressed extent */ u32 cur_offset; diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 692bafa1050e..5d5146e72a86 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -55,7 +55,7 @@ struct compressed_bio { union { /* For reads, this is the bio we are copying the data into */ - struct bio *orig_bio; + struct btrfs_bio *orig_bbio; struct work_struct write_end_work; }; -- cgit v1.2.3 From 4513cb0c40d79599f72a5d1a6ab2fb279b63500d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 14 Mar 2023 17:51:09 +0100 Subject: btrfs: move the bi_sector assignment out of btrfs_add_compressed_bio_pages Adding pages to a bio has nothing to do with the sector. Move the assignment to the two callers in preparation for cleaning up btrfs_add_compressed_bio_pages. 
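Condensed from the diff below (sketch): the caller now decides where on disk the bio goes, and the helper only attaches the data pages.

    /* Caller (read or write path): position the bio on disk. */
    cb->bbio.bio.bi_iter.bi_sector = disk_start >> SECTOR_SHIFT;

    /* Helper: attach the compressed pages, nothing sector-related. */
    btrfs_add_compressed_bio_pages(cb);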
Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index c5839d04690d..1487c9413e69 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -256,14 +256,13 @@ static void end_compressed_bio_write(struct btrfs_bio *bbio) queue_work(fs_info->compressed_write_workers, &cb->write_end_work); } -static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb, - u64 disk_bytenr) +static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb) { struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info; struct bio *bio = &cb->bbio.bio; + u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT; u64 cur_disk_byte = disk_bytenr; - bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; while (cur_disk_byte < disk_bytenr + cb->compressed_len) { u64 offset = cur_disk_byte - disk_bytenr; unsigned int index = offset >> PAGE_SHIFT; @@ -331,8 +330,9 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, cb->writeback = writeback; INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work); cb->nr_pages = nr_pages; + cb->bbio.bio.bi_iter.bi_sector = disk_start >> SECTOR_SHIFT; + btrfs_add_compressed_bio_pages(cb); - btrfs_add_compressed_bio_pages(cb, disk_start); btrfs_submit_bio(&cb->bbio, 0); if (blkcg_css) @@ -506,7 +506,6 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num) struct extent_map_tree *em_tree = &inode->extent_tree; struct compressed_bio *cb; unsigned int compressed_len; - const u64 disk_bytenr = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; u64 file_offset = bbio->file_offset; u64 em_len; u64 em_start; @@ -560,8 +559,8 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num) /* include any pages we added in add_ra-bio_pages */ cb->len = bbio->bio.bi_iter.bi_size; - - btrfs_add_compressed_bio_pages(cb, disk_bytenr); + cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector; + btrfs_add_compressed_bio_pages(cb); if (memstall) psi_memstall_leave(&pflags); -- cgit v1.2.3 From 43fa4219bcf012385150de299364b5044de6500d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 14 Mar 2023 17:51:10 +0100 Subject: btrfs: simplify adding pages in btrfs_add_compressed_bio_pages btrfs_add_compressed_bio_pages is needlessly complicated. Instead of iterating over the logic disk offset just to add pages to the bio use a simple offset starting at 0, which also removes most of the claiming. Additionally __bio_add_pages already takes care of the assert that the bio is always properly sized, and btrfs_submit_bio called right after asserts that the bio size is non-zero. 
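Why the unchecked __bio_add_page() is safe here: the bio was allocated with room for BTRFS_MAX_COMPRESSED_PAGES vectors back in alloc_compressed_bio() (the first patch of this series), and, as the retained comment notes, the maximum compressed extent is smaller than the bio size limit. A small worked example of the new loop, with purely illustrative sizes:

    /* Assume cb->compressed_len = 10240 and PAGE_SIZE = 4096 (example values). */
    /* offset =    0: len = min(10240 -    0, 4096) = 4096, page index 0 */
    /* offset = 4096: len = min(10240 - 4096, 4096) = 4096, page index 1 */
    /* offset = 8192: len = min(10240 - 8192, 4096) = 2048, page index 2 */
    /* offset reaches 10240, so the loop ends after three __bio_add_page() calls. */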
Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 34 +++++++--------------------------- 1 file changed, 7 insertions(+), 27 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 1487c9413e69..44c4276741ce 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -258,37 +258,17 @@ static void end_compressed_bio_write(struct btrfs_bio *bbio) static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb) { - struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info; struct bio *bio = &cb->bbio.bio; - u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT; - u64 cur_disk_byte = disk_bytenr; + u32 offset = 0; - while (cur_disk_byte < disk_bytenr + cb->compressed_len) { - u64 offset = cur_disk_byte - disk_bytenr; - unsigned int index = offset >> PAGE_SHIFT; - unsigned int real_size; - unsigned int added; - struct page *page = cb->compressed_pages[index]; + while (offset < cb->compressed_len) { + u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE); - /* - * We have various limit on the real read size: - * - page boundary - * - compressed length boundary - */ - real_size = min_t(u64, U32_MAX, PAGE_SIZE - offset_in_page(offset)); - real_size = min_t(u64, real_size, cb->compressed_len - offset); - ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize)); - - added = bio_add_page(bio, page, real_size, offset_in_page(offset)); - /* - * Maximum compressed extent is smaller than bio size limit, - * thus bio_add_page() should always success. - */ - ASSERT(added == real_size); - cur_disk_byte += added; + /* Maximum compressed extent is smaller than bio size limit. */ + __bio_add_page(bio, cb->compressed_pages[offset >> PAGE_SHIFT], + len, 0); + offset += len; } - - ASSERT(bio->bi_iter.bi_size); } /* -- cgit v1.2.3 From 05d06a5c9d9c3c8119c365246dc1e3de2e3c5dd1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 27 Mar 2023 09:49:47 +0900 Subject: btrfs: move kthread_associate_blkcg out of btrfs_submit_compressed_write btrfs_submit_compressed_write should not have to care if it is called from a helper thread or not. Move the kthread_associate_blkcg handling into submit_one_async_extent, as that is the one caller that needs it. Also move the assignment of REQ_CGROUP_PUNT into cow_file_range_async, as that is the routine that sets up the helper thread offload. 
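Condensed end state at the call site (sketch drawn from the inode.c hunk below, arguments abbreviated): the cgroup association now brackets the submission in submit_one_async_extent(), and REQ_CGROUP_PUNT is applied once in cow_file_range_async() when the helper-thread offload is set up, so btrfs_submit_compressed_write() no longer needs to know which context it runs in.

    if (async_chunk->blkcg_css)
        kthread_associate_blkcg(async_chunk->blkcg_css);
    btrfs_submit_compressed_write(inode, start, ram_size, disk_bytenr,
                                  compressed_len, pages, nr_pages,
                                  async_chunk->write_flags, true);
    if (async_chunk->blkcg_css)
        kthread_associate_blkcg(NULL);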
Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 8 -------- fs/btrfs/compression.h | 1 - fs/btrfs/inode.c | 12 ++++++++---- 3 files changed, 8 insertions(+), 13 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 44c4276741ce..d532a8c8c9d8 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -286,7 +286,6 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, struct page **compressed_pages, unsigned int nr_pages, blk_opf_t write_flags, - struct cgroup_subsys_state *blkcg_css, bool writeback) { struct btrfs_fs_info *fs_info = inode->root->fs_info; @@ -295,10 +294,6 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && IS_ALIGNED(len, fs_info->sectorsize)); - if (blkcg_css) { - kthread_associate_blkcg(blkcg_css); - write_flags |= REQ_CGROUP_PUNT; - } write_flags |= REQ_BTRFS_ONE_ORDERED; cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE | write_flags, @@ -314,9 +309,6 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, btrfs_add_compressed_bio_pages(cb); btrfs_submit_bio(&cb->bbio, 0); - - if (blkcg_css) - kthread_associate_blkcg(NULL); } /* diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 5d5146e72a86..19ab2abeddc0 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -92,7 +92,6 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, struct page **compressed_pages, unsigned int nr_pages, blk_opf_t write_flags, - struct cgroup_subsys_state *blkcg_css, bool writeback); void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d069cde28af5..74d1a664b90f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1054,14 +1054,18 @@ static int submit_one_async_extent(struct btrfs_inode *inode, extent_clear_unlock_delalloc(inode, start, end, NULL, EXTENT_LOCKED | EXTENT_DELALLOC, PAGE_UNLOCK | PAGE_START_WRITEBACK); + + if (async_chunk->blkcg_css) + kthread_associate_blkcg(async_chunk->blkcg_css); btrfs_submit_compressed_write(inode, start, /* file_offset */ async_extent->ram_size, /* num_bytes */ ins.objectid, /* disk_bytenr */ ins.offset, /* compressed_len */ async_extent->pages, /* compressed_pages */ async_extent->nr_pages, - async_chunk->write_flags, - async_chunk->blkcg_css, true); + async_chunk->write_flags, true); + if (async_chunk->blkcg_css) + kthread_associate_blkcg(NULL); *alloc_hint = ins.objectid + ins.offset; kfree(async_extent); return ret; @@ -1613,6 +1617,7 @@ static int cow_file_range_async(struct btrfs_inode *inode, if (blkcg_css != blkcg_root_css) { css_get(blkcg_css); async_chunk[i].blkcg_css = blkcg_css; + async_chunk[i].write_flags |= REQ_CGROUP_PUNT; } else { async_chunk[i].blkcg_css = NULL; } @@ -10348,8 +10353,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, btrfs_delalloc_release_extents(inode, num_bytes); btrfs_submit_compressed_write(inode, start, num_bytes, ins.objectid, - ins.offset, pages, nr_pages, 0, NULL, - false); + ins.offset, pages, nr_pages, 0, false); ret = orig_count; goto out; -- cgit v1.2.3 From 4317ff0056bedfc472202bf4ccf72d51094d6ade Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Thu, 23 Mar 2023 17:01:20 +0800 Subject: btrfs: introduce btrfs_bio::fs_info member Currently we're doing a lot of work for btrfs_bio: - Checksum verification for 
data read bios - Bio splits if it crosses stripe boundary - Read repair for data read bios However for the incoming scrub patches, we don't want this extra functionality at all, just plain logical + mirror -> physical mapping ability. Thus here we do the following changes: - Introduce btrfs_bio::fs_info This is for the new scrub specific btrfs_bio, which would not populate btrfs_bio::inode. Thus we need such new member to grab a fs_info This new member will always be populated. - Replace @inode argument with @fs_info for btrfs_bio_init() and its caller Since @inode is no longer a mandatory member, replace it with @fs_info, and let involved users populate @inode. - Skip checksum verification and generation if @bbio->inode is NULL - Add extra ASSERT()s To make sure: * bbio->inode is properly set for involved read repair path * if @file_offset is set, bbio->inode is also populated - Grab @fs_info from @bbio directly We can no longer go @bbio->inode->root->fs_info, as bbio->inode can be NULL. This involves: * btrfs_simple_end_io() * should_async_write() * btrfs_wq_submit_bio() * btrfs_use_zone_append() Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/bio.c | 42 +++++++++++++++++++++++++----------------- fs/btrfs/bio.h | 12 +++++++++--- fs/btrfs/compression.c | 3 ++- fs/btrfs/extent_io.c | 3 ++- fs/btrfs/inode.c | 13 +++++++++---- fs/btrfs/zoned.c | 4 ++-- 6 files changed, 49 insertions(+), 28 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c index ed5aa8a176b9..e40d1ababa08 100644 --- a/fs/btrfs/bio.c +++ b/fs/btrfs/bio.c @@ -31,11 +31,11 @@ struct btrfs_failed_bio { * Initialize a btrfs_bio structure. This skips the embedded bio itself as it * is already initialized by the block layer. */ -void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode, +void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info, btrfs_bio_end_io_t end_io, void *private) { memset(bbio, 0, offsetof(struct btrfs_bio, bio)); - bbio->inode = inode; + bbio->fs_info = fs_info; bbio->end_io = end_io; bbio->private = private; atomic_set(&bbio->pending_ios, 1); @@ -49,7 +49,7 @@ void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode, * a mempool. 
*/ struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf, - struct btrfs_inode *inode, + struct btrfs_fs_info *fs_info, btrfs_bio_end_io_t end_io, void *private) { struct btrfs_bio *bbio; @@ -57,7 +57,7 @@ struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf, bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset); bbio = btrfs_bio(bio); - btrfs_bio_init(bbio, inode, end_io, private); + btrfs_bio_init(bbio, fs_info, end_io, private); return bbio; } @@ -92,8 +92,8 @@ static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info, GFP_NOFS, &btrfs_clone_bioset); } bbio = btrfs_bio(bio); - btrfs_bio_init(bbio, orig_bbio->inode, NULL, orig_bbio); - + btrfs_bio_init(bbio, fs_info, NULL, orig_bbio); + bbio->inode = orig_bbio->inode; bbio->file_offset = orig_bbio->file_offset; if (!(orig_bbio->bio.bi_opf & REQ_BTRFS_ONE_ORDERED)) orig_bbio->file_offset += map_length; @@ -244,7 +244,8 @@ static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio, __bio_add_page(repair_bio, bv->bv_page, bv->bv_len, bv->bv_offset); repair_bbio = btrfs_bio(repair_bio); - btrfs_bio_init(repair_bbio, failed_bbio->inode, NULL, fbio); + btrfs_bio_init(repair_bbio, fs_info, NULL, fbio); + repair_bbio->inode = failed_bbio->inode; repair_bbio->file_offset = failed_bbio->file_offset + bio_offset; mirror = next_repair_mirror(fbio, failed_bbio->mirror_num); @@ -263,6 +264,9 @@ static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *de struct btrfs_failed_bio *fbio = NULL; u32 offset = 0; + /* Read-repair requires the inode field to be set by the submitter. */ + ASSERT(inode); + /* * Hand off repair bios to the repair code as there is no upper level * submitter for them. @@ -323,17 +327,17 @@ static void btrfs_end_bio_work(struct work_struct *work) struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work); /* Metadata reads are checked and repaired by the submitter. */ - if (bbio->bio.bi_opf & REQ_META) - bbio->end_io(bbio); - else + if (bbio->inode && !(bbio->bio.bi_opf & REQ_META)) btrfs_check_read_bio(bbio, bbio->bio.bi_private); + else + bbio->end_io(bbio); } static void btrfs_simple_end_io(struct bio *bio) { struct btrfs_bio *bbio = btrfs_bio(bio); struct btrfs_device *dev = bio->bi_private; - struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info; + struct btrfs_fs_info *fs_info = bbio->fs_info; btrfs_bio_counter_dec(fs_info); @@ -357,7 +361,8 @@ static void btrfs_raid56_end_io(struct bio *bio) btrfs_bio_counter_dec(bioc->fs_info); bbio->mirror_num = bioc->mirror_num; - if (bio_op(bio) == REQ_OP_READ && !(bbio->bio.bi_opf & REQ_META)) + if (bio_op(bio) == REQ_OP_READ && bbio->inode && + !(bbio->bio.bi_opf & REQ_META)) btrfs_check_read_bio(bbio, NULL); else btrfs_orig_bbio_end_io(bbio); @@ -583,7 +588,7 @@ static bool should_async_write(struct btrfs_bio *bbio) * in order. 
*/ if (bbio->bio.bi_opf & REQ_META) { - struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info; + struct btrfs_fs_info *fs_info = bbio->fs_info; if (btrfs_is_zoned(fs_info)) return false; @@ -603,7 +608,7 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio, struct btrfs_io_context *bioc, struct btrfs_io_stripe *smap, int mirror_num) { - struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info; + struct btrfs_fs_info *fs_info = bbio->fs_info; struct async_submit_bio *async; async = kmalloc(sizeof(*async), GFP_NOFS); @@ -627,7 +632,7 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio, static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) { struct btrfs_inode *inode = bbio->inode; - struct btrfs_fs_info *fs_info = inode->root->fs_info; + struct btrfs_fs_info *fs_info = bbio->fs_info; struct btrfs_bio *orig_bbio = bbio; struct bio *bio = &bbio->bio; u64 logical = bio->bi_iter.bi_sector << 9; @@ -660,7 +665,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) * Save the iter for the end_io handler and preload the checksums for * data reads. */ - if (bio_op(bio) == REQ_OP_READ && !(bio->bi_opf & REQ_META)) { + if (bio_op(bio) == REQ_OP_READ && inode && !(bio->bi_opf & REQ_META)) { bbio->saved_iter = bio->bi_iter; ret = btrfs_lookup_bio_sums(bbio); if (ret) @@ -680,7 +685,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) * Csum items for reloc roots have already been cloned at this * point, so they are handled as part of the no-checksum case. */ - if (!(inode->flags & BTRFS_INODE_NODATASUM) && + if (inode && !(inode->flags & BTRFS_INODE_NODATASUM) && !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) && !btrfs_is_data_reloc_root(inode->root)) { if (should_async_write(bbio) && @@ -709,6 +714,9 @@ fail: void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num) { + /* If bbio->inode is not populated, its file_offset must be 0. */ + ASSERT(bbio->inode || bbio->file_offset == 0); + while (!btrfs_submit_chunk(bbio, mirror_num)) ; } diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h index 8edf3c35eead..51b4f3d93f04 100644 --- a/fs/btrfs/bio.h +++ b/fs/btrfs/bio.h @@ -30,7 +30,10 @@ typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio); * passed to btrfs_submit_bio for mapping to the physical devices. */ struct btrfs_bio { - /* Inode and offset into it that this I/O operates on. */ + /* + * Inode and offset into it that this I/O operates on. + * Only set for data I/O. + */ struct btrfs_inode *inode; u64 file_offset; @@ -58,6 +61,9 @@ struct btrfs_bio { atomic_t pending_ios; struct work_struct end_io_work; + /* File system that this I/O operates on. */ + struct btrfs_fs_info *fs_info; + /* * This member must come last, bio_alloc_bioset will allocate enough * bytes for entire btrfs_bio but relies on bio being last. 
@@ -73,10 +79,10 @@ static inline struct btrfs_bio *btrfs_bio(struct bio *bio) int __init btrfs_bioset_init(void); void __cold btrfs_bioset_exit(void); -void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode, +void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info, btrfs_bio_end_io_t end_io, void *private); struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf, - struct btrfs_inode *inode, + struct btrfs_fs_info *fs_info, btrfs_bio_end_io_t end_io, void *private); static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index d532a8c8c9d8..2d0493f0a184 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -69,7 +69,8 @@ static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode, bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op, GFP_NOFS, &btrfs_compressed_bioset)); - btrfs_bio_init(bbio, inode, end_io, NULL); + btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL); + bbio->inode = inode; bbio->file_offset = start; return to_compressed_bio(bbio); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f40e4a002f78..a1adadd5d25d 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -898,9 +898,10 @@ static void alloc_new_bio(struct btrfs_inode *inode, struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_bio *bbio; - bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, inode, + bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info, bio_ctrl->end_io_func, NULL); bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; + bbio->inode = inode; bbio->file_offset = file_offset; bio_ctrl->bbio = bbio; bio_ctrl->len_to_oe_boundary = U32_MAX; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 93e16a408f43..57d070025c7a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7711,7 +7711,9 @@ static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio, container_of(bbio, struct btrfs_dio_private, bbio); struct btrfs_dio_data *dio_data = iter->private; - btrfs_bio_init(bbio, BTRFS_I(iter->inode), btrfs_dio_end_io, bio->bi_private); + btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info, + btrfs_dio_end_io, bio->bi_private); + bbio->inode = BTRFS_I(iter->inode); bbio->file_offset = file_offset; dip->file_offset = file_offset; @@ -9899,6 +9901,7 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, u64 file_offset, u64 disk_bytenr, u64 disk_io_size, struct page **pages) { + struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_encoded_read_private priv = { .pending = ATOMIC_INIT(1), }; @@ -9907,9 +9910,10 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, init_waitqueue_head(&priv.wait); - bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, - btrfs_encoded_read_endio, &priv); + bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, + btrfs_encoded_read_endio, &priv); bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; + bbio->inode = inode; do { size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE); @@ -9918,9 +9922,10 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, atomic_inc(&priv.pending); btrfs_submit_bio(bbio, 0); - bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, + bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, btrfs_encoded_read_endio, &priv); bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; + bbio->inode = inode; continue; } 
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 45d04092f2f8..a9b32ba6b2ce 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -1640,14 +1640,14 @@ bool btrfs_use_zone_append(struct btrfs_bio *bbio) { u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT); struct btrfs_inode *inode = bbio->inode; - struct btrfs_fs_info *fs_info = inode->root->fs_info; + struct btrfs_fs_info *fs_info = bbio->fs_info; struct btrfs_block_group *cache; bool ret = false; if (!btrfs_is_zoned(fs_info)) return false; - if (!is_data_inode(&inode->vfs_inode)) + if (!inode || !is_data_inode(&inode->vfs_inode)) return false; if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE) -- cgit v1.2.3
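To close out the series: the last patch leaves btrfs_bio with two initialization patterns, sketched here from the hunks above. fs_info is always populated, while inode and file_offset are only set for data I/O; leaving them unset is what switches off checksum lookup/verification and read repair. The inode-less example is hypothetical and merely stands in for the scrub-specific bios the changelog mentions, which are not part of this series.

    /* Data I/O submitter (e.g. alloc_new_bio() in extent_io.c, per the diff above): */
    bbio = btrfs_bio_alloc(BIO_MAX_VECS, opf, fs_info, end_io_func, NULL);
    bbio->inode = inode;
    bbio->file_offset = file_offset;
    btrfs_submit_bio(bbio, mirror_num);

    /* Hypothetical inode-less user, wanting only logical -> physical mapping: */
    bbio = btrfs_bio_alloc(nr_vecs, REQ_OP_READ, fs_info, end_io_func, private);
    /* bbio->inode stays NULL and bbio->file_offset stays 0, which satisfies the
     * ASSERT(bbio->inode || bbio->file_offset == 0) in btrfs_submit_bio(). */
    btrfs_submit_bio(bbio, 0);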