From 447286ebadaafa551550704ff0b42eb08b1d1cb2 Mon Sep 17 00:00:00 2001
From: Yangtao Li
Date: Thu, 16 Feb 2023 21:53:24 +0800
Subject: f2fs: convert to use bitmap API

Let's use BIT() and GENMASK() instead of open-coding them.

Signed-off-by: Yangtao Li
Reviewed-by: Chao Yu
Signed-off-by: Jaegeuk Kim
---
 fs/f2fs/compress.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'fs/f2fs/compress.c')

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index b40dec3d7f79..93fec1d37899 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -670,7 +670,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 
 	cc->cbuf->clen = cpu_to_le32(cc->clen);
 
-	if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
+	if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
 		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
 					cc->cbuf->cdata, cc->clen);
 	cc->cbuf->chksum = cpu_to_le32(chksum);
@@ -761,7 +761,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
 
 	ret = cops->decompress_pages(dic);
 
-	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
+	if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
 		u32 provided = le32_to_cpu(dic->cbuf->chksum);
 		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
 
--
cgit v1.2.3
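
The conversion above is purely mechanical: BIT(nr) expands to an unsigned shift of 1 by nr bits, so the flag test keeps the same semantics as the open-coded form (<< binds tighter than &, so the old expression already tested the intended bit). A minimal userspace sketch of the equivalence; the BIT() definition mirrors the kernel macro, and the COMPRESS_CHKSUM value below is an arbitrary example bit index, not f2fs's real enum value:

#include <assert.h>
#include <stdio.h>

/* Mirrors the kernel's definition: BIT(nr) is 1UL shifted left by nr. */
#define BIT(nr) (1UL << (nr))

/* Arbitrary example bit index standing in for f2fs's COMPRESS_CHKSUM. */
#define COMPRESS_CHKSUM 1

int main(void)
{
	unsigned int i_compress_flag = BIT(COMPRESS_CHKSUM);

	/* The open-coded test and the BIT() test check the same bit. */
	assert((i_compress_flag & 1 << COMPRESS_CHKSUM) ==
	       (i_compress_flag & BIT(COMPRESS_CHKSUM)));

	printf("checksum flag set: %s\n",
	       (i_compress_flag & BIT(COMPRESS_CHKSUM)) ? "yes" : "no");
	return 0;
}
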
From babedcbac164cec970872b8097401ca913a80e61 Mon Sep 17 00:00:00 2001
From: Yangtao Li
Date: Tue, 21 Mar 2023 01:22:18 +0800
Subject: f2fs: compress: fix to call f2fs_wait_on_page_writeback() in f2fs_write_raw_pages()

BUG_ON() will be triggered when writing files concurrently, because the
same page is written back multiple times.

1597 void folio_end_writeback(struct folio *folio)
1598 {
......
1618 	if (!__folio_end_writeback(folio))
1619 		BUG();
......
1625 }

kernel BUG at mm/filemap.c:1619!

Call Trace:
 f2fs_write_end_io+0x1a0/0x370
 blk_update_request+0x6c/0x410
 blk_mq_end_request+0x15/0x130
 blk_complete_reqs+0x3c/0x50
 __do_softirq+0xb8/0x29b
 ? sort_range+0x20/0x20
 run_ksoftirqd+0x19/0x20
 smpboot_thread_fn+0x10b/0x1d0
 kthread+0xde/0x110
 ? kthread_complete_and_exit+0x20/0x20
 ret_from_fork+0x22/0x30

Below is the concurrency scenario:

[Process A]              [Process B]              [Process C]
f2fs_write_raw_pages()
 - redirty_page_for_writepage()
 - unlock page()
                         f2fs_do_write_data_page()
                          - lock_page()
                          - clear_page_dirty_for_io()
                          - set_page_writeback()  [1st writeback]
                          .....
                          - unlock page()
                                                  generic_perform_write()
                                                   - f2fs_write_begin()
                                                    - wait_for_stable_page()
                                                   - f2fs_write_end()
                                                    - set_page_dirty()
 - lock_page()
 - f2fs_do_write_data_page()
  - set_page_writeback()  [2nd writeback]

This problem was introduced by the previous commit 7377e853967b ("f2fs:
compress: fix potential deadlock of compress file"). All page locks were
released in f2fs_write_raw_pages(), but the subsequent write path ignored
whether the page was still under writeback. Fix it by waiting for page
writeback to finish before writing the page again.

Cc: Christoph Hellwig
Fixes: 4c8ff7095bef ("f2fs: support data compression")
Fixes: 7377e853967b ("f2fs: compress: fix potential deadlock of compress file")
Signed-off-by: Qi Han
Signed-off-by: Yangtao Li
Reviewed-by: Chao Yu
Signed-off-by: Jaegeuk Kim
---
 fs/f2fs/compress.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'fs/f2fs/compress.c')

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 93fec1d37899..9b7149534a58 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1456,6 +1456,12 @@ continue_unlock:
 			if (!PageDirty(cc->rpages[i]))
 				goto continue_unlock;
 
+			if (PageWriteback(cc->rpages[i])) {
+				if (wbc->sync_mode == WB_SYNC_NONE)
+					goto continue_unlock;
+				f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
+			}
+
 			if (!clear_page_dirty_for_io(cc->rpages[i]))
 				goto continue_unlock;
 
--
cgit v1.2.3

From 1aa161e43106d46ca8e9a86f4aa28d420258134b Mon Sep 17 00:00:00 2001
From: Jaegeuk Kim
Date: Thu, 23 Mar 2023 15:37:54 -0700
Subject: f2fs: fix scheduling while atomic in decompression path

[ 16.945668][ C0] Call trace:
[ 16.945678][ C0]  dump_backtrace+0x110/0x204
[ 16.945706][ C0]  dump_stack_lvl+0x84/0xbc
[ 16.945735][ C0]  __schedule_bug+0xb8/0x1ac
[ 16.945756][ C0]  __schedule+0x724/0xbdc
[ 16.945778][ C0]  schedule+0x154/0x258
[ 16.945793][ C0]  bit_wait_io+0x48/0xa4
[ 16.945808][ C0]  out_of_line_wait_on_bit+0x114/0x198
[ 16.945824][ C0]  __sync_dirty_buffer+0x1f8/0x2e8
[ 16.945853][ C0]  __f2fs_commit_super+0x140/0x1f4
[ 16.945881][ C0]  f2fs_commit_super+0x110/0x28c
[ 16.945898][ C0]  f2fs_handle_error+0x1f4/0x2f4
[ 16.945917][ C0]  f2fs_decompress_cluster+0xc4/0x450
[ 16.945942][ C0]  f2fs_end_read_compressed_page+0xc0/0xfc
[ 16.945959][ C0]  f2fs_handle_step_decompress+0x118/0x1cc
[ 16.945978][ C0]  f2fs_read_end_io+0x168/0x2b0
[ 16.945993][ C0]  bio_endio+0x25c/0x2c8
[ 16.946015][ C0]  dm_io_dec_pending+0x3e8/0x57c
[ 16.946052][ C0]  clone_endio+0x134/0x254
[ 16.946069][ C0]  bio_endio+0x25c/0x2c8
[ 16.946084][ C0]  blk_update_request+0x1d4/0x478
[ 16.946103][ C0]  scsi_end_request+0x38/0x4cc
[ 16.946129][ C0]  scsi_io_completion+0x94/0x184
[ 16.946147][ C0]  scsi_finish_command+0xe8/0x154
[ 16.946164][ C0]  scsi_complete+0x90/0x1d8
[ 16.946181][ C0]  blk_done_softirq+0xa4/0x11c
[ 16.946198][ C0]  _stext+0x184/0x614
[ 16.946214][ C0]  __irq_exit_rcu+0x78/0x144
[ 16.946234][ C0]  handle_domain_irq+0xd4/0x154
[ 16.946260][ C0]  gic_handle_irq.33881+0x5c/0x27c
[ 16.946281][ C0]  call_on_irq_stack+0x40/0x70
[ 16.946298][ C0]  do_interrupt_handler+0x48/0xa4
[ 16.946313][ C0]  el1_interrupt+0x38/0x68
[ 16.946346][ C0]  el1h_64_irq_handler+0x20/0x30
[ 16.946362][ C0]  el1h_64_irq+0x78/0x7c
[ 16.946377][ C0]  finish_task_switch+0xc8/0x3d8
[ 16.946394][ C0]  __schedule+0x600/0xbdc
[ 16.946408][ C0]  preempt_schedule_common+0x34/0x5c
[ 16.946423][ C0]  preempt_schedule+0x44/0x48
[ 16.946438][ C0]  process_one_work+0x30c/0x550
[ 16.946456][ C0]  worker_thread+0x414/0x8bc
[ 16.946472][ C0]  kthread+0x16c/0x1e0
[ 16.946486][ C0]  ret_from_fork+0x10/0x20

Fixes: bff139b49d9f ("f2fs: handle decompress only post processing in softirq")
Fixes: 95fa90c9e5a7 ("f2fs: support recording errors into superblock")
Reviewed-by: Chao Yu
Signed-off-by: Jaegeuk Kim
---
 fs/f2fs/compress.c | 7 ++++++-
 fs/f2fs/f2fs.h     | 1 +
 fs/f2fs/super.c    | 2 +-
 3 files changed, 8 insertions(+), 2 deletions(-)

(limited to 'fs/f2fs/compress.c')

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 9b7149534a58..3182e1506252 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -755,7 +755,12 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
 
 	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
 		ret = -EFSCORRUPTED;
-		f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
+
+		/* Avoid f2fs_commit_super in irq context */
+		if (in_task)
+			f2fs_save_errors(sbi, ERROR_FAIL_DECOMPRESSION);
+		else
+			f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
 		goto out_release;
 	}
 
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 53a005b420cf..4e2596dacbf1 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -3539,6 +3539,7 @@ int f2fs_quota_sync(struct super_block *sb, int type);
 loff_t max_file_blocks(struct inode *inode);
 void f2fs_quota_off_umount(struct super_block *sb);
 void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason);
+void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
 int f2fs_sync_fs(struct super_block *sb, int sync);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 9c87d91df61b..50d23dcc33fb 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -3885,7 +3885,7 @@ void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason)
 	f2fs_up_write(&sbi->sb_lock);
 }
 
-static void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
+void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
 {
 	spin_lock(&sbi->error_lock);
 	if (!test_bit(flag, (unsigned long *)sbi->errors)) {
--
cgit v1.2.3
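
The crash above is a sleep-in-atomic bug: f2fs_handle_error() ends up in f2fs_commit_super(), which waits on buffer writeback, but the decompression path can run from the read bio's softirq completion. The fix splits error handling into a cheap, non-sleeping "record only" step and the full, sleeping superblock update. A standalone sketch of that general pattern follows; the may_sleep flag and helper names are illustrative stand-ins, not f2fs API, and the sketch shows the intent (defer the sleeping work when the caller cannot sleep) rather than the exact upstream code:

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative stand-ins: record_error() only latches an error flag and
 * never sleeps, so it is safe in irq/softirq context; flush_error_to_disk()
 * models the sleeping superblock update and must run in task context only.
 */
static int pending_error;

static void record_error(int err)
{
	pending_error = err;	/* cheap, non-blocking bookkeeping */
}

static void flush_error_to_disk(int err)
{
	record_error(err);
	printf("syncing error %d to the superblock (may sleep)\n", err);
}

/* Dispatch on the caller's context, mirroring the shape of the fix. */
static void handle_decompress_error(int err, bool may_sleep)
{
	if (may_sleep)
		flush_error_to_disk(err);
	else
		record_error(err);	/* defer the sync to a later task-context caller */
}

int main(void)
{
	handle_decompress_error(-1, false);	/* e.g. softirq completion path */
	handle_decompress_error(-1, true);	/* e.g. workqueue path */
	return 0;
}
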
From 3094e5579b4d5d8343fdb05e9a3a35cc85a14c1c Mon Sep 17 00:00:00 2001
From: Yangtao Li
Date: Fri, 31 Mar 2023 00:49:47 +0800
Subject: f2fs: merge lz4hc_compress_pages() to lz4_compress_pages()

Remove unnecessary lz4hc_compress_pages().

Signed-off-by: Yangtao Li
Reviewed-by: Chao Yu
[Jaegeuk Kim: clean up]
Signed-off-by: Jaegeuk Kim
---
 fs/f2fs/compress.c | 30 ++++++++----------------------
 1 file changed, 8 insertions(+), 22 deletions(-)

(limited to 'fs/f2fs/compress.c')

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 3182e1506252..11653fa79289 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -264,35 +264,21 @@ static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
 	cc->private = NULL;
 }
 
-#ifdef CONFIG_F2FS_FS_LZ4HC
-static int lz4hc_compress_pages(struct compress_ctx *cc)
+static int lz4_compress_pages(struct compress_ctx *cc)
 {
+	int len = -EINVAL;
 	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
-	int len;
 
-	if (level)
-		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
-				cc->clen, level, cc->private);
-	else
+	if (!level)
 		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
 						cc->clen, cc->private);
-	if (!len)
-		return -EAGAIN;
-
-	cc->clen = len;
-	return 0;
-}
-#endif
-
-static int lz4_compress_pages(struct compress_ctx *cc)
-{
-	int len;
-
 #ifdef CONFIG_F2FS_FS_LZ4HC
-	return lz4hc_compress_pages(cc);
+	else
+		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
+				cc->clen, level, cc->private);
 #endif
-	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
-				cc->clen, cc->private);
+	if (len < 0)
+		return len;
 	if (!len)
 		return -EAGAIN;
 
--
cgit v1.2.3
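
The merged helper picks the compressor from the per-inode level: level 0 uses the fast LZ4 path, a non-zero level goes through LZ4HC when CONFIG_F2FS_FS_LZ4HC is enabled, and a zero return (incompressible data) is mapped to -EAGAIN. A minimal userspace sketch of the same dispatch against liblz4; note that the userspace LZ4_compress_default()/LZ4_compress_HC() signatures lack the extra workspace argument the kernel variants take (cc->private above):

/* Build with: cc lz4_demo.c -llz4 */
#include <lz4.h>
#include <lz4hc.h>
#include <stdio.h>
#include <string.h>

/*
 * Pick the compressor from 'level', like the merged lz4_compress_pages():
 * 0 means the fast default path, anything else means LZ4HC at that level.
 * Both liblz4 calls return the compressed size, or 0 on failure (e.g. the
 * output buffer is too small), which the kernel code maps to -EAGAIN.
 */
static int compress_buf(const char *src, int srclen, char *dst, int dstcap,
			int level)
{
	if (!level)
		return LZ4_compress_default(src, dst, srclen, dstcap);
	return LZ4_compress_HC(src, dst, srclen, dstcap, level);
}

int main(void)
{
	static char src[4096];
	static char dst[LZ4_COMPRESSBOUND(sizeof(src))];
	int len;

	memset(src, 'a', sizeof(src));	/* trivially compressible input */

	len = compress_buf(src, sizeof(src), dst, sizeof(dst), 0);
	printf("LZ4 default: %zu -> %d bytes\n", sizeof(src), len);

	len = compress_buf(src, sizeof(src), dst, sizeof(dst), LZ4HC_CLEVEL_DEFAULT);
	printf("LZ4HC:       %zu -> %d bytes\n", sizeof(src), len);

	return len > 0 ? 0 : 1;
}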