author	Linus Torvalds <torvalds@linux-foundation.org>	2021-11-18 12:41:14 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-11-18 12:41:14 -0800
commit	6fdf886424cf8c4fff96a20189c00606327e5df6 (patch)
tree	ea101e877e800417a979729cad1388fbe34816be /fs/btrfs/async-thread.c
parent	db850a9b8d173afd622984c6d28c0064412b3fd8 (diff)
parent	6c405b24097c24cbb11570b47fd382676014f72e (diff)
Merge tag 'for-5.16-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs fixes from David Sterba:
 "Several fixes and one old ioctl deprecation. Namely there's a fix for
  crashes/warnings with lzo compression that was suspected to be caused
  by the first pull merge resolution, but it was a different bug.

  Summary:

   - regression fix for a crash in lzo due to missing boundary checks of
     the page array

   - fix crashes on ARM64 due to missing barriers when synchronizing
     status bits between work queues

   - silence lockdep when reading chunk tree during mount

   - fix false positive warning in integrity checker on devices with
     disabled write caching

   - fix signedness of bitfields in scrub

   - start deprecation of balance v1 ioctl"

* tag 'for-5.16-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: deprecate BTRFS_IOC_BALANCE ioctl
  btrfs: make 1-bit bit-fields of scrub_page unsigned int
  btrfs: check-integrity: fix a warning on write caching disabled disk
  btrfs: silence lockdep when reading chunk tree during mount
  btrfs: fix memory ordering between normal and ordered work functions
  btrfs: fix a out-of-bound access in copy_compressed_data_to_page()
Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--  fs/btrfs/async-thread.c  14
1 file changed, 14 insertions, 0 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 309516e6a9682..43c89952b7d25 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -234,6 +234,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
 				  ordered_list);
 		if (!test_bit(WORK_DONE_BIT, &work->flags))
 			break;
+		/*
+		 * Orders all subsequent loads after reading WORK_DONE_BIT,
+		 * paired with the smp_mb__before_atomic in btrfs_work_helper;
+		 * this guarantees that the ordered function will see all
+		 * updates from the ordinary work function.
+		 */
+		smp_rmb();
 
 		/*
 		 * we are going to call the ordered done function, but
@@ -317,6 +324,13 @@ static void btrfs_work_helper(struct work_struct *normal_work)
 	thresh_exec_hook(wq);
 	work->func(work);
 	if (need_order) {
+		/*
+		 * Ensures all memory accesses done in the work function are
+		 * ordered before setting the WORK_DONE_BIT, so that the
+		 * thread which is going to execute the ordered work sees
+		 * them. Pairs with the smp_rmb in run_ordered_work.
+		 */
+		smp_mb__before_atomic();
 		set_bit(WORK_DONE_BIT, &work->flags);
 		run_ordered_work(wq, work);
 	} else {
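
The two hunks pair a write-side barrier with a read-side barrier around WORK_DONE_BIT: the normal work function publishes its results before setting the bit, and run_ordered_work() reads the bit before loading those results. Below is a minimal, self-contained userspace sketch of the same pattern, not the btrfs code itself: the names (normal_work, ordered_work, payload, flags) are illustrative, and the kernel's smp_mb__before_atomic()/smp_rmb() are approximated with C11 release/acquire fences. Build with something like: cc -std=c11 -pthread demo.c

	/* Userspace analogue of the WORK_DONE_BIT pairing above. */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define WORK_DONE_BIT 0x1UL

	static unsigned long payload;          /* result of the "ordinary" work     */
	static atomic_ulong flags;             /* stands in for work->flags          */

	static void *normal_work(void *arg)
	{
		payload = 42;                  /* plain store, like work->func()     */
		/* Order the payload store before the flag store
		 * (kernel: smp_mb__before_atomic() before set_bit()). */
		atomic_thread_fence(memory_order_release);
		atomic_fetch_or_explicit(&flags, WORK_DONE_BIT, memory_order_relaxed);
		return NULL;
	}

	static void *ordered_work(void *arg)
	{
		/* Wait until the flag is visible (kernel: test_bit()). */
		while (!(atomic_load_explicit(&flags, memory_order_relaxed) & WORK_DONE_BIT))
			;
		/* Order the payload load after the flag load (kernel: smp_rmb()). */
		atomic_thread_fence(memory_order_acquire);
		printf("payload = %lu\n", payload);   /* guaranteed to print 42 */
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, normal_work, NULL);
		pthread_create(&b, NULL, ordered_work, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		return 0;
	}

Without the two fences, a weakly ordered CPU may let the reader observe WORK_DONE_BIT as set while the writer's payload store is not yet visible; that reordering is the ARM64 crash the commit message refers to, which is why the barriers are added on both sides of the bit.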