author    | Mark Brown <broonie@kernel.org> | 2020-08-25 11:01:46 +0100
committer | Mark Brown <broonie@kernel.org> | 2020-08-25 11:01:46 +0100
commit    | 3bec5b6aae830355e786e204b20a7cea38c3a8ed (patch)
tree      | fd597b87faf55ceb2a207ee94f4feca6276696db /lib/percpu_counter.c
parent    | a577f3456c0a2fac3dee037c483753e6e68f3e49 (diff)
parent    | d012a7190fc1fd72ed48911e77ca97ba4521bccd (diff)
Merge tag 'v5.9-rc2' into regulator-5.9
Linux 5.9-rc2
Diffstat (limited to 'lib/percpu_counter.c')
-rw-r--r-- | lib/percpu_counter.c | 19
1 file changed, 19 insertions, 0 deletions
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a66595ba5543..a2345de90e93 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -99,6 +99,25 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 EXPORT_SYMBOL(percpu_counter_add_batch);
 
 /*
+ * For percpu_counter with a big batch, the deviation of its count could
+ * be big, and there is requirement to reduce the deviation, like when the
+ * counter's batch could be runtime decreased to get a better accuracy,
+ * which can be achieved by running this sync function on each CPU.
+ */
+void percpu_counter_sync(struct percpu_counter *fbc)
+{
+	unsigned long flags;
+	s64 count;
+
+	raw_spin_lock_irqsave(&fbc->lock, flags);
+	count = __this_cpu_read(*fbc->counters);
+	fbc->count += count;
+	__this_cpu_sub(*fbc->counters, count);
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
+}
+EXPORT_SYMBOL(percpu_counter_sync);
+
+/*
  * Add up all the per-cpu counts, return the result. This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
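Note that percpu_counter_sync() only folds the calling CPU's partial count into fbc->count, so reducing the counter's global deviation requires running it on every CPU. Below is a minimal sketch, not part of this commit, of how a caller might do that: the counter my_counter and the functions around it are hypothetical, and schedule_on_each_cpu() is used to run the flush in process context on each online CPU.

#include <linux/percpu_counter.h>
#include <linux/workqueue.h>

/* Hypothetical counter owned by this example, not by the patch above. */
static struct percpu_counter my_counter;

/* Runs once per CPU: flush this CPU's delta into my_counter.count. */
static void my_counter_sync_fn(struct work_struct *dummy)
{
	percpu_counter_sync(&my_counter);
}

/*
 * May sleep. After it returns, every per-CPU delta has been folded in,
 * so a subsequent percpu_counter_read() deviates only by what has
 * accumulated since, bounded by num_online_cpus() * batch.
 */
static int my_counter_tighten(void)
{
	return schedule_on_each_cpu(my_counter_sync_fn);
}

A caller would typically invoke a helper like my_counter_tighten() right before shrinking the counter's batch at runtime, so the residual per-CPU deltas left over from the old, larger batch do not linger.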