author     Alexander Lobakin <aleksander.lobakin@intel.com>   2025-02-25 18:17:48 +0100
committer  Paolo Abeni <pabeni@redhat.com>                    2025-02-27 14:03:39 +0100
commit     ed16b8a4d1ca901fc13ced042b76dde54738249a (patch)
tree       faee17265fb8866bfad53332bcec8b039781d087 /kernel
parent     859d6acd94cc4ad65e9eb3fa2a9815a19e5b35cf (diff)
bpf: cpumap: switch to napi_skb_cache_get_bulk()
Now that cpumap uses GRO, which drops unused skb heads to the NAPI cache, use napi_skb_cache_get_bulk() to try to reuse cached entries and lower MM layer pressure. Always disable BH before checking and running the cpumap-pinned XDP prog, and don't re-enable it between that and allocating an skb bulk, since the NAPI caches can only be accessed from BH context.

The better GRO aggregates packets, the fewer new skbs need to be allocated. If an aggregated skb contains 16 frags, 15 skb heads were returned to the cache, so the next 15 skbs will be built without allocating anything.

The same trafficgen UDP GRO test now shows:

                GRO off    GRO on
threaded GRO      2.3        4       Mpps
thr bulk GRO      2.4        4.7     Mpps
diff              +4         +17     %

Comparing to the baseline cpumap:

baseline          2.7        N/A     Mpps
thr bulk GRO      2.4        4.7     Mpps
diff              -11        +74     %

Tested-by: Daniel Xu <dxu@dxuuu.xyz>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
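For illustration only, a minimal sketch of the pattern the patch settles on (not the kernel's exact code; the function name is a placeholder, arguments are simplified, and the XDP prog run is elided to a comment — the real flow lives in cpu_map_bpf_prog_run() and cpu_map_kthread_run() in kernel/bpf/cpumap.c). The key point is that napi_skb_cache_get_bulk() must sit inside the same local_bh_disable() section as the GRO processing that refills the cache, which is also why the _bh RCU variants become unnecessary:

/* Sketch only: placeholder helper, simplified from kernel/bpf/cpumap.c */
#include <linux/bottom_half.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <net/xdp.h>

static unsigned int get_skb_heads_for_frames(void **frames, void **skbs,
					     unsigned int xdp_n)
{
	unsigned int m, i;

	/* NAPI skb cache is per-CPU and may only be touched from BH context */
	local_bh_disable();

	/* BH is already off, so the plain RCU flavour is enough here */
	rcu_read_lock();
	/* ... run the cpumap-pinned XDP prog over frames[0..xdp_n) ... */
	rcu_read_unlock();

	/*
	 * Pull skb heads from the per-CPU NAPI cache first; GRO refills it
	 * with the heads it drops while aggregating, so well-aggregated
	 * traffic needs few or no fresh slab allocations at this point.
	 */
	m = napi_skb_cache_get_bulk(skbs, xdp_n);
	if (unlikely(m < xdp_n)) {
		/* Not enough heads: return the frames we cannot turn into skbs */
		for (i = m; i < xdp_n; i++)
			xdp_return_frame(frames[i]);
		xdp_n = m;
	}

	/* ... build skbs from frames[0..xdp_n) and feed them to GRO ... */

	local_bh_enable();
	return xdp_n;
}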
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/cpumap.c  15
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 85936f09d8d77..67e8a2fc1a99d 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -253,7 +253,7 @@ static void cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
if (!rcpu->prog)
goto out;
- rcu_read_lock_bh();
+ rcu_read_lock();
bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
ret->xdp_n = cpu_map_bpf_prog_run_xdp(rcpu, frames, ret->xdp_n, stats);
@@ -265,7 +265,7 @@ static void cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
xdp_do_flush();
bpf_net_ctx_clear(bpf_net_ctx);
- rcu_read_unlock_bh(); /* resched point, may call do_softirq() */
+ rcu_read_unlock();
out:
if (unlikely(ret->skb_n) && ret->xdp_n)
@@ -303,7 +303,6 @@ static int cpu_map_kthread_run(void *data)
while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
struct xdp_cpumap_stats stats = {}; /* zero stats */
unsigned int kmem_alloc_drops = 0, sched = 0;
- gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
struct cpu_map_ret ret = { };
void *frames[CPUMAP_BATCH];
void *skbs[CPUMAP_BATCH];
@@ -355,15 +354,14 @@ static int cpu_map_kthread_run(void *data)
prefetchw(page);
}
+ local_bh_disable();
+
/* Support running another XDP prog on this CPU */
cpu_map_bpf_prog_run(rcpu, frames, skbs, &ret, &stats);
- if (!ret.xdp_n) {
- local_bh_disable();
+ if (!ret.xdp_n)
goto stats;
- }
- m = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp,
- ret.xdp_n, skbs);
+ m = napi_skb_cache_get_bulk(skbs, ret.xdp_n);
if (unlikely(m < ret.xdp_n)) {
for (i = m; i < ret.xdp_n; i++)
xdp_return_frame(frames[i]);
@@ -376,7 +374,6 @@ static int cpu_map_kthread_run(void *data)
ret.xdp_n = m;
}
- local_bh_disable();
for (i = 0; i < ret.xdp_n; i++) {
struct xdp_frame *xdpf = frames[i];