author    David S. Miller <davem@davemloft.net>    2019-03-23 21:57:38 -0400
committer David S. Miller <davem@davemloft.net>    2019-03-23 21:57:38 -0400
commit    bdaba8959e9248524f3d148d1aa47f13944ba8e8 (patch)
tree      0e6f2cfd66715d2234acda3ae48d1543facc5303 /include/net/sock.h
parent    7c1508e5f64a784988be4659dd4d6b791c008bbf (diff)
parent    8b27dae5a2e89a61c46c6dbc76c040c0e6d0ed4c (diff)
Merge branch 'tcp-rx-tx-cache'
Eric Dumazet says:
====================
tcp: add rx/tx cache to reduce lock contention
On hosts with many cpus we can observe very serious contention
on the spinlocks used in the mm slab layer.
The following can happen quite often:
1) TX path
sendmsg() allocates one (fclone) skb on CPU A, sends a clone.
ACK is received on CPU B, and consumes the skb that was in the retransmit
queue.
2) RX path
the network driver allocates the skb on CPU C
recvmsg() happens on CPU D, freeing the skb after it has been delivered
to user space.
In both cases, we are hitting the asymmetric alloc/free pattern
for which slab has to drain alien caches. At 8 Mpps, this represents
16 million alloc/free operations per second and has a huge penalty.
In an interesting experiment, I tried to use a single kmem_cache for all skbs
(in skb_init():
    skbuff_fclone_cache = skbuff_head_cache =
        kmem_cache_create("skbuff_fclone_cache", sizeof(struct sk_buff_fclones), ...);
) and most of the contention disappeared, since cpus could better use
their local slab per-cpu cache.
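For illustration only (this is not part of the series, and the real skb_init()
in net/core/skbuff.c also sets up other caches), that experiment amounts to
roughly the following, with the usual slab flags assumed:

	/* Sketch of the single-cache experiment described above: size the
	 * shared cache for the larger fclone object, so that every skb,
	 * cloned or not, comes from (and returns to) the same kmem_cache
	 * and frees can be satisfied from the local per-cpu slab cache.
	 */
	void __init skb_init(void)
	{
		skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
							sizeof(struct sk_buff_fclones),
							0,
							SLAB_HWCACHE_ALIGN | SLAB_PANIC,
							NULL);
		skbuff_head_cache = skbuff_fclone_cache;
	}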
But we can actually do better, as the following patches show.
TX: at ACK time, no longer free the skb but put it back in a tcp socket cache,
so that the next sendmsg() can reuse it immediately (a rough sketch of the
reuse path follows below).
RX: at recvmsg() time, do not free the skb but put it in a tcp socket cache,
so that it can be freed by the cpu feeding the incoming packets in BH.
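A rough sketch of the TX reuse path (simplified; in the series this logic sits
inside sk_stream_alloc_skb() in net/ipv4/tcp.c, and the helper name below is
made up purely for illustration):

	#include <linux/skbuff.h>
	#include <net/sock.h>

	/* Hypothetical helper, for illustration only: try to reuse the skb
	 * that sk_wmem_free_skb() parked in sk->sk_tx_skb_cache at ACK time.
	 * It is only safe to hand it back once its clone is gone, i.e. the
	 * fclone refcount has dropped back to 1.
	 */
	static struct sk_buff *sk_try_reuse_tx_skb(struct sock *sk)
	{
		struct sk_buff *skb = sk->sk_tx_skb_cache;
		const struct sk_buff_fclones *fclones;

		if (!skb)
			return NULL;
		fclones = container_of(skb, struct sk_buff_fclones, skb1);
		if (refcount_read(&fclones->fclone_ref) != 1)
			return NULL;			/* clone still in flight */
		sk->sk_tx_skb_cache = NULL;
		sk->sk_wmem_queued -= skb->truesize;	/* undo the charge left in place at park time */
		sk_mem_uncharge(sk, skb->truesize);
		pskb_trim(skb, 0);			/* drop the old payload */
		return skb;				/* ready for the next sendmsg() */
	}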
This increased the performance of a small-RPC benchmark by about 10% on a host
with 112 hyperthreads.
v2: - Solved a race condition: sk_stream_alloc_skb() now makes sure the prior
      clone has been freed.
    - Really test rps_needed in sk_eat_skb(), as claimed.
    - Fixed the rps_needed use in drivers/net/tun.c
v3: Added an #ifdef CONFIG_RPS to avoid a compile error (reported by the kbuild robot)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/sock.h')
-rw-r--r--   include/net/sock.h   17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 8de5ee258b93..577d91fb5626 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -368,6 +368,7 @@ struct sock {
 	atomic_t		sk_drops;
 	int			sk_rcvlowat;
 	struct sk_buff_head	sk_error_queue;
+	struct sk_buff		*sk_rx_skb_cache;
 	struct sk_buff_head	sk_receive_queue;
 	/*
 	 * The backlog queue is special, it is always used with
@@ -414,6 +415,7 @@ struct sock {
 		struct sk_buff	*sk_send_head;
 		struct rb_root	tcp_rtx_queue;
 	};
+	struct sk_buff		*sk_tx_skb_cache;
 	struct sk_buff_head	sk_write_queue;
 	__s32			sk_peek_off;
 	int			sk_write_pending;
@@ -966,7 +968,7 @@ static inline void sock_rps_record_flow_hash(__u32 hash)
 static inline void sock_rps_record_flow(const struct sock *sk)
 {
 #ifdef CONFIG_RPS
-	if (static_key_false(&rfs_needed)) {
+	if (static_branch_unlikely(&rfs_needed)) {
 		/* Reading sk->sk_rxhash might incur an expensive cache line
 		 * miss.
 		 *
@@ -1463,6 +1465,10 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
 
 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 {
+	if (!sk->sk_tx_skb_cache) {
+		sk->sk_tx_skb_cache = skb;
+		return;
+	}
 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
 	sk->sk_wmem_queued -= skb->truesize;
 	sk_mem_uncharge(sk, skb->truesize);
@@ -2433,6 +2439,15 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
+	if (
+#ifdef CONFIG_RPS
+	    !static_branch_unlikely(&rps_needed) &&
+#endif
+	    !sk->sk_rx_skb_cache) {
+		sk->sk_rx_skb_cache = skb;
+		skb_orphan(skb);
+		return;
+	}
 	__kfree_skb(skb);
 }
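The diff above is limited to include/net/sock.h, so it only shows sk_eat_skb()
parking the skb. The matching free happens on the BH side (in tcp_v4_rcv() /
tcp_v6_rcv() in this series); a simplified sketch, with sk and skb as in
tcp_v4_rcv() and the sock_owned_by_user()/backlog handling omitted:

	/* Sketch of the BH-side consumer: while holding the socket lock,
	 * pick up the skb that recvmsg() parked in sk->sk_rx_skb_cache and
	 * free it on this cpu, so that alloc (driver) and free happen on
	 * the same cpu again.
	 */
	struct sk_buff *skb_to_free;
	int ret;

	bh_lock_sock_nested(sk);
	skb_to_free = sk->sk_rx_skb_cache;
	sk->sk_rx_skb_cache = NULL;
	ret = tcp_v4_do_rcv(sk, skb);
	bh_unlock_sock(sk);

	if (skb_to_free)
		__kfree_skb(skb_to_free);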