author    Paolo Abeni <pabeni@redhat.com>    2023-09-12 19:10:03 +0200
committer Paolo Abeni <pabeni@redhat.com>    2023-09-12 19:10:04 +0200
commit    8fc8911b66962c6ff4345e7000930a4bcc54ae5a (patch)
tree      987f1ac9958a489af0b4bfd4726fb91ce94f028d /net/ipv4/tcp_input.c
parent    cd8bae85815416d19f46e3828d457442f77de292 (diff)
parent    133c4c0d37175f510a10fa9bed51e223936073fc (diff)
Merge branch 'tcp-backlog-processing-optims'
Eric Dumazet says:

====================
tcp: backlog processing optims

The first patches mostly prepare the ground for the last one. The last
patch of the series implements a form of ACK reduction that is applied
only when a TCP receiver is under high stress, which happens for
high-throughput flows. This gives us a ~20% increase for a single TCP
flow (100Gbit -> 120Gbit).
====================

Link: https://lore.kernel.org/r/20230911170531.828100-1-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
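The change below only marks an ACK as owed while the socket is owned by the
user-context backlog processor; the actual transmit happens later, once the
lock drops. A minimal userspace sketch of this defer-until-unlock pattern
(every identifier here is invented for illustration; this is not the kernel
code) could look like:

/* Minimal userspace model of the defer-until-unlock idea above.
 * While the "socket" is owned by user context, an ACK-worthy event just
 * sets a bit; the bit is flushed into a single ACK when the owner
 * releases the socket.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define ACK_DEFERRED	(1UL << 0)

struct fake_sock {
	atomic_ulong tsq_flags;	/* deferred-work bits */
	bool owned_by_user;	/* "lock" held by process context */
	int acks_sent;
};

static void send_ack(struct fake_sock *sk)
{
	sk->acks_sent++;
	printf("ACK sent (total %d)\n", sk->acks_sent);
}

/* Called for every segment that would normally force an immediate ACK. */
static void ack_snd_check(struct fake_sock *sk)
{
	if (sk->owned_by_user) {
		/* Defer: remember an ACK is owed, send nothing yet. */
		atomic_fetch_or(&sk->tsq_flags, ACK_DEFERRED);
		return;
	}
	send_ack(sk);
}

/* Called once the backlog is drained and the "lock" is released. */
static void release_sock(struct fake_sock *sk)
{
	sk->owned_by_user = false;
	if (atomic_fetch_and(&sk->tsq_flags, ~ACK_DEFERRED) & ACK_DEFERRED)
		send_ack(sk);	/* one ACK covers the whole backlog */
}

int main(void)
{
	struct fake_sock sk = { .owned_by_user = true };
	int i;

	for (i = 0; i < 16; i++)	/* 16 backlogged segments ... */
		ack_snd_check(&sk);
	release_sock(&sk);		/* ... end up as a single ACK */
	return 0;
}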
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--    net/ipv4/tcp_input.c    8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 06fe1cf645d5a..41b471748437b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5553,6 +5553,14 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
 	    tcp_in_quickack_mode(sk) ||
 	    /* Protocol state mandates a one-time immediate ACK */
 	    inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOW) {
+		/* If we are running from __release_sock() in user context,
+		 * Defer the ack until tcp_release_cb().
+		 */
+		if (sock_owned_by_user_nocheck(sk) &&
+		    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_backlog_ack_defer)) {
+			set_bit(TCP_ACK_DEFERRED, &sk->sk_tsq_flags);
+			return;
+		}
 send_now:
 		tcp_send_ack(sk);
 		return;
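The hunk above only records that an ACK is owed: as the new comment says, the
deferred bit in sk->sk_tsq_flags is acted on in tcp_release_cb(), after
__release_sock() has drained the backlog, so a single ACK can cover a whole
burst of backlogged segments. The deferral is also gated by the per-netns
sysctl_tcp_backlog_ack_defer value read above, so it can be switched off where
the old one-ACK-per-trigger behaviour is preferred.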