author    Paolo Abeni <pabeni@redhat.com>    2023-09-12 19:10:03 +0200
committer Paolo Abeni <pabeni@redhat.com>    2023-09-12 19:10:04 +0200
commit    8fc8911b66962c6ff4345e7000930a4bcc54ae5a (patch)
tree      987f1ac9958a489af0b4bfd4726fb91ce94f028d /net/ipv4/tcp_output.c
parent    cd8bae85815416d19f46e3828d457442f77de292 (diff)
parent    133c4c0d37175f510a10fa9bed51e223936073fc (diff)
Merge branch 'tcp-backlog-processing-optims'
Eric Dumazet says:

====================
tcp: backlog processing optims

First patches are mostly preparing the ground for the last one.

Last patch of the series implements sort of ACK reduction only for the
cases a TCP receiver is under high stress, which happens for high
throughput flows.

This gives us a ~20% increase of single TCP flow (100Gbit -> 120Gbit)
====================

Link: https://lore.kernel.org/r/20230911170531.828100-1-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
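The receive-side half of this mechanism is not in this file: somewhere in the TCP input path, an ACK that would normally be sent while the socket is owned by the user context (i.e. while the backlog is being drained under release_sock()) is instead recorded in sk->sk_tsq_flags. The sketch below only illustrates that pattern and is not a hunk from the series; the helper name tcp_ack_or_defer() is made up here, and the TCP_ACK_DEFERRED bit name is inferred from the TCPF_ACK_DEFERRED mask visible in the diff that follows.

/* Illustrative sketch only -- the real producer-side change lives in the
 * TCP input path, outside net/ipv4/tcp_output.c, and may differ in detail.
 */
#include <net/tcp.h>

static void tcp_ack_or_defer(struct sock *sk)	/* hypothetical helper */
{
	/* Backlog segments are processed with the socket owned by the
	 * user context (release_sock() -> __release_sock()); emitting
	 * one ACK per segment there is wasteful for high-rate flows.
	 */
	if (sock_owned_by_user_nocheck(sk)) {
		/* Record that an ACK is wanted.  tcp_release_cb() will
		 * send a single ACK once the backlog has been drained
		 * (see the TCPF_ACK_DEFERRED handling in the diff below).
		 */
		set_bit(TCP_ACK_DEFERRED, &sk->sk_tsq_flags);
		return;
	}

	/* Not under backlog pressure: ACK immediately as before. */
	tcp_send_ack(sk);
}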
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	15
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ccfc8bbf74558..1fc1f879cfd6c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1077,7 +1077,8 @@ static void tcp_tasklet_func(struct tasklet_struct *t)
 #define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
			   TCPF_WRITE_TIMER_DEFERRED |	\
			   TCPF_DELACK_TIMER_DEFERRED |	\
-			   TCPF_MTU_REDUCED_DEFERRED)
+			   TCPF_MTU_REDUCED_DEFERRED |	\
+			   TCPF_ACK_DEFERRED)
 /**
  * tcp_release_cb - tcp release_sock() callback
  * @sk: socket
@@ -1101,16 +1102,6 @@ void tcp_release_cb(struct sock *sk)
 		tcp_tsq_write(sk);
 		__sock_put(sk);
 	}
-	/* Here begins the tricky part :
-	 * We are called from release_sock() with :
-	 * 1) BH disabled
-	 * 2) sk_lock.slock spinlock held
-	 * 3) socket owned by us (sk->sk_lock.owned == 1)
-	 *
-	 * But following code is meant to be called from BH handlers,
-	 * so we should keep BH disabled, but early release socket ownership
-	 */
-	sock_release_ownership(sk);

 	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
 		tcp_write_timer_handler(sk);
@@ -1124,6 +1115,8 @@ void tcp_release_cb(struct sock *sk)
 		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
 		__sock_put(sk);
 	}
+	if ((flags & TCPF_ACK_DEFERRED) && inet_csk_ack_scheduled(sk))
+		tcp_send_ack(sk);
 }
 EXPORT_SYMBOL(tcp_release_cb);
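As the final hunk shows, the deferred ACK is only emitted from tcp_release_cb() when inet_csk_ack_scheduled(sk) reports that an ACK is actually pending, so draining a backlog costs at most one ACK per release_sock() rather than one for every ACK-triggering segment processed from the backlog; that coalescing is where the ~20% single-flow gain quoted in the merge message comes from.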