Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--   net/ipv4/tcp.c   21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 934396bb137..66e9a729f6d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -268,7 +268,7 @@
#include <asm/uaccess.h>
#include <asm/ioctls.h>
-int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
+int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
@@ -568,7 +568,7 @@ new_segment:
skb->truesize += copy;
sk->sk_wmem_queued += copy;
sk->sk_forward_alloc -= copy;
- skb->ip_summed = CHECKSUM_HW;
+ skb->ip_summed = CHECKSUM_PARTIAL;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
skb_shinfo(skb)->gso_segs = 0;
@@ -723,7 +723,7 @@ new_segment:
* Check whether we can use HW checksum.
*/
if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
- skb->ip_summed = CHECKSUM_HW;
+ skb->ip_summed = CHECKSUM_PARTIAL;
skb_entail(sk, tp, skb);
copy = size_goal;
@@ -955,8 +955,11 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
* receive buffer and there was a small segment
* in queue.
*/
- (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
- !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
+ (copied > 0 &&
+ ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
+ ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
+ !icsk->icsk_ack.pingpong)) &&
+ !atomic_read(&sk->sk_rmem_alloc)))
time_to_ack = 1;
}
@@ -2205,7 +2208,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
th->fin = th->psh = 0;
th->check = ~csum_fold(th->check + delta);
- if (skb->ip_summed != CHECKSUM_HW)
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
th->check = csum_fold(csum_partial(skb->h.raw, thlen,
skb->csum));
@@ -2219,7 +2222,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
th->check = ~csum_fold(th->check + delta);
- if (skb->ip_summed != CHECKSUM_HW)
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
th->check = csum_fold(csum_partial(skb->h.raw, thlen,
skb->csum));
@@ -2254,9 +2257,7 @@ void __init tcp_init(void)
tcp_hashinfo.bind_bucket_cachep =
kmem_cache_create("tcp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
- if (!tcp_hashinfo.bind_bucket_cachep)
- panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
/* Size and allocate the main established and bind bucket
* hash tables.
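
For readability, the reworked ACK test from the tcp_cleanup_rbuf() hunk above can be restated as a small sketch (the other branches of the surrounding condition and the context lines are omitted; the reading of the two flags is inferred from the condition itself, not from separate documentation):

	/* Force an immediate ACK once data has been copied to user space and
	 * the receive queue has drained.  ICSK_ACK_PUSHED2 triggers the ACK
	 * even in pingpong (interactive) mode; ICSK_ACK_PUSHED does so only
	 * when pingpong mode is off. */
	if (copied > 0 &&
	    ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
	     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
	      !icsk->icsk_ack.pingpong)) &&
	    !atomic_read(&sk->sk_rmem_alloc))
		time_to_ack = 1;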