Diffstat (limited to 'net/mptcp/protocol.c')
-rw-r--r--	net/mptcp/protocol.c	65
1 file changed, 37 insertions(+), 28 deletions(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 0d536b183a6c..37ebcb7640eb 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -139,7 +139,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
 	    !skb_try_coalesce(to, from, &fragstolen, &delta))
 		return false;
 
-	pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
+	pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n",
 		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
 		 to->len, MPTCP_SKB_CB(from)->end_seq);
 	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
@@ -217,7 +217,7 @@ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
 	end_seq = MPTCP_SKB_CB(skb)->end_seq;
 	max_seq = atomic64_read(&msk->rcv_wnd_sent);
 
-	pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
+	pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
 		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
 	if (after64(end_seq, max_seq)) {
 		/* out of window */
@@ -643,7 +643,7 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
 		}
 	}
 
-	pr_debug("msk=%p ssk=%p", msk, ssk);
+	pr_debug("msk=%p ssk=%p\n", msk, ssk);
 	tp = tcp_sk(ssk);
 	do {
 		u32 map_remaining, offset;
@@ -724,7 +724,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
 	u64 end_seq;
 
 	p = rb_first(&msk->out_of_order_queue);
-	pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
+	pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
 	while (p) {
 		skb = rb_to_skb(p);
 		if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
@@ -746,7 +746,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
 			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
 
 			/* skip overlapping data, if any */
-			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
+			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
 				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
 				 delta);
 			MPTCP_SKB_CB(skb)->offset += delta;
@@ -1240,7 +1240,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
 	size_t copy;
 	int i;
 
-	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
+	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
 		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
 
 	if (WARN_ON_ONCE(info->sent > info->limit ||
@@ -1341,7 +1341,7 @@ alloc_skb:
 	mpext->use_map = 1;
 	mpext->dsn64 = 1;
 
-	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
+	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
 		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
 		 mpext->dsn64);
 
@@ -1892,7 +1892,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 			if (!msk->first_pending)
 				WRITE_ONCE(msk->first_pending, dfrag);
 		}
-		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
+		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
 			 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
 			 !dfrag_collapsed);
 
@@ -2248,7 +2248,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 			}
 		}
 
-		pr_debug("block timeout %ld", timeo);
+		pr_debug("block timeout %ld\n", timeo);
 		sk_wait_data(sk, &timeo, NULL);
 	}
 
@@ -2264,7 +2264,7 @@ out_err:
 		}
 	}
 
-	pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
+	pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n",
 		 msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
 		 skb_queue_empty(&msk->receive_queue), copied);
 	if (!(flags & MSG_PEEK))
@@ -2326,7 +2326,7 @@ struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
 			continue;
 		}
 
-		if (subflow->backup) {
+		if (subflow->backup || subflow->request_bkup) {
 			if (!backup)
 				backup = ssk;
 			continue;
@@ -2508,6 +2508,12 @@ out:
 void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 		     struct mptcp_subflow_context *subflow)
 {
+	/* The first subflow can already be closed and still in the list */
+	if (subflow->close_event_done)
+		return;
+
+	subflow->close_event_done = true;
+
 	if (sk->sk_state == TCP_ESTABLISHED)
 		mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
 
@@ -2533,8 +2539,11 @@ static void __mptcp_close_subflow(struct sock *sk)
 
 	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+		int ssk_state = inet_sk_state_load(ssk);
 
-		if (inet_sk_state_load(ssk) != TCP_CLOSE)
+		if (ssk_state != TCP_CLOSE &&
+		    (ssk_state != TCP_CLOSE_WAIT ||
+		     inet_sk_state_load(sk) != TCP_ESTABLISHED))
 			continue;
 
 		/* 'subflow_data_ready' will re-sched once rx queue is empty */
@@ -2714,7 +2723,7 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
 	if (!ssk)
 		return;
 
-	pr_debug("MP_FAIL doesn't respond, reset the subflow");
+	pr_debug("MP_FAIL doesn't respond, reset the subflow\n");
 
 	slow = lock_sock_fast(ssk);
 	mptcp_subflow_reset(ssk);
@@ -2888,7 +2897,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
 		break;
 	default:
 		if (__mptcp_check_fallback(mptcp_sk(sk))) {
-			pr_debug("Fallback");
+			pr_debug("Fallback\n");
 			ssk->sk_shutdown |= how;
 			tcp_shutdown(ssk, how);
 
@@ -2898,7 +2907,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
 			WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
 			mptcp_schedule_work(sk);
 		} else {
-			pr_debug("Sending DATA_FIN on subflow %p", ssk);
+			pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
 			tcp_send_ack(ssk);
 			if (!mptcp_rtx_timer_pending(sk))
 				mptcp_reset_rtx_timer(sk);
@@ -2964,7 +2973,7 @@ static void mptcp_check_send_data_fin(struct sock *sk)
 	struct mptcp_subflow_context *subflow;
 	struct mptcp_sock *msk = mptcp_sk(sk);
 
-	pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
+	pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n",
 		 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
 		 msk->snd_nxt, msk->write_seq);
 
@@ -2988,7 +2997,7 @@ static void __mptcp_wr_shutdown(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 
-	pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
+	pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n",
 		 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
 		 !!mptcp_send_head(sk));
 
@@ -3003,7 +3012,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 
-	pr_debug("msk=%p", msk);
+	pr_debug("msk=%p\n", msk);
 
 	might_sleep();
 
@@ -3111,7 +3120,7 @@ cleanup:
 		mptcp_set_state(sk, TCP_CLOSE);
 
 	sock_hold(sk);
-	pr_debug("msk=%p state=%d", sk, sk->sk_state);
+	pr_debug("msk=%p state=%d\n", sk, sk->sk_state);
 	if (msk->token)
 		mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
 
@@ -3543,7 +3552,7 @@ static int mptcp_get_port(struct sock *sk, unsigned short snum)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 
-	pr_debug("msk=%p, ssk=%p", msk, msk->first);
+	pr_debug("msk=%p, ssk=%p\n", msk, msk->first);
 	if (WARN_ON_ONCE(!msk->first))
 		return -EINVAL;
 
@@ -3560,7 +3569,7 @@ void mptcp_finish_connect(struct sock *ssk)
 	sk = subflow->conn;
 	msk = mptcp_sk(sk);
 
-	pr_debug("msk=%p, token=%u", sk, subflow->token);
+	pr_debug("msk=%p, token=%u\n", sk, subflow->token);
 
 	subflow->map_seq = subflow->iasn;
 	subflow->map_subflow_seq = 1;
@@ -3589,7 +3598,7 @@ bool mptcp_finish_join(struct sock *ssk)
 	struct sock *parent = (void *)msk;
 	bool ret = true;
 
-	pr_debug("msk=%p, subflow=%p", msk, subflow);
+	pr_debug("msk=%p, subflow=%p\n", msk, subflow);
 
 	/* mptcp socket already closing? */
 	if (!mptcp_is_fully_established(parent)) {
@@ -3635,7 +3644,7 @@ err_prohibited:
 
 static void mptcp_shutdown(struct sock *sk, int how)
 {
-	pr_debug("sk=%p, how=%d", sk, how);
+	pr_debug("sk=%p, how=%d\n", sk, how);
 
 	if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
 		__mptcp_wr_shutdown(sk);
@@ -3856,7 +3865,7 @@ static int mptcp_listen(struct socket *sock, int backlog)
 	struct sock *ssk;
 	int err;
 
-	pr_debug("msk=%p", msk);
+	pr_debug("msk=%p\n", msk);
 
 	lock_sock(sk);
 
@@ -3895,7 +3904,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
 	struct sock *ssk, *newsk;
 
-	pr_debug("msk=%p", msk);
+	pr_debug("msk=%p\n", msk);
 
 	/* Buggy applications can call accept on socket states other then LISTEN
 	 * but no need to allocate the first subflow just to error out.
@@ -3904,12 +3913,12 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
 	if (!ssk)
 		return -EINVAL;
 
-	pr_debug("ssk=%p, listener=%p", ssk, mptcp_subflow_ctx(ssk));
+	pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk));
 	newsk = inet_csk_accept(ssk, arg);
 	if (!newsk)
 		return arg->err;
 
-	pr_debug("newsk=%p, subflow is mptcp=%d", newsk, sk_is_mptcp(newsk));
+	pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk));
 	if (sk_is_mptcp(newsk)) {
 		struct mptcp_subflow_context *subflow;
 		struct sock *new_mptcp_sock;
@@ -4002,7 +4011,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
 	sock_poll_wait(file, sock, wait);
 
 	state = inet_sk_state_load(sk);
-	pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
+	pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
 	if (state == TCP_LISTEN) {
 		struct sock *ssk = READ_ONCE(msk->first);
 
