author	Kuniyuki Iwashima <kuniyu@amazon.com>	2024-06-04 09:52:38 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-07-05 09:00:21 +0200
commit	b56ff0d20a4a60b9e6424d0609acccbbde573ea8 (patch)
tree	1e10c19dd65f9cb6c1a1587b24e3f153574aa64b
parent	f5cf5e139ec736f1ac18cda8d5e5fc80d7eef787 (diff)
af_unix: Use unix_recvq_full_lockless() in unix_stream_connect().
[ Upstream commit 45d872f0e65593176d880ec148f41ad7c02e40a7 ]

Once sk->sk_state is changed to TCP_LISTEN, it never changes.

unix_accept() takes advantage of this characteristic; it does not hold
the listener's unix_state_lock() and only acquires the recvq lock to
pop one skb.

This means unix_state_lock() does not prevent the queue length from
changing in unix_stream_connect(). Thus, we need to use
unix_recvq_full_lockless() to avoid a data race.

Now we remove unix_recvq_full() as no one uses it.

Note that we can remove READ_ONCE() for sk->sk_max_ack_backlog in
unix_recvq_full_lockless() for the following reasons:

  (1) For SOCK_DGRAM, it is a written-once field in unix_create1()

  (2) For SOCK_STREAM and SOCK_SEQPACKET, it is changed under the
      listener's unix_state_lock() in unix_listen(), and we hold
      the lock in unix_stream_connect()

Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
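The race the patch addresses: unix_stream_connect() samples the listener's receive-queue length while unix_accept() concurrently pops skbs holding only the recvq lock, so a plain load of qlen is a data race. Below is a minimal userspace sketch of the annotation pattern; the READ_ONCE()/WRITE_ONCE() stand-ins and struct fake_queue are simplified assumptions for illustration, not the kernel's real definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel's annotations; the real ones
 * live in include/asm-generic/rwonce.h and are more involved. */
#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

struct fake_queue {
	unsigned int qlen;	/* updated by the "accept" side */
};

/* Reader side: runs without the listener's state lock, so the load
 * must be annotated to stay a single, untorn access that the
 * compiler cannot refetch or tear. */
static int recvq_full_lockless(const struct fake_queue *q,
			       unsigned int backlog)
{
	return READ_ONCE(q->qlen) > backlog;
}

int main(void)
{
	struct fake_queue q = { .qlen = 11 };

	/* With a backlog of 10, the queue reads as full. */
	printf("full=%d\n", recvq_full_lockless(&q, 10));
	return 0;
}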
-rw-r--r--	net/unix/af_unix.c	10
1 file changed, 2 insertions, 8 deletions
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 4a4b6d254453..dfcafbb8cd0e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -194,15 +194,9 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}
-static inline int unix_recvq_full(const struct sock *sk)
-{
- return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
-}
-
static inline int unix_recvq_full_lockless(const struct sock *sk)
{
- return skb_queue_len_lockless(&sk->sk_receive_queue) >
- READ_ONCE(sk->sk_max_ack_backlog);
+ return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
struct sock *unix_peer_get(struct sock *s)
@@ -1306,7 +1300,7 @@ restart:
if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
- if (unix_recvq_full(other)) {
+ if (unix_recvq_full_lockless(other)) {
err = -EAGAIN;
if (!timeo)
goto out_unlock;
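For reference, the distinction the patch leans on is between the locked and lockless queue-length helpers. A simplified rendering follows (modeled on include/linux/skbuff.h; the exact definitions may differ by kernel version):

/* Requires the queue lock held: a plain load of qlen. */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/* Lockless variant: READ_ONCE() keeps the unlocked read a single,
 * untorn load even while another CPU updates qlen under the lock. */
static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
{
	return READ_ONCE(list_->qlen);
}

Since sk_max_ack_backlog is either written once (SOCK_DGRAM) or only changed under the listener's unix_state_lock(), which unix_stream_connect() already holds, the patch can drop READ_ONCE() on that side of the comparison.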