diff options
Diffstat (limited to 'io_uring/net.c')
| -rw-r--r-- | io_uring/net.c | 135 | 
1 file changed, 50 insertions, 85 deletions
| diff --git a/io_uring/net.c b/io_uring/net.c index 8944eb679024..24040bc3916a 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -97,6 +97,11 @@ struct io_recvzc {  	struct io_zcrx_ifq		*ifq;  }; +static int io_sg_from_iter_iovec(struct sk_buff *skb, +				 struct iov_iter *from, size_t length); +static int io_sg_from_iter(struct sk_buff *skb, +			   struct iov_iter *from, size_t length); +  int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)  {  	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown); @@ -176,16 +181,6 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)  	return hdr;  } -/* assign new iovec to kmsg, if we need to */ -static void io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg, -			     struct iovec *iov) -{ -	if (iov) { -		req->flags |= REQ_F_NEED_CLEANUP; -		io_vec_reset_iovec(&kmsg->vec, iov, kmsg->msg.msg_iter.nr_segs); -	} -} -  static inline void io_mshot_prep_retry(struct io_kiocb *req,  				       struct io_async_msghdr *kmsg)  { @@ -217,7 +212,11 @@ static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg  			     &iomsg->msg.msg_iter, io_is_compat(req->ctx));  	if (unlikely(ret < 0))  		return ret; -	io_net_vec_assign(req, iomsg, iov); + +	if (iov) { +		req->flags |= REQ_F_NEED_CLEANUP; +		io_vec_reset_iovec(&iomsg->vec, iov, iomsg->msg.msg_iter.nr_segs); +	}  	return 0;  } @@ -325,25 +324,6 @@ static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,  	return 0;  } -static int io_sendmsg_copy_hdr(struct io_kiocb *req, -			       struct io_async_msghdr *iomsg) -{ -	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); -	struct user_msghdr msg; -	int ret; - -	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE, NULL); -	if (unlikely(ret)) -		return ret; - -	if (!(req->flags & REQ_F_BUFFER_SELECT)) -		ret = io_net_import_vec(req, iomsg, msg.msg_iov, msg.msg_iovlen, -					ITER_SOURCE); -	/* save 
msg_control as sys_sendmsg() overwrites it */ -	sr->msg_control = iomsg->msg.msg_control_user; -	return ret; -} -  void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)  {  	struct io_async_msghdr *io = req->async_data; @@ -379,6 +359,8 @@ static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)  		kmsg->msg.msg_name = &kmsg->addr;  		kmsg->msg.msg_namelen = addr_len;  	} +	if (sr->flags & IORING_RECVSEND_FIXED_BUF) +		return 0;  	if (!io_do_buffer_select(req)) {  		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,  				  &kmsg->msg.msg_iter); @@ -392,31 +374,24 @@ static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe  {  	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);  	struct io_async_msghdr *kmsg = req->async_data; - -	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); - -	return io_sendmsg_copy_hdr(req, kmsg); -} - -static int io_sendmsg_zc_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe) -{ -	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); -	struct io_async_msghdr *kmsg = req->async_data;  	struct user_msghdr msg;  	int ret; -	if (!(sr->flags & IORING_RECVSEND_FIXED_BUF)) -		return io_sendmsg_setup(req, sqe); -  	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); -  	ret = io_msg_copy_hdr(req, kmsg, &msg, ITER_SOURCE, NULL);  	if (unlikely(ret))  		return ret; +	/* save msg_control as sys_sendmsg() overwrites it */  	sr->msg_control = kmsg->msg.msg_control_user; -	kmsg->msg.msg_iter.nr_segs = msg.msg_iovlen; -	return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov, msg.msg_iovlen); +	if (sr->flags & IORING_RECVSEND_FIXED_BUF) { +		kmsg->msg.msg_iter.nr_segs = msg.msg_iovlen; +		return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov, +					 msg.msg_iovlen); +	} +	if (req->flags & REQ_F_BUFFER_SELECT) +		return 0; +	return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE);  }  #define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE) @@ 
-427,12 +402,6 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)  	sr->done_io = 0;  	sr->retry = false; - -	if (req->opcode != IORING_OP_SEND) { -		if (sqe->addr2 || sqe->file_index) -			return -EINVAL; -	} -  	sr->len = READ_ONCE(sqe->len);  	sr->flags = READ_ONCE(sqe->ioprio);  	if (sr->flags & ~SENDMSG_FLAGS) @@ -458,6 +427,8 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)  		return -ENOMEM;  	if (req->opcode != IORING_OP_SENDMSG)  		return io_send_setup(req, sqe); +	if (unlikely(sqe->addr2 || sqe->file_index)) +		return -EINVAL;  	return io_sendmsg_setup(req, sqe);  } @@ -1302,11 +1273,12 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)  {  	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);  	struct io_ring_ctx *ctx = req->ctx; +	struct io_async_msghdr *iomsg;  	struct io_kiocb *notif; +	int ret;  	zc->done_io = 0;  	zc->retry = false; -	req->flags |= REQ_F_POLL_NO_LAZY;  	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))  		return -EINVAL; @@ -1320,7 +1292,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)  	notif->cqe.user_data = req->cqe.user_data;  	notif->cqe.res = 0;  	notif->cqe.flags = IORING_CQE_F_NOTIF; -	req->flags |= REQ_F_NEED_CLEANUP; +	req->flags |= REQ_F_NEED_CLEANUP | REQ_F_POLL_NO_LAZY;  	zc->flags = READ_ONCE(sqe->ioprio);  	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) { @@ -1335,11 +1307,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)  		}  	} -	if (req->opcode != IORING_OP_SEND_ZC) { -		if (unlikely(sqe->addr2 || sqe->file_index)) -			return -EINVAL; -	} -  	zc->len = READ_ONCE(sqe->len);  	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;  	req->buf_index = READ_ONCE(sqe->buf_index); @@ -1349,13 +1316,28 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)  	if (io_is_compat(req->ctx))  		zc->msg_flags |= MSG_CMSG_COMPAT; 
-	if (unlikely(!io_msg_alloc_async(req))) +	iomsg = io_msg_alloc_async(req); +	if (unlikely(!iomsg))  		return -ENOMEM; +  	if (req->opcode == IORING_OP_SEND_ZC) { -		req->flags |= REQ_F_IMPORT_BUFFER; -		return io_send_setup(req, sqe); +		if (zc->flags & IORING_RECVSEND_FIXED_BUF) +			req->flags |= REQ_F_IMPORT_BUFFER; +		ret = io_send_setup(req, sqe); +	} else { +		if (unlikely(sqe->addr2 || sqe->file_index)) +			return -EINVAL; +		ret = io_sendmsg_setup(req, sqe); +	} +	if (unlikely(ret)) +		return ret; + +	if (!(zc->flags & IORING_RECVSEND_FIXED_BUF)) { +		iomsg->msg.sg_from_iter = io_sg_from_iter_iovec; +		return io_notif_account_mem(zc->notif, iomsg->msg.msg_iter.count);  	} -	return io_sendmsg_zc_setup(req, sqe); +	iomsg->msg.sg_from_iter = io_sg_from_iter; +	return 0;  }  static int io_sg_from_iter_iovec(struct sk_buff *skb, @@ -1412,27 +1394,13 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)  {  	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);  	struct io_async_msghdr *kmsg = req->async_data; -	int ret; -	if (sr->flags & IORING_RECVSEND_FIXED_BUF) { -		sr->notif->buf_index = req->buf_index; -		ret = io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter, -					(u64)(uintptr_t)sr->buf, sr->len, -					ITER_SOURCE, issue_flags); -		if (unlikely(ret)) -			return ret; -		kmsg->msg.sg_from_iter = io_sg_from_iter; -	} else { -		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter); -		if (unlikely(ret)) -			return ret; -		ret = io_notif_account_mem(sr->notif, sr->len); -		if (unlikely(ret)) -			return ret; -		kmsg->msg.sg_from_iter = io_sg_from_iter_iovec; -	} +	WARN_ON_ONCE(!(sr->flags & IORING_RECVSEND_FIXED_BUF)); -	return ret; +	sr->notif->buf_index = req->buf_index; +	return io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter, +				(u64)(uintptr_t)sr->buf, sr->len, +				ITER_SOURCE, issue_flags);  }  int io_send_zc(struct io_kiocb *req, unsigned int issue_flags) @@ -1513,8 +1481,6 @@ int 
io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)  	unsigned flags;  	int ret, min_ret = 0; -	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec; -  	if (req->flags & REQ_F_IMPORT_BUFFER) {  		unsigned uvec_segs = kmsg->msg.msg_iter.nr_segs;  		int ret; @@ -1523,7 +1489,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)  					&kmsg->vec, uvec_segs, issue_flags);  		if (unlikely(ret))  			return ret; -		kmsg->msg.sg_from_iter = io_sg_from_iter;  		req->flags &= ~REQ_F_IMPORT_BUFFER;  	} | 
