-rw-r--r--  .mailmap                       |  1
-rw-r--r--  MAINTAINERS                    |  2
-rw-r--r--  drivers/vhost/net.c            | 40
-rw-r--r--  drivers/vhost/scsi.c           |  2
-rw-r--r--  include/linux/virtio_config.h  | 11
-rw-r--r--  include/uapi/linux/vduse.h     |  2
-rw-r--r--  kernel/vhost_task.c            |  3
7 files changed, 29 insertions, 32 deletions
diff --git a/.mailmap b/.mailmap
--- a/.mailmap
+++ b/.mailmap
@@ -623,6 +623,7 @@ Paulo Alcantara <pc@manguebit.org> <palcantara@suse.com>
 Paulo Alcantara <pc@manguebit.org> <pc@manguebit.com>
 Pavankumar Kondeti <quic_pkondeti@quicinc.com> <pkondeti@codeaurora.org>
 Peter A Jonsson <pj@ludd.ltu.se>
+Peter Hilber <peter.hilber@oss.qualcomm.com> <quic_philber@quicinc.com>
 Peter Oruba <peter.oruba@amd.com>
 Peter Oruba <peter@oruba.de>
 Pierre-Louis Bossart <pierre-louis.bossart@linux.dev> <pierre-louis.bossart@linux.intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 0e897f2ba9eb..7ab92f471cb8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -26785,7 +26785,7 @@ F:	drivers/nvdimm/nd_virtio.c
 F:	drivers/nvdimm/virtio_pmem.c
 
 VIRTIO RTC DRIVER
-M:	Peter Hilber <quic_philber@quicinc.com>
+M:	Peter Hilber <peter.hilber@oss.qualcomm.com>
 L:	virtualization@lists.linux.dev
 S:	Maintained
 F:	drivers/virtio/virtio_rtc_*
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c6508fe0d5c8..35ded4330431 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -765,11 +765,11 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
 	int err;
 	int sent_pkts = 0;
 	bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
-	bool busyloop_intr;
 	bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
 
 	do {
-		busyloop_intr = false;
+		bool busyloop_intr = false;
+
 		if (nvq->done_idx == VHOST_NET_BATCH)
 			vhost_tx_batch(net, nvq, sock, &msg);
 
@@ -780,10 +780,18 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
 			break;
 		/* Nothing new? Wait for eventfd to tell us they refilled. */
 		if (head == vq->num) {
-			/* Kicks are disabled at this point, break loop and
-			 * process any remaining batched packets. Queue will
-			 * be re-enabled afterwards.
+			/* Flush batched packets to handle pending RX
+			 * work (if busyloop_intr is set) and to avoid
+			 * unnecessary virtqueue kicks.
 			 */
+			vhost_tx_batch(net, nvq, sock, &msg);
+			if (unlikely(busyloop_intr)) {
+				vhost_poll_queue(&vq->poll);
+			} else if (unlikely(vhost_enable_notify(&net->dev,
+								vq))) {
+				vhost_disable_notify(&net->dev, vq);
+				continue;
+			}
 			break;
 		}
 
@@ -839,22 +847,7 @@ done:
 		++nvq->done_idx;
 	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
 
-	/* Kicks are still disabled, dispatch any remaining batched msgs. */
 	vhost_tx_batch(net, nvq, sock, &msg);
-
-	if (unlikely(busyloop_intr))
-		/* If interrupted while doing busy polling, requeue the
-		 * handler to be fair handle_rx as well as other tasks
-		 * waiting on cpu.
-		 */
-		vhost_poll_queue(&vq->poll);
-	else
-		/* All of our work has been completed; however, before
-		 * leaving the TX handler, do one last check for work,
-		 * and requeue handler if necessary. If there is no work,
-		 * queue will be reenabled.
-		 */
-		vhost_net_busy_poll_try_queue(net, vq);
 }
 
 static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
@@ -1014,7 +1007,7 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
 }
 
 static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
-				      bool *busyloop_intr, unsigned int count)
+				      bool *busyloop_intr, unsigned int *count)
 {
 	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
 	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
@@ -1024,7 +1017,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
 
 	if (!len && rvq->busyloop_timeout) {
 		/* Flush batched heads first */
-		vhost_net_signal_used(rnvq, count);
+		vhost_net_signal_used(rnvq, *count);
+		*count = 0;
 		/* Both tx vq and rx socket were polled here */
 		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);
 
@@ -1180,7 +1174,7 @@ static void handle_rx(struct vhost_net *net)
 
 	do {
 		sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
-						      &busyloop_intr, count);
+						      &busyloop_intr, &count);
 		if (!sock_len)
 			break;
 		sock_len += sock_hlen;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index abf51332a5c5..98e4f68f4e3c 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -2884,7 +2884,7 @@ vhost_scsi_make_tport(struct target_fabric_configfs *tf,
 check_len:
 	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
 		pr_err("Emulated %s Address: %s, exceeds"
-			" max: %d\n", name, vhost_scsi_dump_proto_id(tport),
+			" max: %d\n", vhost_scsi_dump_proto_id(tport), name,
 			VHOST_SCSI_NAMELEN);
 		kfree(tport);
 		return ERR_PTR(-EINVAL);
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 8bf156dde554..7427b79d6f3d 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -193,14 +193,15 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
 }
 
 static inline void virtio_get_features(struct virtio_device *vdev,
-				       u64 *features)
+				       u64 *features_out)
 {
 	if (vdev->config->get_extended_features) {
-		vdev->config->get_extended_features(vdev, features);
+		vdev->config->get_extended_features(vdev, features_out);
 		return;
 	}
 
-	virtio_features_from_u64(features, vdev->config->get_features(vdev));
+	virtio_features_from_u64(features_out,
+				 vdev->config->get_features(vdev));
 }
 
 /**
@@ -326,11 +327,11 @@ int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
 
 static inline
 bool virtio_get_shm_region(struct virtio_device *vdev,
-			   struct virtio_shm_region *region, u8 id)
+			   struct virtio_shm_region *region_out, u8 id)
 {
 	if (!vdev->config->get_shm_region)
 		return false;
-	return vdev->config->get_shm_region(vdev, region, id);
+	return vdev->config->get_shm_region(vdev, region_out, id);
 }
 
 static inline bool virtio_is_little_endian(struct virtio_device *vdev)
diff --git a/include/uapi/linux/vduse.h b/include/uapi/linux/vduse.h
index 68a627d04afa..10ad71aa00d6 100644
--- a/include/uapi/linux/vduse.h
+++ b/include/uapi/linux/vduse.h
@@ -237,7 +237,7 @@ struct vduse_iova_umem {
  * struct vduse_iova_info - information of one IOVA region
  * @start: start of the IOVA region
  * @last: last of the IOVA region
- * @capability: capability of the IOVA regsion
+ * @capability: capability of the IOVA region
  * @reserved: for future use, needs to be initialized to zero
  *
  * Structure used by VDUSE_IOTLB_GET_INFO ioctl to get information of
diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c
index bc738fa90c1d..27107dcc1cbf 100644
--- a/kernel/vhost_task.c
+++ b/kernel/vhost_task.c
@@ -100,6 +100,7 @@ void vhost_task_stop(struct vhost_task *vtsk)
 	 * freeing it below.
 	 */
 	wait_for_completion(&vtsk->exited);
+	put_task_struct(vtsk->task);
 	kfree(vtsk);
 }
 EXPORT_SYMBOL_GPL(vhost_task_stop);
@@ -148,7 +149,7 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *),
 		return ERR_CAST(tsk);
 	}
 
-	vtsk->task = tsk;
+	vtsk->task = get_task_struct(tsk);
 	return vtsk;
 }
 EXPORT_SYMBOL_GPL(vhost_task_create);
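The drivers/vhost/net.c hunks above move the "ring empty" handling of handle_tx_copy() inside the loop: batched packets are flushed first, then the handler is either requeued (if busy polling was interrupted) or guest notifications are re-enabled, retrying once when buffers arrive in the race window. A minimal standalone sketch of that control flow, using hypothetical stand-ins (flush_batched, requeue_handler, enable_kicks, disable_kicks) rather than the real vhost helpers:

#include <stdbool.h>

/*
 * Hypothetical stand-ins, for illustration only -- not the real vhost API.
 * enable_kicks() mirrors vhost_enable_notify(): it returns true when the
 * guest refilled the ring before notifications were re-armed.
 */
static void flush_batched(void)   { }	/* cf. vhost_tx_batch()       */
static void requeue_handler(void) { }	/* cf. vhost_poll_queue()     */
static void disable_kicks(void)   { }	/* cf. vhost_disable_notify() */
static bool enable_kicks(void)    { return false; }

/*
 * Ring-empty handling as restructured inside the TX loop: returns true if
 * the loop should continue (new buffers raced in), false to break out.
 */
static bool on_ring_empty(bool busyloop_intr)
{
	/* Flush batched packets first so pending RX work is handled and
	 * no virtqueue kick is left behind. */
	flush_batched();

	if (busyloop_intr) {
		/* Busy polling was interrupted: requeue the handler to be
		 * fair to handle_rx and other tasks waiting on the CPU. */
		requeue_handler();
	} else if (enable_kicks()) {
		/* Guest refilled the ring in the race window: keep kicks
		 * disabled and go process the new buffers. */
		disable_kicks();
		return true;
	}
	return false;	/* genuinely idle: leave the TX loop */
}

The ordering is the point of the restructuring: flushing happens before notifications are re-armed, so no batched packet is left waiting behind a disabled kick.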
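The kernel/vhost_task.c hunks pair a get_task_struct() at creation time with a put_task_struct() after wait_for_completion(), so the stored task pointer cannot be freed while the vhost_task still uses it. A minimal sketch of that pin/unpin pattern, using a hypothetical worker_ctx structure rather than the real struct vhost_task:

#include <linux/completion.h>
#include <linux/sched/task.h>	/* get_task_struct(), put_task_struct() */
#include <linux/slab.h>

/* Hypothetical context, for illustration only -- not struct vhost_task. */
struct worker_ctx {
	struct task_struct *task;	/* holds a reference while stored */
	struct completion exited;
};

static void worker_ctx_bind(struct worker_ctx *ctx, struct task_struct *tsk)
{
	/* Pin the task so its task_struct cannot be freed while ctx->task
	 * is still dereferenced elsewhere (e.g. for wake-ups). */
	ctx->task = get_task_struct(tsk);
}

static void worker_ctx_stop(struct worker_ctx *ctx)
{
	/* Wait for the worker to signal that it is done with ctx... */
	wait_for_completion(&ctx->exited);
	/* ...then drop the pinned reference and free the context. */
	put_task_struct(ctx->task);
	kfree(ctx);
}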