Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_submit.c')
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_submit.c  |  256
1 file changed, 244 insertions(+), 12 deletions(-)
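For reference, each entry in the new in_syncobjs/out_syncobjs arrays parsed below is a drm_virtgpu_execbuffer_syncobj descriptor. That UAPI lives in include/uapi/drm/virtgpu_drm.h rather than in this file, so the sketch below is a reader aid based on the companion UAPI change, not part of this diff:

	/* Assumed layout from the companion virtgpu_drm.h UAPI patch. */
	#define VIRTGPU_EXECBUF_SYNCOBJ_RESET		0x01
	#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS		(VIRTGPU_EXECBUF_SYNCOBJ_RESET)

	struct drm_virtgpu_execbuffer_syncobj {
		__u32 handle;	/* DRM syncobj handle */
		__u32 flags;	/* only the RESET flag is valid, and only for in-syncobjs */
		__u64 point;	/* timeline point; 0 selects binary syncobj semantics */
	};

Because the kernel copies only min(syncobj_stride, sizeof(struct drm_virtgpu_execbuffer_syncobj)) bytes per entry, the descriptor can grow in later UAPI revisions without breaking older userspace.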
diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
index cf3c04b16a7a..3c00135ead45 100644
--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
+++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
@@ -14,11 +14,24 @@
 #include <linux/uaccess.h>
 
 #include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
 #include <drm/virtgpu_drm.h>
 
 #include "virtgpu_drv.h"
 
+struct virtio_gpu_submit_post_dep {
+	struct drm_syncobj *syncobj;
+	struct dma_fence_chain *chain;
+	u64 point;
+};
+
 struct virtio_gpu_submit {
+	struct virtio_gpu_submit_post_dep *post_deps;
+	unsigned int num_out_syncobjs;
+
+	struct drm_syncobj **in_syncobjs;
+	unsigned int num_in_syncobjs;
+
 	struct virtio_gpu_object_array *buflist;
 	struct drm_virtgpu_execbuffer *exbuf;
 	struct virtio_gpu_fence *out_fence;
@@ -59,18 +72,211 @@ static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,
 	return 0;
 }
 
+static void virtio_gpu_free_syncobjs(struct drm_syncobj **syncobjs,
+				     u32 nr_syncobjs)
+{
+	u32 i = nr_syncobjs;
+
+	while (i--) {
+		if (syncobjs[i])
+			drm_syncobj_put(syncobjs[i]);
+	}
+
+	kvfree(syncobjs);
+}
+
+static int
+virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
+{
+	struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+	struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
+	size_t syncobj_stride = exbuf->syncobj_stride;
+	u32 num_in_syncobjs = exbuf->num_in_syncobjs;
+	struct drm_syncobj **syncobjs;
+	int ret = 0, i;
+
+	if (!num_in_syncobjs)
+		return 0;
+
+	/*
+	 * kvcalloc() at first tries to allocate memory using kmalloc and
+	 * falls back to vmalloc only on failure. It also uses __GFP_NOWARN
+	 * internally for allocations larger than a page size, preventing a
+	 * storm of KMSG warnings.
+	 */
+	syncobjs = kvcalloc(num_in_syncobjs, sizeof(*syncobjs), GFP_KERNEL);
+	if (!syncobjs)
+		return -ENOMEM;
+
+	for (i = 0; i < num_in_syncobjs; i++) {
+		u64 address = exbuf->in_syncobjs + i * syncobj_stride;
+		struct dma_fence *fence;
+
+		memset(&syncobj_desc, 0, sizeof(syncobj_desc));
+
+		if (copy_from_user(&syncobj_desc,
+				   u64_to_user_ptr(address),
+				   min(syncobj_stride, sizeof(syncobj_desc)))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		if (syncobj_desc.flags & ~VIRTGPU_EXECBUF_SYNCOBJ_FLAGS) {
+			ret = -EINVAL;
+			break;
+		}
+
+		ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,
+					     syncobj_desc.point, 0, &fence);
+		if (ret)
+			break;
+
+		ret = virtio_gpu_dma_fence_wait(submit, fence);
+
+		dma_fence_put(fence);
+		if (ret)
+			break;
+
+		if (syncobj_desc.flags & VIRTGPU_EXECBUF_SYNCOBJ_RESET) {
+			syncobjs[i] = drm_syncobj_find(submit->file,
+						       syncobj_desc.handle);
+			if (!syncobjs[i]) {
+				ret = -EINVAL;
+				break;
+			}
+		}
+	}
+
+	if (ret) {
+		virtio_gpu_free_syncobjs(syncobjs, i);
+		return ret;
+	}
+
+	submit->num_in_syncobjs = num_in_syncobjs;
+	submit->in_syncobjs = syncobjs;
+
+	return ret;
+}
+
+static void virtio_gpu_reset_syncobjs(struct drm_syncobj **syncobjs,
+				      u32 nr_syncobjs)
+{
+	u32 i;
+
+	for (i = 0; i < nr_syncobjs; i++) {
+		if (syncobjs[i])
+			drm_syncobj_replace_fence(syncobjs[i], NULL);
+	}
+}
+
+static void
+virtio_gpu_free_post_deps(struct virtio_gpu_submit_post_dep *post_deps,
+			  u32 nr_syncobjs)
+{
+	u32 i = nr_syncobjs;
+
+	while (i--) {
+		kfree(post_deps[i].chain);
+		drm_syncobj_put(post_deps[i].syncobj);
+	}
+
+	kvfree(post_deps);
+}
+
+static int virtio_gpu_parse_post_deps(struct virtio_gpu_submit *submit)
+{
+	struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+	struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
+	struct virtio_gpu_submit_post_dep *post_deps;
+	u32 num_out_syncobjs = exbuf->num_out_syncobjs;
+	size_t syncobj_stride = exbuf->syncobj_stride;
+	int ret = 0, i;
+
+	if (!num_out_syncobjs)
+		return 0;
+
+	post_deps = kvcalloc(num_out_syncobjs, sizeof(*post_deps), GFP_KERNEL);
+	if (!post_deps)
+		return -ENOMEM;
+
+	for (i = 0; i < num_out_syncobjs; i++) {
+		u64 address = exbuf->out_syncobjs + i * syncobj_stride;
+
+		memset(&syncobj_desc, 0, sizeof(syncobj_desc));
+
+		if (copy_from_user(&syncobj_desc,
+				   u64_to_user_ptr(address),
+				   min(syncobj_stride, sizeof(syncobj_desc)))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		post_deps[i].point = syncobj_desc.point;
+
+		if (syncobj_desc.flags) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if (syncobj_desc.point) {
+			post_deps[i].chain = dma_fence_chain_alloc();
+			if (!post_deps[i].chain) {
+				ret = -ENOMEM;
+				break;
+			}
+		}
+
+		post_deps[i].syncobj = drm_syncobj_find(submit->file,
+							syncobj_desc.handle);
+		if (!post_deps[i].syncobj) {
+			kfree(post_deps[i].chain);
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	if (ret) {
+		virtio_gpu_free_post_deps(post_deps, i);
+		return ret;
+	}
+
+	submit->num_out_syncobjs = num_out_syncobjs;
+	submit->post_deps = post_deps;
+
+	return 0;
+}
+
+static void
+virtio_gpu_process_post_deps(struct virtio_gpu_submit *submit)
+{
+	struct virtio_gpu_submit_post_dep *post_deps = submit->post_deps;
+
+	if (post_deps) {
+		struct dma_fence *fence = &submit->out_fence->f;
+		u32 i;
+
+		for (i = 0; i < submit->num_out_syncobjs; i++) {
+			if (post_deps[i].chain) {
+				drm_syncobj_add_point(post_deps[i].syncobj,
+						      post_deps[i].chain,
+						      fence, post_deps[i].point);
+				post_deps[i].chain = NULL;
+			} else {
+				drm_syncobj_replace_fence(post_deps[i].syncobj,
+							  fence);
+			}
+		}
+	}
+}
+
 static int virtio_gpu_fence_event_create(struct drm_device *dev,
 					 struct drm_file *file,
 					 struct virtio_gpu_fence *fence,
 					 u32 ring_idx)
 {
-	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct virtio_gpu_fence_event *e = NULL;
 	int ret;
 
-	if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
-		return 0;
-
 	e = kzalloc(sizeof(*e), GFP_KERNEL);
 	if (!e)
 		return -ENOMEM;
@@ -122,6 +328,10 @@ static int virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit)
 
 static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit)
 {
+	virtio_gpu_reset_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
+	virtio_gpu_free_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
+	virtio_gpu_free_post_deps(submit->post_deps, submit->num_out_syncobjs);
+
 	if (!IS_ERR(submit->buf))
 		kvfree(submit->buf);
 
@@ -164,18 +374,31 @@ static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fence *out_fence;
+	bool drm_fence_event;
 	int err;
 
 	memset(submit, 0, sizeof(*submit));
 
-	out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
-	if (!out_fence)
-		return -ENOMEM;
-
-	err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
-	if (err) {
-		dma_fence_put(&out_fence->f);
-		return err;
+	if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
+	    (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
+		drm_fence_event = true;
+	else
+		drm_fence_event = false;
+
+	if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
+	    exbuf->num_out_syncobjs ||
+	    exbuf->num_bo_handles ||
+	    drm_fence_event)
+		out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
+	else
+		out_fence = NULL;
+
+	if (drm_fence_event) {
+		err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+		if (err) {
+			dma_fence_put(&out_fence->f);
+			return err;
+		}
 	}
 
 	submit->out_fence = out_fence;
@@ -283,6 +506,14 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto cleanup;
 
+	ret = virtio_gpu_parse_post_deps(&submit);
+	if (ret)
+		goto cleanup;
+
+	ret = virtio_gpu_parse_deps(&submit);
+	if (ret)
+		goto cleanup;
+
 	/*
 	 * Await in-fences in the end of the job submission path to
 	 * optimize the path by proceeding directly to the submission
@@ -303,6 +534,7 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	 * the job submission path.
 	 */
 	virtio_gpu_install_out_fence_fd(&submit);
+	virtio_gpu_process_post_deps(&submit);
 	virtio_gpu_complete_submit(&submit);
 cleanup:
 	virtio_gpu_cleanup_submit(&submit);
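To show how the new ioctl fields fit together, here is a minimal, hypothetical userspace sketch (error handling omitted; the helper name and surrounding setup are invented for illustration, and it assumes the UAPI layout sketched above):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/virtgpu_drm.h>

	/* Hypothetical helper: wait on one timeline point, signal another. */
	static int submit_with_syncobjs(int drm_fd, void *cmds, uint32_t cmds_size,
					uint32_t wait_handle, uint64_t wait_point,
					uint32_t signal_handle, uint64_t signal_point)
	{
		struct drm_virtgpu_execbuffer_syncobj in = {
			.handle = wait_handle,
			.point = wait_point,	/* 0 would mean a binary syncobj */
		};
		struct drm_virtgpu_execbuffer_syncobj out = {
			.handle = signal_handle,
			.point = signal_point,	/* non-zero: kernel adds a fence-chain point */
		};
		struct drm_virtgpu_execbuffer eb = {
			.command = (uintptr_t)cmds,
			.size = cmds_size,
			.syncobj_stride = sizeof(in),
			.in_syncobjs = (uintptr_t)&in,
			.num_in_syncobjs = 1,
			.out_syncobjs = (uintptr_t)&out,
			.num_out_syncobjs = 1,
		};

		return ioctl(drm_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
	}

Note how this maps onto the diff above: a non-zero num_out_syncobjs makes virtio_gpu_init_submit() allocate an out-fence even without VIRTGPU_EXECBUF_FENCE_FD_OUT, and virtio_gpu_process_post_deps() later attaches that fence to the signal syncobj, via a dma_fence_chain point when the descriptor's point is non-zero.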

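On the consumer side, the signaled point can be awaited with libdrm's existing syncobj helpers; a hedged sketch (real libdrm entry points, hypothetical usage):

	#include <stdint.h>
	#include <xf86drm.h>

	/* Block until the timeline point signaled by the submission above
	 * has both materialized and signaled. WAIT_FOR_SUBMIT tolerates the
	 * fence not having been attached to the syncobj yet. */
	static int wait_signal_point(int drm_fd, uint32_t handle, uint64_t point)
	{
		return drmSyncobjTimelineWait(drm_fd, &handle, &point, 1,
					      INT64_MAX,
					      DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
					      NULL);
	}

The handles themselves would come from drmSyncobjCreate() beforehand; this mirrors what virtio_gpu_parse_deps() does in-kernel with drm_syncobj_find_fence() on the wait side.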