Diffstat (limited to 'drivers/gpu/drm/drm_syncobj.c')
-rw-r--r--	drivers/gpu/drm/drm_syncobj.c	154
1 file changed, 145 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 0c2be8360525..f7003d1ec5ef 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -136,6 +136,10 @@
  * requirement is inherited from the wait-before-signal behavior required by
  * the Vulkan timeline semaphore API.
  *
+ * Alternatively, &DRM_IOCTL_SYNCOBJ_EVENTFD can be used to wait without
+ * blocking: an eventfd will be signaled when the syncobj is. This is useful to
+ * integrate the wait in an event loop.
+ *
  *
  * Import/export of syncobjs
  * -------------------------
@@ -185,6 +189,7 @@
 
 #include <linux/anon_inodes.h>
 #include <linux/dma-fence-unwrap.h>
+#include <linux/eventfd.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/sched/signal.h>
@@ -212,6 +217,20 @@ struct syncobj_wait_entry {
 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
 				      struct syncobj_wait_entry *wait);
 
+struct syncobj_eventfd_entry {
+	struct list_head node;
+	struct dma_fence *fence;
+	struct dma_fence_cb fence_cb;
+	struct drm_syncobj *syncobj;
+	struct eventfd_ctx *ev_fd_ctx;
+	u64 point;
+	u32 flags;
+};
+
+static void
+syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
+			   struct syncobj_eventfd_entry *entry);
+
 /**
  * drm_syncobj_find - lookup and reference a sync object.
  * @file_private: drm file private pointer
@@ -274,6 +293,28 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
 	spin_unlock(&syncobj->lock);
 }
 
+static void
+syncobj_eventfd_entry_free(struct syncobj_eventfd_entry *entry)
+{
+	eventfd_ctx_put(entry->ev_fd_ctx);
+	dma_fence_put(entry->fence);
+	/* This happens either inside the syncobj lock, or after the node has
+	 * already been removed from the list.
+	 */
+	list_del(&entry->node);
+	kfree(entry);
+}
+
+static void
+drm_syncobj_add_eventfd(struct drm_syncobj *syncobj,
+			struct syncobj_eventfd_entry *entry)
+{
+	spin_lock(&syncobj->lock);
+	list_add_tail(&entry->node, &syncobj->ev_fd_list);
+	syncobj_eventfd_entry_func(syncobj, entry);
+	spin_unlock(&syncobj->lock);
+}
+
 /**
  * drm_syncobj_add_point - add new timeline point to the syncobj
  * @syncobj: sync object to add timeline point do
@@ -288,7 +329,8 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
 			   struct dma_fence *fence,
 			   uint64_t point)
 {
-	struct syncobj_wait_entry *cur, *tmp;
+	struct syncobj_wait_entry *wait_cur, *wait_tmp;
+	struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
 	struct dma_fence *prev;
 
 	dma_fence_get(fence);
@@ -302,8 +344,10 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
 	dma_fence_chain_init(chain, prev, fence, point);
 	rcu_assign_pointer(syncobj->fence, &chain->base);
 
-	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
-		syncobj_wait_syncobj_func(syncobj, cur);
+	list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
+		syncobj_wait_syncobj_func(syncobj, wait_cur);
+	list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+		syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
 	spin_unlock(&syncobj->lock);
 
 	/* Walk the chain once to trigger garbage collection */
@@ -323,7 +367,8 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 			       struct dma_fence *fence)
 {
 	struct dma_fence *old_fence;
-	struct syncobj_wait_entry *cur, *tmp;
+	struct syncobj_wait_entry *wait_cur, *wait_tmp;
+	struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
 
 	if (fence)
 		dma_fence_get(fence);
@@ -335,8 +380,10 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 	rcu_assign_pointer(syncobj->fence, fence);
 
 	if (fence != old_fence) {
-		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
-			syncobj_wait_syncobj_func(syncobj, cur);
+		list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
+			syncobj_wait_syncobj_func(syncobj, wait_cur);
+		list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+			syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
 	}
 
 	spin_unlock(&syncobj->lock);
@@ -353,10 +400,10 @@ EXPORT_SYMBOL(drm_syncobj_replace_fence);
  */
 static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
 {
-	struct dma_fence *fence = dma_fence_allocate_private_stub();
+	struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
 
-	if (IS_ERR(fence))
-		return PTR_ERR(fence);
+	if (!fence)
+		return -ENOMEM;
 
 	drm_syncobj_replace_fence(syncobj, fence);
 	dma_fence_put(fence);
@@ -472,7 +519,13 @@ void drm_syncobj_free(struct kref *kref)
 	struct drm_syncobj *syncobj = container_of(kref,
 						   struct drm_syncobj,
 						   refcount);
+	struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
+
 	drm_syncobj_replace_fence(syncobj, NULL);
+
+	list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+		syncobj_eventfd_entry_free(ev_fd_cur);
+
 	kfree(syncobj);
 }
 EXPORT_SYMBOL(drm_syncobj_free);
@@ -501,6 +554,7 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
 
 	kref_init(&syncobj->refcount);
 	INIT_LIST_HEAD(&syncobj->cb_list);
+	INIT_LIST_HEAD(&syncobj->ev_fd_list);
 	spin_lock_init(&syncobj->lock);
 
 	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
@@ -1304,6 +1358,88 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
 	return ret;
 }
 
+static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
+					     struct dma_fence_cb *cb)
+{
+	struct syncobj_eventfd_entry *entry =
+		container_of(cb, struct syncobj_eventfd_entry, fence_cb);
+
+	eventfd_signal(entry->ev_fd_ctx, 1);
+	syncobj_eventfd_entry_free(entry);
+}
+
+static void
+syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
+			   struct syncobj_eventfd_entry *entry)
+{
+	int ret;
+	struct dma_fence *fence;
+
+	/* This happens inside the syncobj lock */
+	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+	ret = dma_fence_chain_find_seqno(&fence, entry->point);
+	if (ret != 0 || !fence) {
+		dma_fence_put(fence);
+		return;
+	}
+
+	list_del_init(&entry->node);
+	entry->fence = fence;
+
+	if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) {
+		eventfd_signal(entry->ev_fd_ctx, 1);
+		syncobj_eventfd_entry_free(entry);
+	} else {
+		ret = dma_fence_add_callback(fence, &entry->fence_cb,
+					     syncobj_eventfd_entry_fence_func);
+		if (ret == -ENOENT) {
+			eventfd_signal(entry->ev_fd_ctx, 1);
+			syncobj_eventfd_entry_free(entry);
+		}
+	}
+}
+
+int
+drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_private)
+{
+	struct drm_syncobj_eventfd *args = data;
+	struct drm_syncobj *syncobj;
+	struct eventfd_ctx *ev_fd_ctx;
+	struct syncobj_eventfd_entry *entry;
+
+	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+		return -EOPNOTSUPP;
+
+	if (args->flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)
+		return -EINVAL;
+
+	if (args->pad)
+		return -EINVAL;
+
+	syncobj = drm_syncobj_find(file_private, args->handle);
+	if (!syncobj)
+		return -ENOENT;
+
+	ev_fd_ctx = eventfd_ctx_fdget(args->fd);
+	if (IS_ERR(ev_fd_ctx))
+		return PTR_ERR(ev_fd_ctx);
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		eventfd_ctx_put(ev_fd_ctx);
+		return -ENOMEM;
+	}
+	entry->syncobj = syncobj;
+	entry->ev_fd_ctx = ev_fd_ctx;
+	entry->point = args->point;
+	entry->flags = args->flags;
+
+	drm_syncobj_add_eventfd(syncobj, entry);
+	drm_syncobj_put(syncobj);
+
+	return 0;
+}
+
 int
 drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
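For context, here is a minimal userspace sketch (not part of this commit) of how the new ioctl slots into an event loop. The helper name wait_syncobj_point is hypothetical; it assumes an already-open DRM fd and an existing syncobj handle, and uses struct drm_syncobj_eventfd and DRM_IOCTL_SYNCOBJ_EVENTFD from the uAPI header added alongside this change (available via libdrm's drm.h; compile with `pkg-config --cflags libdrm`).

/* Hypothetical consumer of DRM_IOCTL_SYNCOBJ_EVENTFD: register an eventfd
 * against a timeline point, then wait for it with poll(2).
 */
#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm.h>

/* drm_fd: an open DRM device node; handle/point: an existing syncobj
 * timeline point. Returns 0 once the point has signaled, -1 on error.
 */
static int wait_syncobj_point(int drm_fd, uint32_t handle, uint64_t point)
{
	struct drm_syncobj_eventfd args;
	struct pollfd pfd;
	uint64_t count;
	int ev_fd = eventfd(0, EFD_CLOEXEC);

	if (ev_fd < 0)
		return -1;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.flags = 0;	/* wait for the signal itself; see note below */
	args.point = point;
	args.fd = ev_fd;

	if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args) != 0) {
		close(ev_fd);
		return -1;
	}

	/* A real program would hand ev_fd to its main event loop instead
	 * of blocking here.
	 */
	pfd.fd = ev_fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, -1) != 1 ||
	    read(ev_fd, &count, sizeof(count)) != sizeof(count)) {
		close(ev_fd);
		return -1;
	}

	close(ev_fd);
	return 0;
}

Two behaviors worth noting from the diff above: with DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE the eventfd is signaled as soon as a fence materializes for the point (dma_fence_chain_find_seqno succeeds), without waiting for it to signal; otherwise a dma-fence callback defers the eventfd_signal() until the fence completes. Each registration is also one-shot: the kernel entry signals the eventfd once and frees itself, and entries still pending when the syncobj is destroyed are cleaned up in drm_syncobj_free().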
