diff options
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.h')
| -rw-r--r-- | drivers/gpu/drm/msm/msm_gem.h | 137 | 
1 file changed, 113 insertions, 24 deletions
| diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index a1bf741b9b89..b3a0a880cbab 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -64,7 +64,6 @@ struct msm_gem_object {  	 *  	 */  	struct list_head mm_list; -	struct msm_gpu *gpu;     /* non-null if active */  	/* Transiently in the process of submit ioctl, objects associated  	 * with the submit are on submit->bo_list.. this only lasts for @@ -85,50 +84,124 @@ struct msm_gem_object {  	 * an IOMMU.  Also used for stolen/splashscreen buffer.  	 */  	struct drm_mm_node *vram_node; -	struct mutex lock; /* Protects resources associated with bo */  	char name[32]; /* Identifier to print for the debugfs files */ -	atomic_t active_count; +	int active_count;  };  #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) +int msm_gem_mmap_obj(struct drm_gem_object *obj, +			struct vm_area_struct *vma); +int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); +uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); +int msm_gem_get_iova(struct drm_gem_object *obj, +		struct msm_gem_address_space *aspace, uint64_t *iova); +int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, +		struct msm_gem_address_space *aspace, uint64_t *iova, +		u64 range_start, u64 range_end); +int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj, +		struct msm_gem_address_space *aspace, uint64_t *iova); +int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, +		struct msm_gem_address_space *aspace, uint64_t *iova); +uint64_t msm_gem_iova(struct drm_gem_object *obj, +		struct msm_gem_address_space *aspace); +void msm_gem_unpin_iova_locked(struct drm_gem_object *obj, +		struct msm_gem_address_space *aspace); +void msm_gem_unpin_iova(struct drm_gem_object *obj, +		struct msm_gem_address_space *aspace); +struct page **msm_gem_get_pages(struct drm_gem_object *obj); +void msm_gem_put_pages(struct drm_gem_object *obj); +int msm_gem_dumb_create(struct 
drm_file *file, struct drm_device *dev, +		struct drm_mode_create_dumb *args); +int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, +		uint32_t handle, uint64_t *offset); +void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj); +void *msm_gem_get_vaddr(struct drm_gem_object *obj); +void *msm_gem_get_vaddr_active(struct drm_gem_object *obj); +void msm_gem_put_vaddr_locked(struct drm_gem_object *obj); +void msm_gem_put_vaddr(struct drm_gem_object *obj); +int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv); +int msm_gem_sync_object(struct drm_gem_object *obj, +		struct msm_fence_context *fctx, bool exclusive); +void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu); +void msm_gem_active_put(struct drm_gem_object *obj); +int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); +int msm_gem_cpu_fini(struct drm_gem_object *obj); +void msm_gem_free_object(struct drm_gem_object *obj); +int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, +		uint32_t size, uint32_t flags, uint32_t *handle, char *name); +struct drm_gem_object *msm_gem_new(struct drm_device *dev, +		uint32_t size, uint32_t flags); +struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev, +		uint32_t size, uint32_t flags); +void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, +		uint32_t flags, struct msm_gem_address_space *aspace, +		struct drm_gem_object **bo, uint64_t *iova); +void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size, +		uint32_t flags, struct msm_gem_address_space *aspace, +		struct drm_gem_object **bo, uint64_t *iova); +void msm_gem_kernel_put(struct drm_gem_object *bo, +		struct msm_gem_address_space *aspace, bool locked); +struct drm_gem_object *msm_gem_import(struct drm_device *dev, +		struct dma_buf *dmabuf, struct sg_table *sgt); +__printf(2, 3) +void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...); +#ifdef CONFIG_DEBUG_FS +void 
msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); +void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); +#endif + +static inline void +msm_gem_lock(struct drm_gem_object *obj) +{ +	dma_resv_lock(obj->resv, NULL); +} + +static inline bool __must_check +msm_gem_trylock(struct drm_gem_object *obj) +{ +	return dma_resv_trylock(obj->resv); +} + +static inline int +msm_gem_lock_interruptible(struct drm_gem_object *obj) +{ +	return dma_resv_lock_interruptible(obj->resv, NULL); +} + +static inline void +msm_gem_unlock(struct drm_gem_object *obj) +{ +	dma_resv_unlock(obj->resv); +} + +static inline bool +msm_gem_is_locked(struct drm_gem_object *obj) +{ +	return dma_resv_is_locked(obj->resv); +} +  static inline bool is_active(struct msm_gem_object *msm_obj)  { -	return atomic_read(&msm_obj->active_count); +	WARN_ON(!msm_gem_is_locked(&msm_obj->base)); +	return msm_obj->active_count;  }  static inline bool is_purgeable(struct msm_gem_object *msm_obj)  { -	WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));  	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&  			!msm_obj->base.dma_buf && !msm_obj->base.import_attach;  }  static inline bool is_vunmapable(struct msm_gem_object *msm_obj)  { +	WARN_ON(!msm_gem_is_locked(&msm_obj->base));  	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;  } -/* The shrinker can be triggered while we hold objA->lock, and need - * to grab objB->lock to purge it.  Lockdep just sees these as a single - * class of lock, so we use subclasses to teach it the difference. - * - * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and - * OBJ_LOCK_SHRINKER is used by shrinker. - * - * It is *essential* that we never go down paths that could trigger the - * shrinker for a purgable object.  This is ensured by checking that - * msm_obj->madv == MSM_MADV_WILLNEED. 
- */ -enum msm_gem_lock { -	OBJ_LOCK_NORMAL, -	OBJ_LOCK_SHRINKER, -}; - -void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass); -void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass); -void msm_gem_free_work(struct work_struct *work); +void msm_gem_purge(struct drm_gem_object *obj); +void msm_gem_vunmap(struct drm_gem_object *obj);  /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,   * associated with the cmdstream submission for synchronization (and @@ -136,6 +209,7 @@ void msm_gem_free_work(struct work_struct *work);   * lasts for the duration of the submit-ioctl.   */  struct msm_gem_submit { +	struct kref ref;  	struct drm_device *dev;  	struct msm_gpu *gpu;  	struct msm_gem_address_space *aspace; @@ -157,7 +231,10 @@ struct msm_gem_submit {  		uint32_t type;  		uint32_t size;  /* in dwords */  		uint64_t iova; +		uint32_t offset;/* in dwords */  		uint32_t idx;   /* cmdstream buffer idx in bos[] */ +		uint32_t nr_relocs; +		struct drm_msm_gem_submit_reloc *relocs;  	} *cmd;  /* array of size nr_cmds */  	struct {  		uint32_t flags; @@ -169,6 +246,18 @@ struct msm_gem_submit {  	} bos[];  }; +void __msm_gem_submit_destroy(struct kref *kref); + +static inline void msm_gem_submit_get(struct msm_gem_submit *submit) +{ +	kref_get(&submit->ref); +} + +static inline void msm_gem_submit_put(struct msm_gem_submit *submit) +{ +	kref_put(&submit->ref, __msm_gem_submit_destroy); +} +  /* helper to determine of a buffer in submit should be dumped, used for both   * devcoredump and debugfs cmdstream dumping:   */ | 
