| author | Dave Airlie <airlied@redhat.com> | 2023-10-13 11:04:53 +1000 | 
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2023-10-13 11:05:40 +1000 | 
| commit | dcad98b140554c325fa2ec7d42311edc7a79cdbb (patch) | |
| tree | 66c0216a655edab6c7ac45bbbbc0d90b9fe623f6 /drivers | |
| parent | 94f6f0550c625fab1f373bb86a6669b45e9748b3 (diff) | |
| parent | c1165df2be2fffe3adeeaa68f4ee4325108c5e4e (diff) | |
Merge tag 'drm-misc-fixes-2023-10-12' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes
Short summary of fixes pull:
 * atomic-helper: Relax checks for unregistered connectors
 * dma-buf: Work around race condition when retrieving fence timestamp (see the sketch below)
 * gem: Avoid OOB access in BO memory range
 * panel:
   * boe-tv101wum-nl6: Fix flickering
 * simpledrm: Fix error output
 * vmwgfx:
   * Fix size calculation in texture-state code
   * Ref GEM BOs in surfaces
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20231012111638.GA25037@linux-uq9g
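The dma-buf and scheduler hunks below switch from reading fence->timestamp directly to a dma_fence_timestamp() helper. The helper itself is added in include/linux/dma-fence.h, which is outside the 'drivers' diffstat shown here, so the following is only a sketch of the pattern, reconstructed from the flags and fields the removed code used: once a fence is signaled, the timestamp write may still be in flight, so fall back to the current time instead of busy-waiting.

static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
{
	/* Not signaled yet: there is no meaningful timestamp to report. */
	if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
		return ktime_get();

	/* Signaled, but the timestamp has not been published yet. */
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
		return ktime_get();

	return fence->timestamp;
}

This replaces the cpu_relax() busy-wait that sync_fill_fence_info() used while DMA_FENCE_FLAG_TIMESTAMP_BIT was still clear.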
Diffstat (limited to 'drivers')
| -rw-r--r-- | drivers/dma-buf/dma-fence-unwrap.c | 13 |
| -rw-r--r-- | drivers/dma-buf/sync_file.c | 9 |
| -rw-r--r-- | drivers/gpu/drm/drm_atomic_helper.c | 17 |
| -rw-r--r-- | drivers/gpu/drm/drm_gem.c | 6 |
| -rw-r--r-- | drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 4 |
| -rw-r--r-- | drivers/gpu/drm/scheduler/sched_main.c | 2 |
| -rw-r--r-- | drivers/gpu/drm/tiny/simpledrm.c | 2 |
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 7 |
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_bo.h | 17 |
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 6 |
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4 |
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 12 |
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 18 |
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 6 |
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2 |
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 12 |
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 4 |
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 31 |
18 files changed, 96 insertions, 76 deletions
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
index c625bb2b5d56..628af51c81af 100644
--- a/drivers/dma-buf/dma-fence-unwrap.c
+++ b/drivers/dma-buf/dma-fence-unwrap.c
@@ -76,16 +76,11 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
 		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
 			if (!dma_fence_is_signaled(tmp)) {
 				++count;
-			} else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
-					    &tmp->flags)) {
-				if (ktime_after(tmp->timestamp, timestamp))
-					timestamp = tmp->timestamp;
 			} else {
-				/*
-				 * Use the current time if the fence is
-				 * currently signaling.
-				 */
-				timestamp = ktime_get();
+				ktime_t t = dma_fence_timestamp(tmp);
+
+				if (ktime_after(t, timestamp))
+					timestamp = t;
 			}
 		}
 	}
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index af57799c86ce..2e9a316c596a 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -268,13 +268,10 @@ static int sync_fill_fence_info(struct dma_fence *fence,
 		sizeof(info->driver_name));
 
 	info->status = dma_fence_get_status(fence);
-	while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
-	       !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
-		cpu_relax();
 	info->timestamp_ns =
-		test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
-		ktime_to_ns(fence->timestamp) :
-		ktime_set(0, 0);
+		dma_fence_is_signaled(fence) ?
+			ktime_to_ns(dma_fence_timestamp(fence)) :
+			ktime_set(0, 0);
 
 	return info->status;
 }
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 292e38eb6218..60794fcde1d5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -290,7 +290,8 @@ static int
 update_connector_routing(struct drm_atomic_state *state,
 			 struct drm_connector *connector,
 			 struct drm_connector_state *old_connector_state,
-			 struct drm_connector_state *new_connector_state)
+			 struct drm_connector_state *new_connector_state,
+			 bool added_by_user)
 {
 	const struct drm_connector_helper_funcs *funcs;
 	struct drm_encoder *new_encoder;
@@ -339,9 +340,13 @@ update_connector_routing(struct drm_atomic_state *state,
 	 * there's a chance the connector may have been destroyed during the
 	 * process, but it's better to ignore that then cause
 	 * drm_atomic_helper_resume() to fail.
+	 *
+	 * Last, we want to ignore connector registration when the connector
+	 * was not pulled in the atomic state by user-space (ie, was pulled
+	 * in by the driver, e.g. when updating a DP-MST stream).
 	 */
 	if (!state->duplicated && drm_connector_is_unregistered(connector) &&
-	    crtc_state->active) {
+	    added_by_user && crtc_state->active) {
 		drm_dbg_atomic(connector->dev,
 			       "[CONNECTOR:%d:%s] is not registered\n",
 			       connector->base.id, connector->name);
@@ -620,7 +625,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 	struct drm_connector *connector;
 	struct drm_connector_state *old_connector_state, *new_connector_state;
 	int i, ret;
-	unsigned int connectors_mask = 0;
+	unsigned int connectors_mask = 0, user_connectors_mask = 0;
+
+	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
+		user_connectors_mask |= BIT(i);
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		bool has_connectors =
@@ -685,7 +693,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 		 */
 		ret = update_connector_routing(state, connector,
 					       old_connector_state,
-					       new_connector_state);
+					       new_connector_state,
+					       BIT(i) & user_connectors_mask);
 		if (ret)
 			return ret;
 		if (old_connector_state->crtc) {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 6129b89bb366..44a948b80ee1 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -540,7 +540,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 	struct page **pages;
 	struct folio *folio;
 	struct folio_batch fbatch;
-	int i, j, npages;
+	long i, j, npages;
 
 	if (WARN_ON(!obj->filp))
 		return ERR_PTR(-EINVAL);
@@ -564,11 +564,13 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 
 	i = 0;
 	while (i < npages) {
+		long nr;
 		folio = shmem_read_folio_gfp(mapping, i,
 				mapping_gfp_mask(mapping));
 		if (IS_ERR(folio))
 			goto fail;
-		for (j = 0; j < folio_nr_pages(folio); j++, i++)
+		nr = min(npages - i, folio_nr_pages(folio));
+		for (j = 0; j < nr; j++, i++)
 			pages[i] = folio_file_page(folio, i);
 
 		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 5ac926281d2c..c9087f474cbc 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1342,9 +1342,7 @@ static const struct panel_init_cmd starry_himax83102_j02_init_cmd[] = {
 	_INIT_DCS_CMD(0xB1, 0x01, 0xBF, 0x11),
 	_INIT_DCS_CMD(0xCB, 0x86),
 	_INIT_DCS_CMD(0xD2, 0x3C, 0xFA),
-	_INIT_DCS_CMD(0xE9, 0xC5),
-	_INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01),
-	_INIT_DCS_CMD(0xE9, 0x3F),
+	_INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01),
 	_INIT_DCS_CMD(0xE7, 0x02, 0x00, 0x28, 0x01, 0x7E, 0x0F, 0x7E, 0x10, 0xA0, 0x00, 0x00, 0x20, 0x40, 0x50, 0x40),
 	_INIT_DCS_CMD(0xBD, 0x02),
 	_INIT_DCS_CMD(0xD8, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0),
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 506371c42745..5a3a622fc672 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -929,7 +929,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 
 		if (next) {
 			next->s_fence->scheduled.timestamp =
-				job->s_fence->finished.timestamp;
+				dma_fence_timestamp(&job->s_fence->finished);
 			/* start TO timer for next job */
 			drm_sched_start_timeout(sched);
 		}
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index ff86ba1ae1b8..8ea120eb8674 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -745,7 +745,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
 
 		ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
 		if (ret) {
-			drm_err(dev, "could not acquire memory range %pr: %d\n", &res, ret);
+			drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
 			return ERR_PTR(ret);
 		}
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index c43853597776..2bfac3aad7b7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -34,6 +34,8 @@
 
 static void vmw_bo_release(struct vmw_bo *vbo)
 {
+	WARN_ON(vbo->tbo.base.funcs &&
+		kref_read(&vbo->tbo.base.refcount) != 0);
 	vmw_bo_unmap(vbo);
 	drm_gem_object_release(&vbo->tbo.base);
 }
@@ -497,7 +499,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
 		if (!(flags & drm_vmw_synccpu_allow_cs)) {
 			atomic_dec(&vmw_bo->cpu_writers);
 		}
-		vmw_user_bo_unref(vmw_bo);
+		vmw_user_bo_unref(&vmw_bo);
 	}
 
 	return ret;
@@ -539,7 +541,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 			return ret;
 
 		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
-		vmw_user_bo_unref(vbo);
+		vmw_user_bo_unref(&vbo);
 		if (unlikely(ret != 0)) {
 			if (ret == -ERESTARTSYS || ret == -EBUSY)
 				return -EBUSY;
@@ -612,7 +614,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
 	}
 
 	*out = to_vmw_bo(gobj);
-	ttm_bo_get(&(*out)->tbo);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 1d433fceed3d..0d496dc9c6af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -195,12 +195,19 @@ static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
 	return buf;
 }
 
-static inline void vmw_user_bo_unref(struct vmw_bo *vbo)
+static inline struct vmw_bo *vmw_user_bo_ref(struct vmw_bo *vbo)
 {
-	if (vbo) {
-		ttm_bo_put(&vbo->tbo);
-		drm_gem_object_put(&vbo->tbo.base);
-	}
+	drm_gem_object_get(&vbo->tbo.base);
+	return vbo;
+}
+
+static inline void vmw_user_bo_unref(struct vmw_bo **buf)
+{
+	struct vmw_bo *tmp_buf = *buf;
+
+	*buf = NULL;
+	if (tmp_buf)
+		drm_gem_object_put(&tmp_buf->tbo.base);
 }
 
 static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index c0b24d1cacbf..a7c07692262b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	 * for the new COTable. Initially pin the buffer object to make sure
 	 * we can use tryreserve without failure.
 	 */
-	ret = vmw_bo_create(dev_priv, &bo_params, &buf);
+	ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
 	if (ret) {
 		DRM_ERROR("Failed initializing new cotable MOB.\n");
 		goto out_done;
@@ -502,7 +502,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	vmw_resource_mob_attach(res);
 
 	/* Let go of the old mob. */
-	vmw_bo_unreference(&old_buf);
+	vmw_user_bo_unref(&old_buf);
 	res->id = vcotbl->type;
 
 	ret = dma_resv_reserve_fences(bo->base.resv, 1);
@@ -521,7 +521,7 @@ out_map_new:
 out_wait:
 	ttm_bo_unpin(bo);
 	ttm_bo_unreserve(bo);
-	vmw_bo_unreference(&buf);
+	vmw_user_bo_unref(&buf);
 
 out_done:
 	MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 58bfdf203eca..3cd5090dedfc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -853,6 +853,10 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
 /**
  * GEM related functionality - vmwgfx_gem.c
  */
+struct vmw_bo_params;
+int vmw_gem_object_create(struct vmw_private *vmw,
+			  struct vmw_bo_params *params,
+			  struct vmw_bo **p_vbo);
 extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
 					     struct drm_file *filp,
 					     uint32_t size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 98e0723ca6f5..36987ef3fc30 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1151,7 +1151,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 				 SVGAMobId *id,
 				 struct vmw_bo **vmw_bo_p)
 {
-	struct vmw_bo *vmw_bo;
+	struct vmw_bo *vmw_bo, *tmp_bo;
 	uint32_t handle = *id;
 	struct vmw_relocation *reloc;
 	int ret;
@@ -1164,7 +1164,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	}
 	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
-	vmw_user_bo_unref(vmw_bo);
+	tmp_bo = vmw_bo;
+	vmw_user_bo_unref(&tmp_bo);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1206,7 +1207,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 				   SVGAGuestPtr *ptr,
 				   struct vmw_bo **vmw_bo_p)
 {
-	struct vmw_bo *vmw_bo;
+	struct vmw_bo *vmw_bo, *tmp_bo;
 	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
 	int ret;
@@ -1220,7 +1221,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
 			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
-	vmw_user_bo_unref(vmw_bo);
+	tmp_bo = vmw_bo;
+	vmw_user_bo_unref(&tmp_bo);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1619,7 +1621,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 {
 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
-	  ((unsigned long) header + header->size + sizeof(header));
+	  ((unsigned long) header + header->size + sizeof(*header));
 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
 		((unsigned long) header + sizeof(*cmd));
 	struct vmw_resource *ctx;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index c0da89e16e6f..8b1eb0061610 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -111,6 +111,20 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
 	.vm_ops = &vmw_vm_ops,
 };
 
+int vmw_gem_object_create(struct vmw_private *vmw,
+			  struct vmw_bo_params *params,
+			  struct vmw_bo **p_vbo)
+{
+	int ret = vmw_bo_create(vmw, params, p_vbo);
+
+	if (ret != 0)
+		goto out_no_bo;
+
+	(*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
+out_no_bo:
+	return ret;
+}
+
 int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
 				      struct drm_file *filp,
 				      uint32_t size,
@@ -126,12 +140,10 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
 		.pin = false
 	};
 
-	ret = vmw_bo_create(dev_priv, &params, p_vbo);
+	ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
 	if (ret != 0)
 		goto out_no_bo;
 
-	(*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
-
 	ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle);
 out_no_bo:
 	return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 1489ad73c103..818b7f109f53 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1471,8 +1471,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
 	/* Reserve and switch the backing mob. */
 	mutex_lock(&res->dev_priv->cmdbuf_mutex);
 	(void) vmw_resource_reserve(res, false, true);
-	vmw_bo_unreference(&res->guest_memory_bo);
-	res->guest_memory_bo = vmw_bo_reference(bo_mob);
+	vmw_user_bo_unref(&res->guest_memory_bo);
+	res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
 	res->guest_memory_offset = 0;
 	vmw_resource_unreserve(res, false, false, false, NULL, 0);
 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
@@ -1666,7 +1666,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 err_out:
 	/* vmw_user_lookup_handle takes one ref so does new_fb */
 	if (bo)
-		vmw_user_bo_unref(bo);
+		vmw_user_bo_unref(&bo);
 	if (surface)
 		vmw_surface_unreference(&surface);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index fb85f244c3d0..c45b4724e414 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -451,7 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 
 	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
 
-	vmw_user_bo_unref(buf);
+	vmw_user_bo_unref(&buf);
 
 out_unlock:
 	mutex_unlock(&overlay->mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 71eeabf001c8..ca300c7427d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -141,7 +141,7 @@ static void vmw_resource_release(struct kref *kref)
 		if (res->coherent)
 			vmw_bo_dirty_release(res->guest_memory_bo);
 		ttm_bo_unreserve(bo);
-		vmw_bo_unreference(&res->guest_memory_bo);
+		vmw_user_bo_unref(&res->guest_memory_bo);
 	}
 
 	if (likely(res->hw_destroy != NULL)) {
@@ -338,7 +338,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 		return 0;
 	}
 
-	ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
+	ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
 	if (unlikely(ret != 0))
 		goto out_no_bo;
 
@@ -457,11 +457,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 			vmw_resource_mob_detach(res);
 			if (res->coherent)
 				vmw_bo_dirty_release(res->guest_memory_bo);
-			vmw_bo_unreference(&res->guest_memory_bo);
+			vmw_user_bo_unref(&res->guest_memory_bo);
 		}
 
 		if (new_guest_memory_bo) {
-			res->guest_memory_bo = vmw_bo_reference(new_guest_memory_bo);
+			res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);
 
 			/*
 			 * The validation code should already have added a
@@ -551,7 +551,7 @@ out_no_reserve:
 	ttm_bo_put(val_buf->bo);
 	val_buf->bo = NULL;
 	if (guest_memory_dirty)
-		vmw_bo_unreference(&res->guest_memory_bo);
+		vmw_user_bo_unref(&res->guest_memory_bo);
 
 	return ret;
 }
@@ -727,7 +727,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
 		goto out_no_validate;
 	else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
 		WARN_ON_ONCE(vmw_resource_mob_attached(res));
-		vmw_bo_unreference(&res->guest_memory_bo);
+		vmw_user_bo_unref(&res->guest_memory_bo);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 1e81ff2422cf..a01ca3226d0a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -180,7 +180,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 
 	res->guest_memory_size = size;
 	if (byte_code) {
-		res->guest_memory_bo = vmw_bo_reference(byte_code);
+		res->guest_memory_bo = vmw_user_bo_ref(byte_code);
 		res->guest_memory_offset = offset;
 	}
 	shader->size = size;
@@ -809,7 +809,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 				    shader_type, num_input_sig,
 				    num_output_sig, tfile, shader_handle);
 out_bad_arg:
-	vmw_user_bo_unref(buffer);
+	vmw_user_bo_unref(&buffer);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 5db403ee8261..3829be282ff0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -686,9 +686,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 	    container_of(base, struct vmw_user_surface, prime.base);
 	struct vmw_resource *res = &user_srf->srf.res;
 
-	if (res->guest_memory_bo)
-		drm_gem_object_put(&res->guest_memory_bo->tbo.base);
-
 	*p_base = NULL;
 	vmw_resource_unreference(&res);
 }
@@ -855,23 +852,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	 * expect a backup buffer to be present.
 	 */
 	if (dev_priv->has_mob && req->shareable) {
-		uint32_t backup_handle;
-
-		ret = vmw_gem_object_create_with_handle(dev_priv,
-							file_priv,
-							res->guest_memory_size,
-							&backup_handle,
-							&res->guest_memory_bo);
+		struct vmw_bo_params params = {
+			.domain = VMW_BO_DOMAIN_SYS,
+			.busy_domain = VMW_BO_DOMAIN_SYS,
+			.bo_type = ttm_bo_type_device,
+			.size = res->guest_memory_size,
+			.pin = false
+		};
+
+		ret = vmw_gem_object_create(dev_priv,
+					    &params,
+					    &res->guest_memory_bo);
 		if (unlikely(ret != 0)) {
 			vmw_resource_unreference(&res);
 			goto out_unlock;
 		}
-		vmw_bo_reference(res->guest_memory_bo);
-		/*
-		 * We don't expose the handle to the userspace and surface
-		 * already holds a gem reference
-		 */
-		drm_gem_handle_delete(file_priv, backup_handle);
 	}
 
 	tmp = vmw_resource_reference(&srf->res);
@@ -1512,7 +1507,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 		if (ret == 0) {
 			if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
 				VMW_DEBUG_USER("Surface backup buffer too small.\n");
-				vmw_bo_unreference(&res->guest_memory_bo);
+				vmw_user_bo_unref(&res->guest_memory_bo);
 				ret = -EINVAL;
 				goto out_unlock;
 			} else {
@@ -1526,8 +1521,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 							res->guest_memory_size,
 							&backup_handle,
 							&res->guest_memory_bo);
-		if (ret == 0)
-			vmw_bo_reference(res->guest_memory_bo);
 	}
 
 	if (unlikely(ret != 0)) {
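For reference, a usage sketch (hypothetical caller, not part of the patch) of the reworked vmwgfx buffer-object helpers above, assuming the vmw_user_bo_lookup() signature shown in the diff: lookup now hands back only the GEM reference (the extra ttm_bo_get() is gone), and vmw_user_bo_unref() takes a double pointer so it can clear the caller's pointer when the reference is dropped.

static int example_use_handle(struct drm_file *filp, u32 handle)
{
	struct vmw_bo *vbo;
	int ret;

	/* Takes a single GEM reference on success. */
	ret = vmw_user_bo_lookup(filp, handle, &vbo);
	if (ret)
		return ret;

	/* ... use vbo ... */

	/* Drops the GEM reference and sets vbo = NULL. */
	vmw_user_bo_unref(&vbo);
	return 0;
}

NULL-ing the caller's pointer makes the use-after-unref fixed here harder to reintroduce; callers that still need the value afterwards copy it into a temporary first, as the tmp_bo pattern in vmwgfx_execbuf.c does.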
