Diffstat (limited to 'drivers/gpu')
45 files changed, 344 insertions, 198 deletions
| diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 1cf78f4dd339..1e8e1123ddf4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)  			DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",  				 adev->clock.default_dispclk / 100);  			adev->clock.default_dispclk = 60000; +		} else if (adev->clock.default_dispclk <= 60000) { +			DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n", +				 adev->clock.default_dispclk / 100); +			adev->clock.default_dispclk = 62500;  		}  		adev->clock.dp_extclk =  			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f2d705e6a75a..ab6b0d0febab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = {  	{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},  	{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},  	{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, +	{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},  	{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},  	/* Vega 10 */  	{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index 8c9bc75a9c2d..8a0818b23ea4 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c @@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)  	struct drm_device *dev = crtc->dev;  	struct amdgpu_device *adev = dev->dev_private;  	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); -	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; +	ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;  	memset(&args, 0, sizeof(args)); @@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)  void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)  {  	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); -	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; +	ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;  	memset(&args, 0, sizeof(args)); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 0cdeb6a2e4a0..5dffa27afa45 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1207,8 +1207,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,  	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;  	if (amdgpu_crtc->base.enabled && num_heads && mode) { -		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; -		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); +		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, +					    (u32)mode->clock); +		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, +					  (u32)mode->clock); +		line_time = min(line_time, (u32)65535);  		/* watermark for high clocks */  		if (adev->pm.dpm_enabled) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 773654a19749..47bbc87f96d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1176,8 +1176,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,  	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;  	if (amdgpu_crtc->base.enabled && num_heads && mode) { -		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; -		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); +		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, +					    (u32)mode->clock); +		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, +					  (u32)mode->clock); +		line_time = min(line_time, (u32)65535);  		/* watermark for high clocks */  		if (adev->pm.dpm_enabled) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 1f3552967ba3..d8c9a959493e 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -983,8 +983,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,  	fixed20_12 a, b, c;  	if (amdgpu_crtc->base.enabled && num_heads && mode) { -		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; -		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); +		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, +					    (u32)mode->clock); +		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, +					  (u32)mode->clock); +		line_time = min(line_time, (u32)65535);  		priority_a_cnt = 0;  		priority_b_cnt = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 3c558c170e5e..db30c6ba563a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1091,8 +1091,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,  	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;  	if (amdgpu_crtc->base.enabled && num_heads && mode) { -		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; -		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); +		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, +					    (u32)mode->clock); +		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, +					  (u32)mode->clock); +		line_time = min(line_time, (u32)65535);  		/* watermark for high clocks */  		if (adev->pm.dpm_enabled) { diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig index 40d2827a6d19..53e78d092d18 100644 --- a/drivers/gpu/drm/bridge/synopsys/Kconfig +++ b/drivers/gpu/drm/bridge/synopsys/Kconfig @@ -1,6 +1,7 @@  config DRM_DW_HDMI  	tristate  	select DRM_KMS_HELPER +	select REGMAP_MMIO  config DRM_DW_HDMI_AHB_AUDIO  	tristate "Synopsys Designware AHB Audio interface" diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 9f847615ac74..48ca2457df8c 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,  	if (!connector)  		return -ENOENT; -	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); -	encoder = drm_connector_get_encoder(connector); -	if (encoder) -		out_resp->encoder_id = encoder->base.id; -	else -		out_resp->encoder_id = 0; - -	ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, -			(uint32_t __user *)(unsigned long)(out_resp->props_ptr), -			(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), -			
&out_resp->count_props); -	drm_modeset_unlock(&dev->mode_config.connection_mutex); -	if (ret) -		goto out_unref; -  	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)  		if (connector->encoder_ids[i] != 0)  			encoders_count++; @@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,  				if (put_user(connector->encoder_ids[i],  					     encoder_ptr + copied)) {  					ret = -EFAULT; -					goto out_unref; +					goto out;  				}  				copied++;  			} @@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,  			if (copy_to_user(mode_ptr + copied,  					 &u_mode, sizeof(u_mode))) {  				ret = -EFAULT; +				mutex_unlock(&dev->mode_config.mutex); +  				goto out;  			}  			copied++;  		}  	}  	out_resp->count_modes = mode_count; -out:  	mutex_unlock(&dev->mode_config.mutex); -out_unref: + +	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); +	encoder = drm_connector_get_encoder(connector); +	if (encoder) +		out_resp->encoder_id = encoder->base.id; +	else +		out_resp->encoder_id = 0; + +	/* Only grab properties after probing, to make sure EDID and other +	 * properties reflect the latest status. */ +	ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, +			(uint32_t __user *)(unsigned long)(out_resp->props_ptr), +			(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), +			&out_resp->count_props); +	drm_modeset_unlock(&dev->mode_config.connection_mutex); + +out:  	drm_connector_put(connector);  	return ret; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index c4a091e87426..e437fba1209d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -106,9 +106,10 @@ struct etnaviv_gem_submit {  	struct etnaviv_gpu *gpu;  	struct ww_acquire_ctx ticket;  	struct dma_fence *fence; +	u32 flags;  	unsigned int nr_bos;  	struct etnaviv_gem_submit_bo bos[0]; -	u32 flags; +	/* No new members here, the previous one is variable-length! 
*/  };  int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index de80ee1b71df..1013765274da 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -172,7 +172,7 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)  	for (i = 0; i < submit->nr_bos; i++) {  		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;  		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; -		bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT); +		bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);  		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,  						 explicit); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d689e511744e..4bd1467c17b1 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data)  	struct file_stats *stats = data;  	struct i915_vma *vma; +	lockdep_assert_held(&obj->base.dev->struct_mutex); +  	stats->count++;  	stats->total += obj->base.size;  	if (!obj->bind_count) @@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)  		struct drm_i915_gem_request *request;  		struct task_struct *task; +		mutex_lock(&dev->struct_mutex); +  		memset(&stats, 0, sizeof(stats));  		stats.file_priv = file->driver_priv;  		spin_lock(&file->table_lock); @@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)  		 * still alive (e.g. get_pid(current) => fork() => exit()).  		 * Therefore, we need to protect this ->comm access using RCU.  		 */ -		mutex_lock(&dev->struct_mutex);  		request = list_first_entry_or_null(&file_priv->mm.request_list,  						   struct drm_i915_gem_request,  						   client_link); @@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)  				PIDTYPE_PID);  		print_file_stats(m, task ? task->comm : "<unknown>", stats);  		rcu_read_unlock(); +  		mutex_unlock(&dev->struct_mutex);  	}  	mutex_unlock(&dev->filelist_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 462031cbd77f..615f0a855222 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)  	struct page *page;  	unsigned long last_pfn = 0;	/* suppress gcc warning */  	unsigned int max_segment; +	gfp_t noreclaim;  	int ret; -	gfp_t gfp;  	/* Assert that the object is not currently in any GPU domain. 
As it  	 * wasn't in the GTT, there shouldn't be any way it could have been in @@ -2315,22 +2315,31 @@ rebuild_st:  	 * Fail silently without starting the shrinker  	 */  	mapping = obj->base.filp->f_mapping; -	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); -	gfp |= __GFP_NORETRY | __GFP_NOWARN; +	noreclaim = mapping_gfp_constraint(mapping, +					   ~(__GFP_IO | __GFP_RECLAIM)); +	noreclaim |= __GFP_NORETRY | __GFP_NOWARN; +  	sg = st->sgl;  	st->nents = 0;  	for (i = 0; i < page_count; i++) { -		page = shmem_read_mapping_page_gfp(mapping, i, gfp); -		if (unlikely(IS_ERR(page))) { -			i915_gem_shrink(dev_priv, -					page_count, -					I915_SHRINK_BOUND | -					I915_SHRINK_UNBOUND | -					I915_SHRINK_PURGEABLE); +		const unsigned int shrink[] = { +			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE, +			0, +		}, *s = shrink; +		gfp_t gfp = noreclaim; + +		do {  			page = shmem_read_mapping_page_gfp(mapping, i, gfp); -		} -		if (unlikely(IS_ERR(page))) { -			gfp_t reclaim; +			if (likely(!IS_ERR(page))) +				break; + +			if (!*s) { +				ret = PTR_ERR(page); +				goto err_sg; +			} + +			i915_gem_shrink(dev_priv, 2 * page_count, *s++); +			cond_resched();  			/* We've tried hard to allocate the memory by reaping  			 * our own buffer, now let the real VM do its job and @@ -2340,15 +2349,26 @@ rebuild_st:  			 * defer the oom here by reporting the ENOMEM back  			 * to userspace.  			 */ -			reclaim = mapping_gfp_mask(mapping); -			reclaim |= __GFP_NORETRY; /* reclaim, but no oom */ - -			page = shmem_read_mapping_page_gfp(mapping, i, reclaim); -			if (IS_ERR(page)) { -				ret = PTR_ERR(page); -				goto err_sg; +			if (!*s) { +				/* reclaim and warn, but no oom */ +				gfp = mapping_gfp_mask(mapping); + +				/* Our bo are always dirty and so we require +				 * kswapd to reclaim our pages (direct reclaim +				 * does not effectively begin pageout of our +				 * buffers on its own). However, direct reclaim +				 * only waits for kswapd when under allocation +				 * congestion. So as a result __GFP_RECLAIM is +				 * unreliable and fails to actually reclaim our +				 * dirty pages -- unless you try over and over +				 * again with !__GFP_NORETRY. However, we still +				 * want to fail this allocation rather than +				 * trigger the out-of-memory killer and for +				 * this we want the future __GFP_MAYFAIL. 
+				 */  			} -		} +		} while (1); +  		if (!i ||  		    sg->length >= max_segment ||  		    page_to_pfn(page) != last_pfn + 1) { @@ -4222,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)  	mapping = obj->base.filp->f_mapping;  	mapping_set_gfp_mask(mapping, mask); +	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));  	i915_gem_object_init(obj, &i915_gem_object_ops); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index a3e59c8ef27b..9ad13eeed904 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -546,11 +546,12 @@ repeat:  }  static int -i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, +i915_gem_execbuffer_relocate_entry(struct i915_vma *vma,  				   struct eb_vmas *eb,  				   struct drm_i915_gem_relocation_entry *reloc,  				   struct reloc_cache *cache)  { +	struct drm_i915_gem_object *obj = vma->obj;  	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);  	struct drm_gem_object *target_obj;  	struct drm_i915_gem_object *target_i915_obj; @@ -628,6 +629,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,  		return -EINVAL;  	} +	/* +	 * If we write into the object, we need to force the synchronisation +	 * barrier, either with an asynchronous clflush or if we executed the +	 * patching using the GPU (though that should be serialised by the +	 * timeline). To be completely sure, and since we are required to +	 * do relocations we are already stalling, disable the user's opt +	 * of our synchronisation. +	 */ +	vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC; +  	ret = relocate_entry(obj, reloc, cache, target_offset);  	if (ret)  		return ret; @@ -678,7 +689,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,  		do {  			u64 offset = r->presumed_offset; -			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache); +			ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache);  			if (ret)  				goto out; @@ -726,7 +737,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,  	reloc_cache_init(&cache, eb->i915);  	for (i = 0; i < entry->relocation_count; i++) { -		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache); +		ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache);  		if (ret)  			break;  	} diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 5ddbc9499775..a74d0ac737cb 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,  	 * GPU processing the request, we never over-estimate the  	 * position of the head.  	 */ -	req->head = req->ring->tail; +	req->head = req->ring->emit;  	/* Check that we didn't interrupt ourselves with a new request */  	GEM_BUG_ON(req->timeline->seqno != req->fence.seqno); diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index 129c58bb4805..a4a920c4c454 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -123,7 +123,7 @@ struct drm_i915_gem_request {  	 * It is used by the driver to then queue the request for execution.  	 */  	struct i915_sw_fence submit; -	wait_queue_t submitq; +	wait_queue_entry_t submitq;  	wait_queue_head_t execute;  	/* A list of everyone we wait upon, and everyone who waits upon us. 
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 1642fff9cf13..ab5140ba108d 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client,  	GEM_BUG_ON(freespace < wqi_size);  	/* The GuC firmware wants the tail index in QWords, not bytes */ -	tail = rq->tail; -	assert_ring_tail_valid(rq->ring, rq->tail); -	tail >>= 3; +	tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;  	GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);  	/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h index c0cb2974caac..2cfe96d3e5d1 100644 --- a/drivers/gpu/drm/i915/i915_pvinfo.h +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -36,10 +36,6 @@  #define VGT_VERSION_MAJOR 1  #define VGT_VERSION_MINOR 0 -#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor)) -#define INTEL_VGT_IF_VERSION \ -	INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) -  /*   * notifications from guest to vgpu device model   */ @@ -55,8 +51,8 @@ enum vgt_g2v_type {  struct vgt_if {  	u64 magic;		/* VGT_MAGIC */ -	uint16_t version_major; -	uint16_t version_minor; +	u16 version_major; +	u16 version_minor;  	u32 vgt_id;		/* ID of vGT instance */  	u32 rsv1[12];		/* pad to offset 0x40 */  	/* diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index a277f8eb7beb..380de4360b8a 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -152,7 +152,7 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,  					struct list_head *continuation)  {  	wait_queue_head_t *x = &fence->wait; -	wait_queue_t *pos, *next; +	wait_queue_entry_t *pos, *next;  	unsigned long flags;  	debug_fence_deactivate(fence); @@ -160,31 +160,30 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,  	/*  	 * To prevent unbounded recursion as we traverse the graph of -	 * i915_sw_fences, we move the task_list from this, the next ready -	 * fence, to the tail of the original fence's task_list +	 * i915_sw_fences, we move the entry list from this, the next ready +	 * fence, to the tail of the original fence's entry list  	 * (and so added to the list to be woken).  	 
*/  	spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);  	if (continuation) { -		list_for_each_entry_safe(pos, next, &x->task_list, task_list) { +		list_for_each_entry_safe(pos, next, &x->head, entry) {  			if (pos->func == autoremove_wake_function)  				pos->func(pos, TASK_NORMAL, 0, continuation);  			else -				list_move_tail(&pos->task_list, continuation); +				list_move_tail(&pos->entry, continuation);  		}  	} else {  		LIST_HEAD(extra);  		do { -			list_for_each_entry_safe(pos, next, -						 &x->task_list, task_list) +			list_for_each_entry_safe(pos, next, &x->head, entry)  				pos->func(pos, TASK_NORMAL, 0, &extra);  			if (list_empty(&extra))  				break; -			list_splice_tail_init(&extra, &x->task_list); +			list_splice_tail_init(&extra, &x->head);  		} while (1);  	}  	spin_unlock_irqrestore(&x->lock, flags); @@ -254,9 +253,9 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence)  	__i915_sw_fence_commit(fence);  } -static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key) +static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)  { -	list_del(&wq->task_list); +	list_del(&wq->entry);  	__i915_sw_fence_complete(wq->private, key);  	i915_sw_fence_put(wq->private);  	if (wq->flags & I915_SW_FENCE_FLAG_ALLOC) @@ -267,7 +266,7 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *  static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,  				    const struct i915_sw_fence * const signaler)  { -	wait_queue_t *wq; +	wait_queue_entry_t *wq;  	if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))  		return false; @@ -275,7 +274,7 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,  	if (fence == signaler)  		return true; -	list_for_each_entry(wq, &fence->wait.task_list, task_list) { +	list_for_each_entry(wq, &fence->wait.head, entry) {  		if (wq->func != i915_sw_fence_wake)  			continue; @@ -288,12 +287,12 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,  static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)  { -	wait_queue_t *wq; +	wait_queue_entry_t *wq;  	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))  		return; -	list_for_each_entry(wq, &fence->wait.task_list, task_list) { +	list_for_each_entry(wq, &fence->wait.head, entry) {  		if (wq->func != i915_sw_fence_wake)  			continue; @@ -320,7 +319,7 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,  static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,  					  struct i915_sw_fence *signaler, -					  wait_queue_t *wq, gfp_t gfp) +					  wait_queue_entry_t *wq, gfp_t gfp)  {  	unsigned long flags;  	int pending; @@ -350,7 +349,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,  		pending |= I915_SW_FENCE_FLAG_ALLOC;  	} -	INIT_LIST_HEAD(&wq->task_list); +	INIT_LIST_HEAD(&wq->entry);  	wq->flags = pending;  	wq->func = i915_sw_fence_wake;  	wq->private = i915_sw_fence_get(fence); @@ -359,7 +358,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,  	spin_lock_irqsave(&signaler->wait.lock, flags);  	if (likely(!i915_sw_fence_done(signaler))) { -		__add_wait_queue_tail(&signaler->wait, wq); +		__add_wait_queue_entry_tail(&signaler->wait, wq);  		pending = 1;  	} else {  		i915_sw_fence_wake(wq, 0, 0, NULL); @@ -372,7 +371,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,  int i915_sw_fence_await_sw_fence(struct 
i915_sw_fence *fence,  				 struct i915_sw_fence *signaler, -				 wait_queue_t *wq) +				 wait_queue_entry_t *wq)  {  	return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);  } diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index d31cefbbcc04..fd3c3bf6c8b7 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -66,7 +66,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence);  int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,  				 struct i915_sw_fence *after, -				 wait_queue_t *wq); +				 wait_queue_entry_t *wq);  int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,  				     struct i915_sw_fence *after,  				     gfp_t gfp); diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 4ab8a973b61f..2e739018fb4c 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -60,8 +60,8 @@   */  void i915_check_vgpu(struct drm_i915_private *dev_priv)  { -	uint64_t magic; -	uint32_t version; +	u64 magic; +	u16 version_major;  	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); @@ -69,10 +69,8 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)  	if (magic != VGT_MAGIC)  		return; -	version = INTEL_VGT_IF_VERSION_ENCODE( -		__raw_i915_read16(dev_priv, vgtif_reg(version_major)), -		__raw_i915_read16(dev_priv, vgtif_reg(version_minor))); -	if (version != INTEL_VGT_IF_VERSION) { +	version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major)); +	if (version_major < VGT_VERSION_MAJOR) {  		DRM_INFO("VGT interface version mismatch!\n");  		return;  	} diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 1aba47024656..f066e2d785f5 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -650,6 +650,11 @@ int i915_vma_unbind(struct i915_vma *vma)  				break;  		} +		if (!ret) { +			ret = i915_gem_active_retire(&vma->last_fence, +						     &vma->vm->i915->drm.struct_mutex); +		} +  		__i915_vma_unpin(vma);  		if (ret)  			return ret; diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index eb638a1e69d2..42fb436f6cdc 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -15,13 +15,9 @@ static struct intel_dsm_priv {  	acpi_handle dhandle;  } intel_dsm_priv; -static const u8 intel_dsm_guid[] = { -	0xd3, 0x73, 0xd8, 0x7e, -	0xd0, 0xc2, -	0x4f, 0x4e, -	0xa8, 0x54, -	0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c -}; +static const guid_t intel_dsm_guid = +	GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f, +		  0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);  static char *intel_dsm_port_name(u8 id)  { @@ -80,7 +76,7 @@ static void intel_dsm_platform_mux_info(void)  	int i;  	union acpi_object *pkg, *connector_count; -	pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, intel_dsm_guid, +	pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, &intel_dsm_guid,  			INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,  			NULL, ACPI_TYPE_PACKAGE);  	if (!pkg) { @@ -118,7 +114,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)  	if (!dhandle)  		return false; -	if (!acpi_check_dsm(dhandle, intel_dsm_guid, INTEL_DSM_REVISION_ID, +	if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID,  			    1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {  		DRM_DEBUG_KMS("no _DSM method for intel device\n");  		return false; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 
569717a12723..9106ea32b048 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,  static void skylake_pfit_enable(struct intel_crtc *crtc);  static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);  static void ironlake_pfit_enable(struct intel_crtc *crtc); -static void intel_modeset_setup_hw_state(struct drm_device *dev); +static void intel_modeset_setup_hw_state(struct drm_device *dev, +					 struct drm_modeset_acquire_ctx *ctx);  static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);  struct intel_limit { @@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev,  	struct drm_crtc *crtc;  	int i, ret; -	intel_modeset_setup_hw_state(dev); +	intel_modeset_setup_hw_state(dev, ctx);  	i915_redisable_vga(to_i915(dev));  	if (!state) @@ -4598,7 +4599,7 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)  static int  skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, -		  unsigned scaler_user, int *scaler_id, unsigned int rotation, +		  unsigned int scaler_user, int *scaler_id,  		  int src_w, int src_h, int dst_w, int dst_h)  {  	struct intel_crtc_scaler_state *scaler_state = @@ -4607,9 +4608,12 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,  		to_intel_crtc(crtc_state->base.crtc);  	int need_scaling; -	need_scaling = drm_rotation_90_or_270(rotation) ? -		(src_h != dst_w || src_w != dst_h): -		(src_w != dst_w || src_h != dst_h); +	/* +	 * Src coordinates are already rotated by 270 degrees for +	 * the 90/270 degree plane rotation cases (to match the +	 * GTT mapping), hence no need to account for rotation here. +	 */ +	need_scaling = src_w != dst_w || src_h != dst_h;  	/*  	 * if plane is being disabled or scaler is no more required or force detach @@ -4671,7 +4675,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)  	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;  	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, -		&state->scaler_state.scaler_id, DRM_ROTATE_0, +		&state->scaler_state.scaler_id,  		state->pipe_src_w, state->pipe_src_h,  		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);  } @@ -4700,7 +4704,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,  	ret = skl_update_scaler(crtc_state, force_detach,  				drm_plane_index(&intel_plane->base),  				&plane_state->scaler_id, -				plane_state->base.rotation,  				drm_rect_width(&plane_state->base.src) >> 16,  				drm_rect_height(&plane_state->base.src) >> 16,  				drm_rect_width(&plane_state->base.dst), @@ -5823,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,  		intel_update_watermarks(intel_crtc);  } -static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) +static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, +					struct drm_modeset_acquire_ctx *ctx)  {  	struct intel_encoder *encoder;  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -5853,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)  		return;  	} -	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; +	state->acquire_ctx = ctx;  	/* Everything's already locked, -EDEADLK can't happen. 
*/  	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); @@ -15028,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev)  	intel_setup_outputs(dev_priv);  	drm_modeset_lock_all(dev); -	intel_modeset_setup_hw_state(dev); +	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);  	drm_modeset_unlock_all(dev);  	for_each_intel_crtc(dev, crtc) { @@ -15065,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev)  	return 0;  } -static void intel_enable_pipe_a(struct drm_device *dev) +static void intel_enable_pipe_a(struct drm_device *dev, +				struct drm_modeset_acquire_ctx *ctx)  {  	struct intel_connector *connector;  	struct drm_connector_list_iter conn_iter;  	struct drm_connector *crt = NULL;  	struct intel_load_detect_pipe load_detect_temp; -	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;  	int ret;  	/* We can't just switch on the pipe A, we need to set things up with a @@ -15143,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,  		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);  } -static void intel_sanitize_crtc(struct intel_crtc *crtc) +static void intel_sanitize_crtc(struct intel_crtc *crtc, +				struct drm_modeset_acquire_ctx *ctx)  {  	struct drm_device *dev = crtc->base.dev;  	struct drm_i915_private *dev_priv = to_i915(dev); @@ -15189,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)  		plane = crtc->plane;  		crtc->base.primary->state->visible = true;  		crtc->plane = !plane; -		intel_crtc_disable_noatomic(&crtc->base); +		intel_crtc_disable_noatomic(&crtc->base, ctx);  		crtc->plane = plane;  	} @@ -15199,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)  		 * resume. Force-enable the pipe to fix this, the update_dpms  		 * call below we restore the pipe to the right state, but leave  		 * the required bits on. */ -		intel_enable_pipe_a(dev); +		intel_enable_pipe_a(dev, ctx);  	}  	/* Adjust the state of the output pipe according to whether we  	 * have active connectors/encoders. 
*/  	if (crtc->active && !intel_crtc_has_encoders(crtc)) -		intel_crtc_disable_noatomic(&crtc->base); +		intel_crtc_disable_noatomic(&crtc->base, ctx);  	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {  		/* @@ -15503,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)   * and sanitizes it to the current state   */  static void -intel_modeset_setup_hw_state(struct drm_device *dev) +intel_modeset_setup_hw_state(struct drm_device *dev, +			     struct drm_modeset_acquire_ctx *ctx)  {  	struct drm_i915_private *dev_priv = to_i915(dev);  	enum pipe pipe; @@ -15523,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)  	for_each_pipe(dev_priv, pipe) {  		crtc = intel_get_crtc_for_pipe(dev_priv, pipe); -		intel_sanitize_crtc(crtc); +		intel_sanitize_crtc(crtc, ctx);  		intel_dump_pipe_config(crtc, crtc->config,  				       "[setup_hw_state]");  	} diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c index 6532e226db29..40ba3134545e 100644 --- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c @@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,  	struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);  	struct intel_panel *panel = &connector->panel; -	intel_dp_aux_enable_backlight(connector); -  	if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)  		panel->backlight.max = 0xFFFF;  	else diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index dac4e003c1f3..62f44d3e7c43 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)  		rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;  	u32 *reg_state = ce->lrc_reg_state; -	assert_ring_tail_valid(rq->ring, rq->tail); -	reg_state[CTX_RING_TAIL+1] = rq->tail; +	reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);  	/* True 32b PPGTT with dynamic page allocation: update PDP  	 * registers and point the unallocated PDPs to scratch page. @@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)  			ce->state->obj->mm.dirty = true;  			i915_gem_object_unpin_map(ce->state->obj); -			ce->ring->head = ce->ring->tail = 0; -			intel_ring_update_space(ce->ring); +			intel_ring_reset(ce->ring, 0);  		}  	}  } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2ca481b5aa69..078fd1bfa5ea 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3373,20 +3373,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,  	/* n.b., src is 16.16 fixed point, dst is whole integer */  	if (plane->id == PLANE_CURSOR) { +		/* +		 * Cursors only support 0/180 degree rotation, +		 * hence no need to account for rotation here. +		 */  		src_w = pstate->base.src_w;  		src_h = pstate->base.src_h;  		dst_w = pstate->base.crtc_w;  		dst_h = pstate->base.crtc_h;  	} else { +		/* +		 * Src coordinates are already rotated by 270 degrees for +		 * the 90/270 degree plane rotation cases (to match the +		 * GTT mapping), hence no need to account for rotation here. 
+		 */  		src_w = drm_rect_width(&pstate->base.src);  		src_h = drm_rect_height(&pstate->base.src);  		dst_w = drm_rect_width(&pstate->base.dst);  		dst_h = drm_rect_height(&pstate->base.dst);  	} -	if (drm_rotation_90_or_270(pstate->base.rotation)) -		swap(dst_w, dst_h); -  	downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);  	downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); @@ -3417,12 +3423,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,  	if (y && format != DRM_FORMAT_NV12)  		return 0; +	/* +	 * Src coordinates are already rotated by 270 degrees for +	 * the 90/270 degree plane rotation cases (to match the +	 * GTT mapping), hence no need to account for rotation here. +	 */  	width = drm_rect_width(&intel_pstate->base.src) >> 16;  	height = drm_rect_height(&intel_pstate->base.src) >> 16; -	if (drm_rotation_90_or_270(pstate->rotation)) -		swap(width, height); -  	/* for planar format */  	if (format == DRM_FORMAT_NV12) {  		if (y)  /* y-plane data rate */ @@ -3505,12 +3513,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,  	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)  		return 8; +	/* +	 * Src coordinates are already rotated by 270 degrees for +	 * the 90/270 degree plane rotation cases (to match the +	 * GTT mapping), hence no need to account for rotation here. +	 */  	src_w = drm_rect_width(&intel_pstate->base.src) >> 16;  	src_h = drm_rect_height(&intel_pstate->base.src) >> 16; -	if (drm_rotation_90_or_270(pstate->rotation)) -		swap(src_w, src_h); -  	/* Halve UV plane width and height for NV12 */  	if (fb->format->format == DRM_FORMAT_NV12 && !y) {  		src_w /= 2; @@ -3794,13 +3804,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,  		width = intel_pstate->base.crtc_w;  		height = intel_pstate->base.crtc_h;  	} else { +		/* +		 * Src coordinates are already rotated by 270 degrees for +		 * the 90/270 degree plane rotation cases (to match the +		 * GTT mapping), hence no need to account for rotation here. 
+		 */  		width = drm_rect_width(&intel_pstate->base.src) >> 16;  		height = drm_rect_height(&intel_pstate->base.src) >> 16;  	} -	if (drm_rotation_90_or_270(pstate->rotation)) -		swap(width, height); -  	cpp = fb->format->cpp[0];  	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 66a2b8b83972..513a0f4b469b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)  void intel_ring_update_space(struct intel_ring *ring)  { -	ring->space = __intel_ring_space(ring->head, ring->tail, ring->size); +	ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);  }  static int @@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)  	i915_gem_request_submit(request); -	assert_ring_tail_valid(request->ring, request->tail); -	I915_WRITE_TAIL(request->engine, request->tail); +	I915_WRITE_TAIL(request->engine, +			intel_ring_set_tail(request->ring, request->tail));  }  static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) @@ -1316,11 +1316,23 @@ err:  	return PTR_ERR(addr);  } +void intel_ring_reset(struct intel_ring *ring, u32 tail) +{ +	GEM_BUG_ON(!list_empty(&ring->request_list)); +	ring->tail = tail; +	ring->head = tail; +	ring->emit = tail; +	intel_ring_update_space(ring); +} +  void intel_ring_unpin(struct intel_ring *ring)  {  	GEM_BUG_ON(!ring->vma);  	GEM_BUG_ON(!ring->vaddr); +	/* Discard any unused bytes beyond that submitted to hw. */ +	intel_ring_reset(ring, ring->tail); +  	if (i915_vma_is_map_and_fenceable(ring->vma))  		i915_vma_unpin_iomap(ring->vma);  	else @@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)  	struct intel_engine_cs *engine;  	enum intel_engine_id id; +	/* Restart from the beginning of the rings for convenience */  	for_each_engine(engine, dev_priv, id) -		engine->buffer->head = engine->buffer->tail; +		intel_ring_reset(engine->buffer, 0);  }  static int ring_request_alloc(struct drm_i915_gem_request *request) @@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)  		unsigned space;  		/* Would completion of this request free enough space? 
*/ -		space = __intel_ring_space(target->postfix, ring->tail, +		space = __intel_ring_space(target->postfix, ring->emit,  					   ring->size);  		if (space >= bytes)  			break; @@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)  u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)  {  	struct intel_ring *ring = req->ring; -	int remain_actual = ring->size - ring->tail; -	int remain_usable = ring->effective_size - ring->tail; +	int remain_actual = ring->size - ring->emit; +	int remain_usable = ring->effective_size - ring->emit;  	int bytes = num_dwords * sizeof(u32);  	int total_bytes, wait_bytes;  	bool need_wrap = false; @@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)  	if (unlikely(need_wrap)) {  		GEM_BUG_ON(remain_actual > ring->space); -		GEM_BUG_ON(ring->tail + remain_actual > ring->size); +		GEM_BUG_ON(ring->emit + remain_actual > ring->size);  		/* Fill the tail with MI_NOOP */ -		memset(ring->vaddr + ring->tail, 0, remain_actual); -		ring->tail = 0; +		memset(ring->vaddr + ring->emit, 0, remain_actual); +		ring->emit = 0;  		ring->space -= remain_actual;  	} -	GEM_BUG_ON(ring->tail > ring->size - bytes); -	cs = ring->vaddr + ring->tail; -	ring->tail += bytes; +	GEM_BUG_ON(ring->emit > ring->size - bytes); +	cs = ring->vaddr + ring->emit; +	ring->emit += bytes;  	ring->space -= bytes;  	GEM_BUG_ON(ring->space < 0); @@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)  int intel_ring_cacheline_align(struct drm_i915_gem_request *req)  {  	int num_dwords = -		(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); +		(req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);  	u32 *cs;  	if (num_dwords == 0) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index a82a0807f64d..f7144fe09613 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -145,6 +145,7 @@ struct intel_ring {  	u32 head;  	u32 tail; +	u32 emit;  	int space;  	int size; @@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)  struct intel_ring *  intel_engine_create_ring(struct intel_engine_cs *engine, int size);  int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias); +void intel_ring_reset(struct intel_ring *ring, u32 tail); +void intel_ring_update_space(struct intel_ring *ring);  void intel_ring_unpin(struct intel_ring *ring);  void intel_ring_free(struct intel_ring *ring); @@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)  	 * reserved for the command packet (i.e. the value passed to  	 * intel_ring_begin()).  	 */ -	GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs); +	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);  }  static inline u32 @@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)  	GEM_BUG_ON(tail >= ring->size);  } -void intel_ring_update_space(struct intel_ring *ring); +static inline unsigned int +intel_ring_set_tail(struct intel_ring *ring, unsigned int tail) +{ +	/* Whilst writes to the tail are strictly order, there is no +	 * serialisation between readers and the writers. The tail may be +	 * read by i915_gem_request_retire() just as it is being updated +	 * by execlists, as although the breadcrumb is complete, the context +	 * switch hasn't been seen. 
+	 */ +	assert_ring_tail_valid(ring, tail); +	ring->tail = tail; +	return tail; +}  void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno); diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index adb411a078e8..f4b53588e071 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1173,7 +1173,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,  	if (IS_G200_SE(mdev)) { -		if (mdev->unique_rev_id >= 0x02) { +		if  (mdev->unique_rev_id >= 0x04) { +			WREG8(MGAREG_CRTCEXT_INDEX, 0x06); +			WREG8(MGAREG_CRTCEXT_DATA, 0); +		} else if (mdev->unique_rev_id >= 0x02) {  			u8 hi_pri_lvl;  			u32 bpp;  			u32 mb; @@ -1639,6 +1642,10 @@ static int mga_vga_mode_valid(struct drm_connector *connector,  			if (mga_vga_calculate_mode_bandwidth(mode, bpp)  				> (30100 * 1024))  				return MODE_BANDWIDTH; +		} else { +			if (mga_vga_calculate_mode_bandwidth(mode, bpp) +				> (55000 * 1024)) +				return MODE_BANDWIDTH;  		}  	} else if (mdev->type == G200_WB) {  		if (mode->hdisplay > 1280) diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c index 1144e0c9e894..0abe77675b76 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c @@ -35,6 +35,13 @@  #include "mxsfb_drv.h"  #include "mxsfb_regs.h" +#define MXS_SET_ADDR		0x4 +#define MXS_CLR_ADDR		0x8 +#define MODULE_CLKGATE		BIT(30) +#define MODULE_SFTRST		BIT(31) +/* 1 second delay should be plenty of time for block reset */ +#define RESET_TIMEOUT		1000000 +  static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)  {  	return (val & mxsfb->devdata->hs_wdth_mask) << @@ -159,6 +166,36 @@ static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)  		clk_disable_unprepare(mxsfb->clk_disp_axi);  } +/* + * Clear the bit and poll it cleared.  This is usually called with + * a reset address and mask being either SFTRST(bit 31) or CLKGATE + * (bit 30). 
+ */ +static int clear_poll_bit(void __iomem *addr, u32 mask) +{ +	u32 reg; + +	writel(mask, addr + MXS_CLR_ADDR); +	return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT); +} + +static int mxsfb_reset_block(void __iomem *reset_addr) +{ +	int ret; + +	ret = clear_poll_bit(reset_addr, MODULE_SFTRST); +	if (ret) +		return ret; + +	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR); + +	ret = clear_poll_bit(reset_addr, MODULE_SFTRST); +	if (ret) +		return ret; + +	return clear_poll_bit(reset_addr, MODULE_CLKGATE); +} +  static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)  {  	struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode; @@ -173,6 +210,11 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)  	 */  	mxsfb_enable_axi_clk(mxsfb); +	/* Mandatory eLCDIF reset as per the Reference Manual */ +	err = mxsfb_reset_block(mxsfb->base); +	if (err) +		return; +  	/* Clear the FIFOs */  	writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET); diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 39468c218027..7459ef9943ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -60,15 +60,13 @@ bool nouveau_is_v1_dsm(void) {  }  #ifdef CONFIG_VGA_SWITCHEROO -static const char nouveau_dsm_muid[] = { -	0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, -	0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, -}; +static const guid_t nouveau_dsm_muid = +	GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48, +		  0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4); -static const char nouveau_op_dsm_muid[] = { -	0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47, -	0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0, -}; +static const guid_t nouveau_op_dsm_muid = +	GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B, +		  0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0);  static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result)  { @@ -86,7 +84,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *  		args_buff[i] = (arg >> i * 8) & 0xFF;  	*result = 0; -	obj = acpi_evaluate_dsm_typed(handle, nouveau_op_dsm_muid, 0x00000100, +	obj = acpi_evaluate_dsm_typed(handle, &nouveau_op_dsm_muid, 0x00000100,  				      func, &argv4, ACPI_TYPE_BUFFER);  	if (!obj) {  		acpi_handle_info(handle, "failed to evaluate _DSM\n"); @@ -138,7 +136,7 @@ static int nouveau_dsm(acpi_handle handle, int func, int arg)  		.integer.value = arg,  	}; -	obj = acpi_evaluate_dsm_typed(handle, nouveau_dsm_muid, 0x00000102, +	obj = acpi_evaluate_dsm_typed(handle, &nouveau_dsm_muid, 0x00000102,  				      func, &argv4, ACPI_TYPE_INTEGER);  	if (!obj) {  		acpi_handle_info(handle, "failed to evaluate _DSM\n"); @@ -259,7 +257,7 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out  	if (!acpi_has_method(dhandle, "_DSM"))  		return; -	supports_mux = acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102, +	supports_mux = acpi_check_dsm(dhandle, &nouveau_dsm_muid, 0x00000102,  				      1 << NOUVEAU_DSM_POWER);  	optimus_funcs = nouveau_dsm_get_optimus_functions(dhandle); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c index e3e2f5e83815..f44682d62f75 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c @@ -81,10 +81,9 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)  {  	struct nvkm_subdev *subdev = &mxm->subdev;  	struct 
nvkm_device *device = subdev->device; -	static char muid[] = { -		0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C, -		0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65 -	}; +	static guid_t muid = +		GUID_INIT(0x4004A400, 0x917D, 0x4CF2, +			  0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65);  	u32 mxms_args[] = { 0x00000000 };  	union acpi_object argv4 = {  		.buffer.type = ACPI_TYPE_BUFFER, @@ -105,7 +104,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)  	 * unless you pass in exactly the version it supports..  	 */  	rev = (version & 0xf0) << 4 | (version & 0x0f); -	obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4); +	obj = acpi_evaluate_dsm(handle, &muid, rev, 0x00000010, &argv4);  	if (!obj) {  		nvkm_debug(subdev, "DSM MXMS failed\n");  		return false; diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 008c145b7f29..ca44233ceacc 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -9267,8 +9267,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev,  	u32 tmp, wm_mask;  	if (radeon_crtc->base.enabled && num_heads && mode) { -		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; -		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); +		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, +					    (u32)mode->clock); +		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, +					  (u32)mode->clock); +		line_time = min(line_time, (u32)65535);  		/* watermark for high clocks */  		if ((rdev->pm.pm_method == PM_METHOD_DPM) && diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 0bf103536404..534637203e70 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2266,8 +2266,11 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,  	fixed20_12 a, b, c;  	if (radeon_crtc->base.enabled && num_heads && mode) { -		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; -		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); +		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, +					    (u32)mode->clock); +		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, +					  (u32)mode->clock); +		line_time = min(line_time, (u32)65535);  		priority_a_cnt = 0;  		priority_b_cnt = 0;  		dram_channels = evergreen_get_number_of_dram_channels(rdev); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index c1c8e2208a21..e562a78510ff 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -375,7 +375,7 @@ struct radeon_fence {  	unsigned		ring;  	bool			is_vm_update; -	wait_queue_t		fence_wake; +	wait_queue_entry_t		fence_wake;  };  int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 432480ff9d22..3178ba0c537c 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev)  	    rdev->pdev->subsystem_vendor == 0x103c &&  	    rdev->pdev->subsystem_device == 0x280a)  		return; +	/* quirk for rs4xx Toshiba Sattellite L20-183 latop to make it resume +	 * - it hangs on resume inside the dynclk 1 table. 
+	 */ +	if (rdev->family == CHIP_RS400 && +	    rdev->pdev->subsystem_vendor == 0x1179 && +	    rdev->pdev->subsystem_device == 0xff31) +	        return;  	/* DYN CLK 1 */  	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 6ecf42783d4b..0a6444d72000 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {  	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381  	 */  	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, +	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU +	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491 +	 */ +	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },  	/* macbook pro 8.2 */  	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },  	{ 0, 0, 0, 0, 0 }, diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index ef09f0a63754..e86f2bd38410 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -158,7 +158,7 @@ int radeon_fence_emit(struct radeon_device *rdev,   * for the fence locking itself, so unlocked variants are used for   * fence_signal, and remove_wait_queue.   */ -static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key) +static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key)  {  	struct radeon_fence *fence;  	u64 seq; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 7431eb4a11b7..d34d1cf33895 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -621,7 +621,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,  	}  	/* TODO: is this still necessary on NI+ ? 
*/ -	if ((cmd == 0 || cmd == 1 || cmd == 0x3) && +	if ((cmd == 0 || cmd == 0x3) &&  	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {  		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",  			  start, end); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 76d1888528e6..5303f25d5280 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -2284,8 +2284,11 @@ static void dce6_program_watermarks(struct radeon_device *rdev,  	fixed20_12 a, b, c;  	if (radeon_crtc->base.enabled && num_heads && mode) { -		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; -		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); +		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, +					    (u32)mode->clock); +		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, +					  (u32)mode->clock); +		line_time = min(line_time, (u32)65535);  		priority_a_cnt = 0;  		priority_b_cnt = 0; diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 9a1e34e48f64..81f86a67c10d 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -451,18 +451,6 @@ fail:  #ifdef CONFIG_DRM_TEGRA_STAGING -static struct tegra_drm_context * -tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id) -{ -	struct tegra_drm_context *context; - -	mutex_lock(&file->lock); -	context = idr_find(&file->contexts, id); -	mutex_unlock(&file->lock); - -	return context; -} -  static int tegra_gem_create(struct drm_device *drm, void *data,  			    struct drm_file *file)  { @@ -551,7 +539,7 @@ static int tegra_client_open(struct tegra_drm_file *fpriv,  	if (err < 0)  		return err; -	err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL); +	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);  	if (err < 0) {  		client->ops->close_channel(context);  		return err; @@ -606,7 +594,7 @@ static int tegra_close_channel(struct drm_device *drm, void *data,  	mutex_lock(&fpriv->lock); -	context = tegra_drm_file_get_context(fpriv, args->context); +	context = idr_find(&fpriv->contexts, args->context);  	if (!context) {  		err = -EINVAL;  		goto unlock; @@ -631,7 +619,7 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,  	mutex_lock(&fpriv->lock); -	context = tegra_drm_file_get_context(fpriv, args->context); +	context = idr_find(&fpriv->contexts, args->context);  	if (!context) {  		err = -ENODEV;  		goto unlock; @@ -660,7 +648,7 @@ static int tegra_submit(struct drm_device *drm, void *data,  	mutex_lock(&fpriv->lock); -	context = tegra_drm_file_get_context(fpriv, args->context); +	context = idr_find(&fpriv->contexts, args->context);  	if (!context) {  		err = -ENODEV;  		goto unlock; @@ -685,7 +673,7 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,  	mutex_lock(&fpriv->lock); -	context = tegra_drm_file_get_context(fpriv, args->context); +	context = idr_find(&fpriv->contexts, args->context);  	if (!context) {  		err = -ENODEV;  		goto unlock; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 13db8a2851ed..1f013d45c9e9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)  	list_for_each_entry_safe(entry, next, &man->list, head)  		vmw_cmdbuf_res_free(man, entry); +	drm_ht_remove(&man->resources);  	kfree(man);  } diff --git 
a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index f05ebb14fa63..ac65f52850a6 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -172,7 +172,7 @@ static int host1x_probe(struct platform_device *pdev)  	host->rst = devm_reset_control_get(&pdev->dev, "host1x");  	if (IS_ERR(host->rst)) { -		err = PTR_ERR(host->clk); +		err = PTR_ERR(host->rst);  		dev_err(&pdev->dev, "failed to get reset: %d\n", err);  		return err;  	} diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 92f1452dad57..76875f6299b8 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -417,7 +417,7 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)  {  	struct vga_device *vgadev, *conflict;  	unsigned long flags; -	wait_queue_t wait; +	wait_queue_entry_t wait;  	int rc = 0;  	vga_check_first_use(); | 
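
A note on the recurring watermark change above (amdgpu dce_v6_0/dce_v8_0/dce_v10_0/dce_v11_0 and radeon cik/evergreen/si): the old expression 1000000UL * mode->crtc_htotal / mode->clock is 32-bit arithmetic on 32-bit kernels, and the intermediate product wraps once crtc_htotal exceeds 4294, which 4K timings easily reach; div_u64() widens the multiply to 64 bits and also avoids the open-coded 64-bit division that 32-bit builds cannot do directly. Below is a minimal standalone sketch of the overflow, using hypothetical 4K timing values and plain userspace C rather than the kernel helpers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t crtc_htotal = 4400;	/* hypothetical 4K horizontal total */
	uint32_t clock_khz = 594000;	/* hypothetical pixel clock in kHz */

	/* old style, forced to 32 bits to mimic a 32-bit unsigned long */
	uint32_t wrapped = (uint32_t)(1000000u * crtc_htotal) / clock_khz;

	/* new style: widen before multiplying, then divide (div_u64() in-kernel) */
	uint32_t line_time = (uint32_t)(((uint64_t)crtc_htotal * 1000000) / clock_khz);
	if (line_time > 65535)		/* same clamp as the patched code */
		line_time = 65535;

	printf("wrapped=%u, widened=%u\n", wrapped, line_time);	/* 176 vs 7407 */
	return 0;
}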
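
The ACPI _DSM changes above (i915 intel_acpi.c, nouveau_acpi.c, nvkm mxm/base.c) swap open-coded 16-byte UUID arrays for guid_t initialised with GUID_INIT(). The first three GUID fields are little-endian in byte form, which is why the array 0xd3, 0x73, 0xd8, 0x7e, 0xd0, 0xc2, 0x4f, 0x4e, ... becomes GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f, ...). The sketch below is plain userspace C (the struct and names are illustrative, not the kernel's guid_t implementation) and just verifies that the two spellings describe the same bytes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct guid_sketch {
	uint32_t a;	/* stored little-endian in the byte form */
	uint16_t b;	/* little-endian */
	uint16_t c;	/* little-endian */
	uint8_t  d[8];	/* stored verbatim */
};

int main(void)
{
	/* the old intel_dsm_guid byte array from the diff */
	const uint8_t old_bytes[16] = {
		0xd3, 0x73, 0xd8, 0x7e, 0xd0, 0xc2, 0x4f, 0x4e,
		0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
	};
	/* the same GUID written GUID_INIT-style */
	struct guid_sketch g = {
		0x7ed873d3, 0xc2d0, 0x4e4f,
		{ 0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c }
	};
	uint8_t new_bytes[16];
	int i;

	/* serialise the fields little-endian, as the byte form expects */
	for (i = 0; i < 4; i++)
		new_bytes[i] = g.a >> (8 * i);
	for (i = 0; i < 2; i++) {
		new_bytes[4 + i] = g.b >> (8 * i);
		new_bytes[6 + i] = g.c >> (8 * i);
	}
	memcpy(new_bytes + 8, g.d, 8);

	printf("layouts match: %s\n",
	       memcmp(old_bytes, new_bytes, 16) ? "no" : "yes");
	return 0;
}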
