Diffstat (limited to 'drivers/gpu/drm/msm/msm_gpu.c')
-rw-r--r--	drivers/gpu/drm/msm/msm_gpu.c	167
1 file changed, 110 insertions(+), 57 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 55d16489d0f3..ab7c167b0623 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -14,6 +14,7 @@
 #include <generated/utsrelease.h>
 #include <linux/string_helpers.h>
 #include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
 #include <linux/devcoredump.h>
 #include <linux/sched/task.h>
 
@@ -107,9 +108,18 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
 	if (IS_ERR(gpu->devfreq.devfreq)) {
 		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
 		gpu->devfreq.devfreq = NULL;
+		return;
 	}
 
 	devfreq_suspend_device(gpu->devfreq.devfreq);
+
+	gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node,
+			gpu->devfreq.devfreq);
+	if (IS_ERR(gpu->cooling)) {
+		DRM_DEV_ERROR(&gpu->pdev->dev,
+				"Couldn't register GPU cooling device\n");
+		gpu->cooling = NULL;
+	}
 }
 
 static int enable_pwrrail(struct msm_gpu *gpu)
@@ -177,15 +187,12 @@ static int disable_clk(struct msm_gpu *gpu)
 
 static int enable_axi(struct msm_gpu *gpu)
 {
-	if (gpu->ebi1_clk)
-		clk_prepare_enable(gpu->ebi1_clk);
-	return 0;
+	return clk_prepare_enable(gpu->ebi1_clk);
 }
 
 static int disable_axi(struct msm_gpu *gpu)
 {
-	if (gpu->ebi1_clk)
-		clk_disable_unprepare(gpu->ebi1_clk);
+	clk_disable_unprepare(gpu->ebi1_clk);
 	return 0;
 }
 
@@ -265,6 +272,22 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
 	return ret;
 }
 
+static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+		uint32_t fence)
+{
+	struct msm_gem_submit *submit;
+
+	spin_lock(&ring->submit_lock);
+	list_for_each_entry(submit, &ring->submits, node) {
+		if (submit->seqno > fence)
+			break;
+
+		msm_update_fence(submit->ring->fctx,
+			submit->fence->seqno);
+	}
+	spin_unlock(&ring->submit_lock);
+}
+
 #ifdef CONFIG_DEV_COREDUMP
 static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
 		size_t count, void *data, size_t datalen)
@@ -326,7 +349,9 @@ static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
 		if (!state_bo->data)
 			goto out;
 
+		msm_gem_lock(&obj->base);
 		ptr = msm_gem_get_vaddr_active(&obj->base);
+		msm_gem_unlock(&obj->base);
 		if (IS_ERR(ptr)) {
 			kvfree(state_bo->data);
 			state_bo->data = NULL;
@@ -411,37 +436,26 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 /*
  * Hangcheck detection for locked gpu:
  */
-static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
-		uint32_t fence)
-{
-	struct msm_gem_submit *submit;
-
-	list_for_each_entry(submit, &ring->submits, node) {
-		if (submit->seqno > fence)
-			break;
-
-		msm_update_fence(submit->ring->fctx,
-			submit->fence->seqno);
-	}
-}
-
 static struct msm_gem_submit *
 find_submit(struct msm_ringbuffer *ring, uint32_t fence)
 {
 	struct msm_gem_submit *submit;
 
-	WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));
-
-	list_for_each_entry(submit, &ring->submits, node)
-		if (submit->seqno == fence)
+	spin_lock(&ring->submit_lock);
+	list_for_each_entry(submit, &ring->submits, node) {
+		if (submit->seqno == fence) {
+			spin_unlock(&ring->submit_lock);
 			return submit;
+		}
+	}
+	spin_unlock(&ring->submit_lock);
 
 	return NULL;
 }
 
 static void retire_submits(struct msm_gpu *gpu);
 
-static void recover_worker(struct work_struct *work)
+static void recover_worker(struct kthread_work *work)
 {
 	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
 	struct drm_device *dev = gpu->dev;
@@ -470,14 +484,22 @@ static void recover_worker(struct work_struct *work)
 			put_task_struct(task);
 		}
 
+		/* msm_rd_dump_submit() needs bo locked to dump: */
+		for (i = 0; i < submit->nr_bos; i++)
+			msm_gem_lock(&submit->bos[i].obj->base);
+
 		if (comm && cmd) {
 			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
 				gpu->name, comm, cmd);
 
 			msm_rd_dump_submit(priv->hangrd, submit,
 				"offending task: %s (%s)", comm, cmd);
-		} else
+		} else {
 			msm_rd_dump_submit(priv->hangrd, submit, NULL);
+		}
+
+		for (i = 0; i < submit->nr_bos; i++)
+			msm_gem_unlock(&submit->bos[i].obj->base);
 	}
 
 	/* Record the crash state */
@@ -523,8 +545,10 @@ static void recover_worker(struct work_struct *work)
 		for (i = 0; i < gpu->nr_rings; i++) {
 			struct msm_ringbuffer *ring = gpu->rb[i];
 
+			spin_lock(&ring->submit_lock);
 			list_for_each_entry(submit, &ring->submits, node)
 				gpu->funcs->submit(gpu, submit);
+			spin_unlock(&ring->submit_lock);
 		}
 	}
 
@@ -535,7 +559,6 @@ static void recover_worker(struct work_struct *work)
 
 static void hangcheck_timer_reset(struct msm_gpu *gpu)
 {
-	DBG("%s", gpu->name);
 	mod_timer(&gpu->hangcheck_timer,
 			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
 }
@@ -544,7 +567,6 @@ static void hangcheck_handler(struct timer_list *t)
 {
 	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
 	struct drm_device *dev = gpu->dev;
-	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
 	uint32_t fence = ring->memptrs->fence;
 
@@ -561,7 +583,7 @@ static void hangcheck_handler(struct timer_list *t)
 		DRM_DEV_ERROR(dev->dev, "%s:     submitted fence: %u\n",
 				gpu->name, ring->seqno);
 
-		queue_work(priv->wq, &gpu->recover_work);
+		kthread_queue_work(gpu->worker, &gpu->recover_work);
 	}
 
 	/* if still more pending work, reset the hangcheck timer: */
@@ -569,7 +591,7 @@ static void hangcheck_handler(struct timer_list *t)
 		hangcheck_timer_reset(gpu);
 
 	/* workaround for missing irq: */
-	queue_work(priv->wq, &gpu->retire_work);
+	kthread_queue_work(gpu->worker, &gpu->retire_work);
 }
 
 /*
@@ -697,56 +719,70 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 		stats->alwayson_start, stats->alwayson_end);
 
 	for (i = 0; i < submit->nr_bos; i++) {
-		struct msm_gem_object *msm_obj = submit->bos[i].obj;
+		struct drm_gem_object *obj = &submit->bos[i].obj->base;
 
-		msm_gem_active_put(&msm_obj->base);
-		msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
-		drm_gem_object_put_locked(&msm_obj->base);
+		msm_gem_lock(obj);
+		msm_gem_active_put(obj);
+		msm_gem_unpin_iova_locked(obj, submit->aspace);
+		msm_gem_unlock(obj);
+		drm_gem_object_put(obj);
 	}
 
 	pm_runtime_mark_last_busy(&gpu->pdev->dev);
 	pm_runtime_put_autosuspend(&gpu->pdev->dev);
-	msm_gem_submit_free(submit);
+
+	spin_lock(&ring->submit_lock);
+	list_del(&submit->node);
+	spin_unlock(&ring->submit_lock);
+
+	msm_gem_submit_put(submit);
 }
 
 static void retire_submits(struct msm_gpu *gpu)
 {
-	struct drm_device *dev = gpu->dev;
-	struct msm_gem_submit *submit, *tmp;
 	int i;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 	/* Retire the commits starting with highest priority */
 	for (i = 0; i < gpu->nr_rings; i++) {
 		struct msm_ringbuffer *ring = gpu->rb[i];
 
-		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
-			if (dma_fence_is_signaled(submit->fence))
+		while (true) {
+			struct msm_gem_submit *submit = NULL;
+
+			spin_lock(&ring->submit_lock);
+			submit = list_first_entry_or_null(&ring->submits,
+					struct msm_gem_submit, node);
+			spin_unlock(&ring->submit_lock);
+
+			/*
+			 * If no submit, we are done.  If submit->fence hasn't
+			 * been signalled, then later submits are not signalled
+			 * either, so we are also done.
+			 */
+			if (submit && dma_fence_is_signaled(submit->fence)) {
 				retire_submit(gpu, ring, submit);
+			} else {
+				break;
+			}
 		}
 	}
 }
 
-static void retire_worker(struct work_struct *work)
+static void retire_worker(struct kthread_work *work)
 {
 	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
-	struct drm_device *dev = gpu->dev;
 	int i;
 
	for (i = 0; i < gpu->nr_rings; i++)
 		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
 
-	mutex_lock(&dev->struct_mutex);
 	retire_submits(gpu);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 /* call from irq handler to schedule work to retire bo's */
 void msm_gpu_retire(struct msm_gpu *gpu)
 {
-	struct msm_drm_private *priv = gpu->dev->dev_private;
-	queue_work(priv->wq, &gpu->retire_work);
+	kthread_queue_work(gpu->worker, &gpu->retire_work);
 
 	update_sw_cntrs(gpu);
 }
@@ -766,8 +802,6 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 
 	submit->seqno = ++ring->seqno;
 
-	list_add_tail(&submit->node, &ring->submits);
-
 	msm_rd_dump_submit(priv->rd, submit, NULL);
 
 	update_sw_cntrs(gpu);
@@ -777,14 +811,9 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 		struct drm_gem_object *drm_obj = &msm_obj->base;
 		uint64_t iova;
 
-		/* can't happen yet.. but when we add 2d support we'll have
-		 * to deal w/ cross-ring synchronization:
-		 */
-		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
-
 		/* submit takes a reference to the bo and iova until retired: */
 		drm_gem_object_get(&msm_obj->base);
-		msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
+		msm_gem_get_and_pin_iova_locked(&msm_obj->base, submit->aspace, &iova);
 
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
 			dma_resv_add_excl_fence(drm_obj->resv, submit->fence);
@@ -794,6 +823,16 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 		msm_gem_active_get(drm_obj, gpu);
 	}
 
+	/*
+	 * ring->submits holds a ref to the submit, to deal with the case
+	 * that a submit completes before msm_ioctl_gem_submit() returns.
+	 */
+	msm_gem_submit_get(submit);
+
+	spin_lock(&ring->submit_lock);
+	list_add_tail(&submit->node, &ring->submits);
+	spin_unlock(&ring->submit_lock);
+
 	gpu->funcs->submit(gpu, submit);
 
 	priv->lastctx = submit->queue->ctx;
@@ -869,10 +908,18 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	gpu->funcs = funcs;
 	gpu->name = name;
 
-	INIT_LIST_HEAD(&gpu->active_list);
-	INIT_WORK(&gpu->retire_work, retire_worker);
-	INIT_WORK(&gpu->recover_work, recover_worker);
+	gpu->worker = kthread_create_worker(0, "%s-worker", gpu->name);
+	if (IS_ERR(gpu->worker)) {
+		ret = PTR_ERR(gpu->worker);
+		gpu->worker = NULL;
+		goto fail;
+	}
 
+	sched_set_fifo_low(gpu->worker->task);
+
+	INIT_LIST_HEAD(&gpu->active_list);
+	kthread_init_work(&gpu->retire_work, retire_worker);
+	kthread_init_work(&gpu->recover_work, recover_worker);
 
 	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
 
@@ -1005,4 +1052,10 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
 		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
 		msm_gem_address_space_put(gpu->aspace);
 	}
+
+	if (gpu->worker) {
+		kthread_destroy_worker(gpu->worker);
+	}
+
+	devfreq_cooling_unregister(gpu->cooling);
 }
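Note on the devfreq cooling hunks above: of_devfreq_cooling_register() ties the GPU's devfreq into the thermal framework, a registration failure is treated as non-fatal, and devfreq_cooling_unregister() tolerates a NULL (or ERR) pointer, which is why msm_gpu_cleanup() can call it unconditionally. A minimal standalone sketch of the same pattern; the demo_* names are hypothetical:

/* Hypothetical demo_* names; same non-fatal register/unregister pattern. */
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/thermal.h>

static struct thermal_cooling_device *
demo_cooling_register(struct device *dev, struct devfreq *df)
{
	struct thermal_cooling_device *cdev;

	/* expose the devfreq device as a thermal cooling device */
	cdev = of_devfreq_cooling_register(dev->of_node, df);
	if (IS_ERR(cdev)) {
		dev_err(dev, "Couldn't register cooling device\n");
		return NULL;	/* non-fatal: the GPU keeps working */
	}

	return cdev;
}

static void demo_cooling_unregister(struct thermal_cooling_device *cdev)
{
	/* devfreq_cooling_unregister() ignores a NULL/ERR pointer */
	devfreq_cooling_unregister(cdev);
}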
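The new retire_submits() replaces list_for_each_entry_safe() under struct_mutex with a peek-then-retire loop under the new ring->submit_lock: the head of the list is sampled under the spinlock but tested and retired outside it, relying on submits completing in fence order. A standalone sketch of that shape, again with hypothetical demo_* names:

/* Hypothetical demo_* names; same peek-then-retire shape as retire_submits(). */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_submit {
	struct list_head node;
	bool signalled;			/* stands in for dma_fence_is_signaled() */
};

struct demo_ring {
	spinlock_t submit_lock;		/* protects the submits list */
	struct list_head submits;	/* oldest first, in fence order */
};

static void demo_retire_submits(struct demo_ring *ring)
{
	while (true) {
		struct demo_submit *submit;

		/* sample the oldest entry under the lock ... */
		spin_lock(&ring->submit_lock);
		submit = list_first_entry_or_null(&ring->submits,
				struct demo_submit, node);
		spin_unlock(&ring->submit_lock);

		/*
		 * ... but test it outside: submits complete in order, so
		 * an unsignalled head means nothing behind it is done
		 * either, and the loop can stop.
		 */
		if (!submit || !submit->signalled)
			break;

		spin_lock(&ring->submit_lock);
		list_del(&submit->node);
		spin_unlock(&ring->submit_lock);

		/* drop the reference the list held on the submit here */
	}
}

In the real driver the entry is additionally reference-counted (msm_gem_submit_get()/msm_gem_submit_put() in the patch), so a submit cannot be freed between the unlocked peek and its retirement.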
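Finally, the kthread_worker conversion gives each GPU a dedicated kernel thread in place of the shared priv->wq workqueue, and sched_set_fifo_low() runs it at the lowest SCHED_FIFO priority so retire/recover work is not starved by ordinary tasks. A self-contained sketch of the same init/queue/teardown flow; the demo_* names are hypothetical:

/* Hypothetical demo_* names; mirrors the init/queue/teardown flow above. */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct demo_gpu {
	struct kthread_worker *worker;
	struct kthread_work retire_work;
};

static void demo_retire_worker(struct kthread_work *work)
{
	struct demo_gpu *gpu = container_of(work, struct demo_gpu, retire_work);

	/* retire completed submits for this GPU ... */
	(void)gpu;
}

static int demo_worker_init(struct demo_gpu *gpu)
{
	/* one dedicated worker thread per GPU instead of a shared workqueue */
	gpu->worker = kthread_create_worker(0, "demo-worker");
	if (IS_ERR(gpu->worker)) {
		int ret = PTR_ERR(gpu->worker);

		gpu->worker = NULL;
		return ret;
	}

	/* lowest SCHED_FIFO priority: above normal tasks, below other RT work */
	sched_set_fifo_low(gpu->worker->task);

	kthread_init_work(&gpu->retire_work, demo_retire_worker);
	return 0;
}

/* called from e.g. the IRQ handler */
static void demo_gpu_retire(struct demo_gpu *gpu)
{
	kthread_queue_work(gpu->worker, &gpu->retire_work);
}

static void demo_worker_fini(struct demo_gpu *gpu)
{
	if (gpu->worker)
		kthread_destroy_worker(gpu->worker);
}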
