Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r--   drivers/gpu/drm/drm_gem.c   38
1 file changed, 34 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8b55ece97967..d0b9f6a9953f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -37,6 +37,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <linux/mem_encrypt.h>
+#include <linux/pagevec.h>
 #include <drm/drmP.h>
 #include <drm/drm_vma_manager.h>
 #include <drm/drm_gem.h>
@@ -526,6 +527,17 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
+/*
+ * Move pages to appropriate lru and release the pagevec, decrementing the
+ * ref count of those pages.
+ */
+static void drm_gem_check_release_pagevec(struct pagevec *pvec)
+{
+	check_move_unevictable_pages(pvec);
+	__pagevec_release(pvec);
+	cond_resched();
+}
+
 /**
  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  * from shmem
@@ -551,6 +563,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 {
 	struct address_space *mapping;
 	struct page *p, **pages;
+	struct pagevec pvec;
 	int i, npages;
 
 	/* This is the shared memory object that backs the GEM resource */
@@ -568,6 +581,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 	if (pages == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	mapping_set_unevictable(mapping);
+
 	for (i = 0; i < npages; i++) {
 		p = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(p))
@@ -586,8 +601,14 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 	return pages;
 
 fail:
-	while (i--)
-		put_page(pages[i]);
+	mapping_clear_unevictable(mapping);
+	pagevec_init(&pvec);
+	while (i--) {
+		if (!pagevec_add(&pvec, pages[i]))
+			drm_gem_check_release_pagevec(&pvec);
+	}
+	if (pagevec_count(&pvec))
+		drm_gem_check_release_pagevec(&pvec);
 
 	kvfree(pages);
 	return ERR_CAST(p);
@@ -605,6 +626,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 		bool dirty, bool accessed)
 {
 	int i, npages;
+	struct address_space *mapping;
+	struct pagevec pvec;
+
+	mapping = file_inode(obj->filp)->i_mapping;
+	mapping_clear_unevictable(mapping);
 
 	/* We already BUG_ON() for non-page-aligned sizes in
 	 * drm_gem_object_init(), so we should never hit this unless
@@ -614,6 +640,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 
 	npages = obj->size >> PAGE_SHIFT;
 
+	pagevec_init(&pvec);
 	for (i = 0; i < npages; i++) {
 		if (dirty)
 			set_page_dirty(pages[i]);
@@ -622,15 +649,18 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 			mark_page_accessed(pages[i]);
 
 		/* Undo the reference we took when populating the table */
-		put_page(pages[i]);
+		if (!pagevec_add(&pvec, pages[i]))
+			drm_gem_check_release_pagevec(&pvec);
 	}
+	if (pagevec_count(&pvec))
+		drm_gem_check_release_pagevec(&pvec);
 
 	kvfree(pages);
 }
 EXPORT_SYMBOL(drm_gem_put_pages);
 
 /**
- * drm_gem_object_lookup - look up a GEM object from it's handle
+ * drm_gem_object_lookup - look up a GEM object from its handle
  * @filp: DRM file private date
  * @handle: userspace handle
  *
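For context, here is a minimal sketch of how a driver-side pin/unpin path would consume these helpers after this change; struct my_gem_object, my_gem_pin_pages() and my_gem_unpin_pages() are hypothetical names for illustration, not part of the patch:

#include <linux/err.h>
#include <drm/drm_gem.h>

struct my_gem_object {
	struct drm_gem_object base;
	struct page **pages;	/* cached backing pages while pinned */
};

static int my_gem_pin_pages(struct my_gem_object *bo)
{
	struct page **pages;

	/*
	 * With this patch, drm_gem_get_pages() also marks the shmem
	 * mapping unevictable, so the VM no longer wastes effort
	 * scanning the pinned pages for reclaim.
	 */
	pages = drm_gem_get_pages(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	bo->pages = pages;
	return 0;
}

static void my_gem_unpin_pages(struct my_gem_object *bo)
{
	/*
	 * Drops the page references, clears the unevictable flag on
	 * the mapping, and moves the pages back to the appropriate
	 * LRU lists in pagevec-sized batches.
	 */
	drm_gem_put_pages(&bo->base, bo->pages, true, false);
	bo->pages = NULL;
}

The pagevec batching in the patch itself amortizes the LRU bookkeeping: check_move_unevictable_pages() and __pagevec_release() each handle up to PAGEVEC_SIZE pages per call rather than touching the LRU once per page, and the cond_resched() keeps the teardown of large objects preemptible.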
