Diffstat (limited to 'drivers/gpu/drm/msm/msm_iommu.c')
-rw-r--r--	drivers/gpu/drm/msm/msm_iommu.c	302
1 file changed, 293 insertions, 9 deletions
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 739ce2c283a4..55c29f49b788 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -6,13 +6,18 @@
 #include <linux/adreno-smmu-priv.h>
 #include <linux/io-pgtable.h>
+#include <linux/kmemleak.h>
 
 #include "msm_drv.h"
+#include "msm_gpu_trace.h"
 #include "msm_mmu.h"
 
 struct msm_iommu {
 	struct msm_mmu base;
 	struct iommu_domain *domain;
 	atomic_t pagetables;
+	struct page *prr_page;
+
+	struct kmem_cache *pt_cache;
 };
 
 #define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
@@ -26,6 +31,9 @@ struct msm_iommu_pagetable {
 	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
 	phys_addr_t ttbr;
 	u32 asid;
+
+	/** @root_page_table: Stores the root page table pointer. */
+	void *root_page_table;
 };
 
 static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
 {
@@ -93,15 +101,24 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
 {
 	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
 	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+	int ret = 0;
 
 	while (size) {
-		size_t unmapped, pgsize, count;
+		size_t pgsize, count;
+		ssize_t unmapped;
 
 		pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
 
 		unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
-		if (!unmapped)
-			break;
+		if (unmapped <= 0) {
+			ret = -EINVAL;
+			/*
+			 * Continue attempting to unmap the remainder of the
+			 * range, so we don't end up with some dangling
+			 * mapped pages
+			 */
+			unmapped = PAGE_SIZE;
+		}
 
 		iova += unmapped;
 		size -= unmapped;
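The reworked unmap loop above no longer bails out on the first unmap_pages() failure: the error is latched as -EINVAL, but the walk keeps advancing (by PAGE_SIZE when nothing was unmapped) so the rest of the range is still torn down and no dangling mappings are left behind. A standalone sketch of that control flow is shown below; it is illustrative only, with stub_unmap() and the failing address invented to stand in for ops->unmap_pages().

#include <stdio.h>
#include <sys/types.h>

#define PGSZ 4096u

/* Stand-in for ops->unmap_pages(): pretends one page cannot be unmapped. */
static ssize_t stub_unmap(unsigned long iova, size_t size)
{
	return (iova == 0x3000) ? 0 : (ssize_t)size;
}

static int unmap_range(unsigned long iova, size_t size)
{
	int ret = 0;

	while (size) {
		ssize_t unmapped = stub_unmap(iova, PGSZ);

		if (unmapped <= 0) {
			ret = -22;		/* remember -EINVAL ... */
			unmapped = PGSZ;	/* ... but keep walking */
		}
		iova += unmapped;
		size -= unmapped;
	}
	return ret;
}

int main(void)
{
	/* The whole 32 KiB range is still walked; prints "ret = -22". */
	printf("ret = %d\n", unmap_range(0x0, 8 * PGSZ));
	return 0;
}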
@@ -109,11 +126,42 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
 
 	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
 
-	return (size == 0) ? 0 : -EINVAL;
+	return ret;
+}
+
+static int msm_iommu_pagetable_map_prr(struct msm_mmu *mmu, u64 iova, size_t len, int prot)
+{
+	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+	struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
+	phys_addr_t phys = page_to_phys(iommu->prr_page);
+	u64 addr = iova;
+
+	while (len) {
+		size_t mapped = 0;
+		size_t size = PAGE_SIZE;
+		int ret;
+
+		ret = ops->map_pages(ops, addr, phys, size, 1, prot, GFP_KERNEL, &mapped);
+
+		/* map_pages could fail after mapping some of the pages,
+		 * so update the counters before error handling.
+		 */
+		addr += mapped;
+		len  -= mapped;
+
+		if (ret) {
+			msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
 }
 
 static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
-		struct sg_table *sgt, size_t len, int prot)
+				   struct sg_table *sgt, size_t off, size_t len,
+				   int prot)
 {
 	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
 	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
@@ -121,10 +169,26 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
 	u64 addr = iova;
 	unsigned int i;
 
+	if (!sgt)
+		return msm_iommu_pagetable_map_prr(mmu, iova, len, prot);
+
 	for_each_sgtable_sg(sgt, sg, i) {
 		size_t size = sg->length;
 		phys_addr_t phys = sg_phys(sg);
 
+		if (!len)
+			break;
+
+		if (size <= off) {
+			off -= size;
+			continue;
+		}
+
+		phys += off;
+		size -= off;
+		size = min_t(size_t, size, len);
+		off = 0;
+
 		while (size) {
 			size_t pgsize, count, mapped = 0;
 			int ret;
@@ -140,6 +204,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
 			phys += mapped;
 			addr += mapped;
 			size -= mapped;
+			len  -= mapped;
 
 			if (ret) {
 				msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
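The new off/len arguments above let one map call cover only a window of the passed sg_table: entries that lie entirely before off are skipped, the first entry actually used is trimmed by the remaining offset, and mapping stops once len bytes have been consumed (a NULL sgt instead routes to the PRR path). A small self-contained sketch of that windowing over a plain array of segment sizes, with map_window() invented purely for illustration, behaves the same way:

#include <stdio.h>
#include <stddef.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Walk segment sizes and print the byte range used from each segment. */
static void map_window(const size_t *seg, int nsegs, size_t off, size_t len)
{
	for (int i = 0; i < nsegs && len; i++) {
		size_t size = seg[i];
		size_t start;

		if (size <= off) {	/* segment entirely before the window */
			off -= size;
			continue;
		}
		start = off;		/* trim the first segment that is used */
		size = MIN(size - off, len);
		off = 0;

		printf("seg %d: use [%zu, %zu)\n", i, start, start + size);
		len -= size;
	}
}

int main(void)
{
	const size_t segs[] = { 4096, 8192, 4096 };

	/* 8 KiB window starting 6 KiB in: last 6 KiB of seg 1, first 2 KiB of seg 2. */
	map_window(segs, 3, 6144, 8192);
	return 0;
}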
@@ -162,9 +227,16 @@ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
 	 * If this is the last attached pagetable for the parent,
 	 * disable TTBR0 in the arm-smmu driver
 	 */
-	if (atomic_dec_return(&iommu->pagetables) == 0)
+	if (atomic_dec_return(&iommu->pagetables) == 0) {
 		adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
 
+		if (adreno_smmu->set_prr_bit) {
+			adreno_smmu->set_prr_bit(adreno_smmu->cookie, false);
+			__free_page(iommu->prr_page);
+			iommu->prr_page = NULL;
+		}
+	}
+
 	free_io_pgtable_ops(pagetable->pgtbl_ops);
 	kfree(pagetable);
 }
@@ -217,7 +289,148 @@ msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[
 	return 0;
 }
 
+static void
+msm_iommu_pagetable_prealloc_count(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
+				   uint64_t iova, size_t len)
+{
+	u64 pt_count;
+
+	/*
+	 * L1, L2 and L3 page tables.
+	 *
+	 * We could optimize L3 allocation by iterating over the sgt and merging
+	 * 2M contiguous blocks, but it's simpler to over-provision and return
+	 * the pages if they're not used.
+	 *
+	 * The first level descriptor (v8 / v7-lpae page table format) encodes
+	 * 30 bits of address.  The second level encodes 29.  For the 3rd it is
+	 * 39.
+	 *
+	 * https://developer.arm.com/documentation/ddi0406/c/System-Level-Architecture/Virtual-Memory-System-Architecture--VMSA-/Long-descriptor-translation-table-format/Long-descriptor-translation-table-format-descriptors?lang=en#BEIHEFFB
+	 */
+	pt_count = ((ALIGN(iova + len, 1ull << 39) - ALIGN_DOWN(iova, 1ull << 39)) >> 39) +
+		   ((ALIGN(iova + len, 1ull << 30) - ALIGN_DOWN(iova, 1ull << 30)) >> 30) +
+		   ((ALIGN(iova + len, 1ull << 21) - ALIGN_DOWN(iova, 1ull << 21)) >> 21);
+
+	p->count += pt_count;
+}
+
+static struct kmem_cache *
+get_pt_cache(struct msm_mmu *mmu)
+{
+	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+	return to_msm_iommu(pagetable->parent)->pt_cache;
+}
+
+static int
+msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
+{
+	struct kmem_cache *pt_cache = get_pt_cache(mmu);
+	int ret;
+
+	p->pages = kvmalloc_array(p->count, sizeof(p->pages), GFP_KERNEL);
+	if (!p->pages)
+		return -ENOMEM;
+
+	ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
+	if (ret != p->count) {
+		p->count = ret;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void
+msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
+{
+	struct kmem_cache *pt_cache = get_pt_cache(mmu);
+	uint32_t remaining_pt_count = p->count - p->ptr;
+
+	if (p->count > 0)
+		trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);
+
+	kmem_cache_free_bulk(pt_cache, remaining_pt_count, &p->pages[p->ptr]);
+	kvfree(p->pages);
+}
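The prealloc_count() helper above over-provisions by asking, per level, how many tables the range [iova, iova + len) can possibly touch; with the shifts used in the patch each L1/L2/L3 table spans 512 GiB, 1 GiB and 2 MiB of address space respectively (4 KiB granule). The arithmetic can be checked with the standalone sketch below; ALIGN_UP/ALIGN_DOWN are local macros standing in for the kernel's ALIGN()/ALIGN_DOWN(), and the example range is invented.

#include <stdio.h>
#include <stdint.h>

#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))
#define ALIGN_UP(x, a)		ALIGN_DOWN((x) + (a) - 1, a)

/* Number of tables at one level, where each table spans (1 << shift) bytes. */
static uint64_t tables_at_level(uint64_t iova, uint64_t len, unsigned int shift)
{
	uint64_t span = 1ull << shift;

	return (ALIGN_UP(iova + len, span) - ALIGN_DOWN(iova, span)) >> shift;
}

int main(void)
{
	uint64_t iova = 0x1ffe00000ull;	/* 4 MiB mapping that crosses a 1 GiB boundary */
	uint64_t len  = 0x400000ull;
	uint64_t count;

	count = tables_at_level(iova, len, 39) +	/* L1: 512 GiB per table -> 1 */
		tables_at_level(iova, len, 30) +	/* L2: 1 GiB per table   -> 2 */
		tables_at_level(iova, len, 21);		/* L3: 2 MiB per table   -> 2 */

	printf("worst-case page tables: %llu\n", (unsigned long long)count);	/* 5 */
	return 0;
}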
+
+/**
+ * alloc_pt() - Custom page table allocator
+ * @cookie: Cookie passed at page table allocation time.
+ * @size: Size of the page table. This size should be fixed,
+ * and determined at creation time based on the granule size.
+ * @gfp: GFP flags.
+ *
+ * We want a custom allocator so we can use a cache for page table
+ * allocations and amortize the cost of the over-reservation that's
+ * done to allow asynchronous VM operations.
+ *
+ * Return: non-NULL on success, NULL if the allocation failed for any
+ * reason.
+ */
+static void *
+msm_iommu_pagetable_alloc_pt(void *cookie, size_t size, gfp_t gfp)
+{
+	struct msm_iommu_pagetable *pagetable = cookie;
+	struct msm_mmu_prealloc *p = pagetable->base.prealloc;
+	void *page;
+
+	/* Allocation of the root page table happening during init. */
+	if (unlikely(!pagetable->root_page_table)) {
+		struct page *p;
+
+		p = alloc_pages_node(dev_to_node(pagetable->iommu_dev),
+				     gfp | __GFP_ZERO, get_order(size));
+		page = p ? page_address(p) : NULL;
+		pagetable->root_page_table = page;
+		return page;
+	}
+
+	if (WARN_ON(!p) || WARN_ON(p->ptr >= p->count))
+		return NULL;
+
+	page = p->pages[p->ptr++];
+	memset(page, 0, size);
+
+	/*
+	 * Page table entries don't use virtual addresses, which trips out
+	 * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
+	 * are mixed with other fields, and I fear kmemleak won't detect that
+	 * either.
+	 *
+	 * Let's just ignore memory passed to the page-table driver for now.
+	 */
+	kmemleak_ignore(page);
+
+	return page;
+}
+
+
+/**
+ * free_pt() - Custom page table free function
+ * @cookie: Cookie passed at page table allocation time.
+ * @data: Page table to free.
+ * @size: Size of the page table. This size should be fixed,
+ * and determined at creation time based on the granule size.
+ */
+static void
+msm_iommu_pagetable_free_pt(void *cookie, void *data, size_t size)
+{
+	struct msm_iommu_pagetable *pagetable = cookie;
+
+	if (unlikely(pagetable->root_page_table == data)) {
+		free_pages((unsigned long)data, get_order(size));
+		pagetable->root_page_table = NULL;
+		return;
+	}
+
+	kmem_cache_free(get_pt_cache(&pagetable->base), data);
+}
+
 static const struct msm_mmu_funcs pagetable_funcs = {
+		.prealloc_count = msm_iommu_pagetable_prealloc_count,
+		.prealloc_allocate = msm_iommu_pagetable_prealloc_allocate,
+		.prealloc_cleanup = msm_iommu_pagetable_prealloc_cleanup,
 		.map = msm_iommu_pagetable_map,
 		.unmap = msm_iommu_pagetable_unmap,
 		.destroy = msm_iommu_pagetable_destroy,
@@ -268,7 +481,18 @@ static const struct iommu_flush_ops tlb_ops = {
 static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
 		unsigned long iova, int flags, void *arg);
 
-struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+static size_t get_tblsz(const struct io_pgtable_cfg *cfg)
+{
+	int pg_shift, bits_per_level;
+
+	pg_shift = __ffs(cfg->pgsize_bitmap);
+	/* arm_lpae_iopte is u64: */
+	bits_per_level = pg_shift - ilog2(sizeof(u64));
+
+	return sizeof(u64) << bits_per_level;
+}
+
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed)
 {
 	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
 	struct msm_iommu *iommu = to_msm_iommu(parent);
@@ -302,6 +526,36 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
 	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
 	ttbr0_cfg.tlb = &tlb_ops;
 
+	if (!kernel_managed) {
+		ttbr0_cfg.quirks |= IO_PGTABLE_QUIRK_NO_WARN;
+
+		/*
+		 * With userspace managed VM (aka VM_BIND), we need to pre-
+		 * allocate pages ahead of time for map/unmap operations,
+		 * handing them to io-pgtable via custom alloc/free ops as
+		 * needed:
+		 */
+		ttbr0_cfg.alloc = msm_iommu_pagetable_alloc_pt;
+		ttbr0_cfg.free  = msm_iommu_pagetable_free_pt;
+
+		/*
+		 * Restrict to single page granules.  Otherwise we may run
+		 * into a situation where userspace wants to unmap/remap
+		 * only a part of a larger block mapping, which is not
+		 * possible without unmapping the entire block.  Which in
+		 * turn could cause faults if the GPU is accessing other
+		 * parts of the block mapping.
+		 *
+		 * Note that prior to commit 33729a5fc0ca ("iommu/io-pgtable-arm:
+		 * Remove split on unmap behavior") this was handled in
+		 * io-pgtable-arm.  But this apparently does not work
+		 * correctly on SMMUv3.
+		 */
+		WARN_ON(!(ttbr0_cfg.pgsize_bitmap & PAGE_SIZE));
+		ttbr0_cfg.pgsize_bitmap = PAGE_SIZE;
+	}
+
+	pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
 	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
 		&ttbr0_cfg, pagetable);
 
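get_tblsz() above derives the size of one io-pgtable table from the configured granule: a table holds (granule / sizeof(u64)) eight-byte descriptors, so it is exactly one granule in size, and that value is later used to size the "msm-mmu-pt" kmem_cache. A standalone check of that arithmetic is sketched below; userspace ffs() is 1-based, unlike the kernel's __ffs(), hence the -1, and the bitmaps are just example values.

#include <stdio.h>
#include <stdint.h>
#include <strings.h>

/* Mirror of the table-size calculation: descriptors are u64s. */
static size_t tblsz(unsigned int pgsize_bitmap)
{
	int pg_shift = ffs(pgsize_bitmap) - 1;	/* smallest supported granule */
	int bits_per_level = pg_shift - 3;	/* log2(sizeof(uint64_t)) == 3 */

	return sizeof(uint64_t) << bits_per_level;
}

int main(void)
{
	/* 4 KiB | 2 MiB | 1 GiB bitmap -> 4096-byte tables */
	printf("%zu\n", tblsz(0x40201000));
	/* 16 KiB granule -> 16384-byte tables */
	printf("%zu\n", tblsz(0x4000));
	return 0;
}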
@@ -321,12 +575,30 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
 			kfree(pagetable);
 			return ERR_PTR(ret);
 		}
+
+		BUG_ON(iommu->prr_page);
+		if (adreno_smmu->set_prr_bit) {
+			/*
+			 * We need a zero'd page for two reasons:
+			 *
+			 * 1) Reserve a known physical address to use when
+			 *    mapping NULL / sparsely resident regions
+			 * 2) Read back zero
+			 *
+			 * It appears the hw drops writes to the PRR region
+			 * on the floor, but reads actually return whatever
+			 * is in the PRR page.
+			 */
+			iommu->prr_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+			adreno_smmu->set_prr_addr(adreno_smmu->cookie,
+						  page_to_phys(iommu->prr_page));
+			adreno_smmu->set_prr_bit(adreno_smmu->cookie, true);
+		}
 	}
 
 	/* Needed later for TLB flush */
 	pagetable->parent = parent;
 	pagetable->tlb = ttbr1_cfg->tlb;
-	pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
 	pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
 	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
@@ -388,11 +660,14 @@ static void msm_iommu_detach(struct msm_mmu *mmu)
 }
 
 static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
-		struct sg_table *sgt, size_t len, int prot)
+			 struct sg_table *sgt, size_t off, size_t len,
+			 int prot)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	size_t ret;
 
+	WARN_ON(off != 0);
+
 	/* The arm-smmu driver expects the addresses to be sign extended */
 	if (iova & BIT_ULL(48))
 		iova |= GENMASK_ULL(63, 49);
@@ -419,6 +694,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	iommu_domain_free(iommu->domain);
+	kmem_cache_destroy(iommu->pt_cache);
 	kfree(iommu);
 }
 
@@ -492,6 +768,14 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
 		return mmu;
 
 	iommu = to_msm_iommu(mmu);
+	if (adreno_smmu && adreno_smmu->cookie) {
+		const struct io_pgtable_cfg *cfg =
+			adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+		size_t tblsz = get_tblsz(cfg);
+
+		iommu->pt_cache =
+			kmem_cache_create("msm-mmu-pt", tblsz, tblsz, 0, NULL);
+	}
 	iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu);
 
 	/* Enable stall on iommu fault: */
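The PRR page set up in msm_iommu_pagetable_create() above gives NULL / sparsely resident GPU pages a single shared physical backing, so reads return whatever is in that zeroed page while the hardware drops writes. As a rough host-side analogy only (this is not how the SMMU is programmed; the real mechanism is the set_prr_addr()/set_prr_bit() calls above, and all names here are invented), sparse pages can alias one shared zero page:

#include <stdio.h>

#define PGSZ	4096
#define NPAGES	8

static unsigned char prr_page[PGSZ];		/* single shared, zero-filled page */
static unsigned char real_pages[NPAGES][PGSZ];	/* backing for resident pages */
static int resident[NPAGES];			/* toy "page table": 1 = real backing */

static const unsigned char *lookup(int pfn)
{
	return resident[pfn] ? real_pages[pfn] : prr_page;
}

static void store(int pfn, int off, unsigned char v)
{
	if (resident[pfn])		/* writes to PRR-backed pages are dropped */
		real_pages[pfn][off] = v;
}

int main(void)
{
	resident[2] = 1;
	store(2, 0, 0xaa);	/* resident: lands in real backing */
	store(5, 0, 0xbb);	/* sparse: silently discarded */
	printf("page2[0]=%#x page5[0]=%#x\n", lookup(2)[0], lookup(5)[0]);
	return 0;		/* prints page2[0]=0xaa page5[0]=0 */
}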
