diff options
Diffstat (limited to 'drivers/gpu/drm/xe/xe_migrate.c')
| -rw-r--r-- | drivers/gpu/drm/xe/xe_migrate.c | 44 | 
1 file changed, 28 insertions, 16 deletions
| diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index ba1cff2e4cda..84f412fd3c5d 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -408,7 +408,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)  	/* Special layout, prepared below.. */  	vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION | -			  XE_VM_FLAG_SET_TILE_ID(tile)); +			  XE_VM_FLAG_SET_TILE_ID(tile), NULL);  	if (IS_ERR(vm))  		return ERR_CAST(vm); @@ -1820,15 +1820,19 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,  	if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||  	    !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {  		int buf_offset = 0; +		void *bounce; +		int err; + +		BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES)); +		bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL); +		if (!bounce) +			return -ENOMEM;  		/*  		 * Less than ideal for large unaligned access but this should be  		 * fairly rare, can fixup if this becomes common.  		 
*/  		do { -			u8 bounce[XE_CACHELINE_BYTES]; -			void *ptr = (void *)bounce; -			int err;  			int copy_bytes = min_t(int, bytes_left,  					       XE_CACHELINE_BYTES -  					       (offset & XE_CACHELINE_MASK)); @@ -1837,22 +1841,22 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,  			err = xe_migrate_access_memory(m, bo,  						       offset &  						       ~XE_CACHELINE_MASK, -						       (void *)ptr, -						       sizeof(bounce), 0); +						       bounce, +						       XE_CACHELINE_BYTES, 0);  			if (err) -				return err; +				break;  			if (write) { -				memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes); +				memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);  				err = xe_migrate_access_memory(m, bo,  							       offset & ~XE_CACHELINE_MASK, -							       (void *)ptr, -							       sizeof(bounce), write); +							       bounce, +							       XE_CACHELINE_BYTES, write);  				if (err) -					return err; +					break;  			} else { -				memcpy(buf + buf_offset, ptr + ptr_offset, +				memcpy(buf + buf_offset, bounce + ptr_offset,  				       copy_bytes);  			} @@ -1861,7 +1865,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,  			offset += copy_bytes;  		} while (bytes_left); -		return 0; +		kfree(bounce); +		return err;  	}  	dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write); @@ -1882,8 +1887,11 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,  		else  			current_bytes = min_t(int, bytes_left, cursor.size); -		if (fence) -			dma_fence_put(fence); +		if (current_bytes & ~PAGE_MASK) { +			int pitch = 4; + +			current_bytes = min_t(int, current_bytes, S16_MAX * pitch); +		}  		__fence = xe_migrate_vram(m, current_bytes,  					  (unsigned long)buf & ~PAGE_MASK, @@ -1892,11 +1900,15 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,  					  XE_MIGRATE_COPY_TO_VRAM :  					  XE_MIGRATE_COPY_TO_SRAM);  		if (IS_ERR(__fence)) { -			if (fence) +			if 
(fence) {  				dma_fence_wait(fence, false); +				dma_fence_put(fence); +			}  			fence = __fence;  			goto out_err;  		} + +		dma_fence_put(fence);  		fence = __fence;  		buf += current_bytes; | 
