author		Juergen Gross <jgross@suse.com>	2025-02-10 08:43:39 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2025-06-04 14:32:37 +0200
commit		c3ccbd8625c84f7808e7be29de871440a8c62e3a (patch)
tree		ccd829a713e14bc056e0983ab7ac06d6a3576587
parent		9fe7b0804390dbf2204bd20d69765255ba825c84 (diff)
xen/swiotlb: relax alignment requirements
commit 85fcb57c983f423180ba6ec5d0034242da05cc54 upstream.

When mapping a buffer for DMA via the .map_page or .map_sg DMA
operations, there is no need to check that the machine frames are
aligned to the mapped area's size. All that is needed in these cases
is that the buffer is contiguous at machine level.

So carve out the alignment check from range_straddles_page_boundary()
and move it into a helper called by xen_swiotlb_alloc_coherent() and
xen_swiotlb_free_coherent() directly.

Fixes: 9f40ec84a797 ("xen/swiotlb: add alignment check for dma buffers")
Reported-by: Jan Vejvalka <jan.vejvalka@lfmotol.cuni.cz>
Tested-by: Jan Vejvalka <jan.vejvalka@lfmotol.cuni.cz>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Harshvardhan Jha <harshvardhan.j.jha@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
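[Editor's note: for illustration, below is a minimal userspace sketch of the
logic the new range_requires_alignment() helper implements. It is not kernel
code: pfn_to_bfn() is replaced by a hypothetical linear guest-to-machine
frame translation, get_order() and IS_ALIGNED() are reimplemented locally,
and XEN_PAGE_SHIFT is assumed equal to PAGE_SHIFT. It demonstrates the case
the helper is meant to catch: a buffer aligned to its own size at the
physical level whose machine (bus) address is not so aligned.]

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

	/* Hypothetical guest-frame to machine-frame translation:
	 * pretend guest frame N is backed by machine frame N + 3. */
	static uint64_t pfn_to_bfn(uint64_t pfn)
	{
		return pfn + 3;
	}

	/* Local reimplementation of the kernel's get_order():
	 * smallest order such that (1 << order) pages cover 'size'. */
	static int get_order(size_t size)
	{
		int order = 0;

		size = (size - 1) >> PAGE_SHIFT;
		while (size) {
			order++;
			size >>= 1;
		}
		return order;
	}

	/* Mirrors the new helper: true if the buffer is aligned to its
	 * own size physically, but its machine address is not. */
	static bool range_requires_alignment(uint64_t p, size_t size)
	{
		uint64_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
		uint64_t bus_addr = pfn_to_bfn(p >> PAGE_SHIFT) << PAGE_SHIFT;

		return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
	}

	int main(void)
	{
		uint64_t phys = 4 * PAGE_SIZE;	/* 0x4000, 16 KiB aligned */
		size_t size = 4 * PAGE_SIZE;	/* 16 KiB allocation */

		/* Guest frame 4 maps to machine frame 7, so the bus
		 * address is 0x7000, which is not 16 KiB aligned. */
		printf("requires alignment: %d\n",
		       range_requires_alignment(phys, size));
		return 0;
	}

This prints "requires alignment: 1": a coherent allocation of this buffer
would have to be remade via xen_create_contiguous_region().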
-rw-r--r--	drivers/xen/swiotlb-xen.c	18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 5bd29d0bffa2..382ee7dc3d5d 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -85,19 +85,21 @@ static inline dma_addr_t xen_virt_to_bus(void *address)
 	return xen_phys_to_bus(virt_to_phys(address));
 }
 
+static inline bool range_requires_alignment(phys_addr_t p, size_t size)
+{
+	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
+	phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
+
+	return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
+}
+
 static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 {
 	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
 	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
-	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
 
 	next_bfn = pfn_to_bfn(xen_pfn);
 
-	/* If buffer is physically aligned, ensure DMA alignment. */
-	if (IS_ALIGNED(p, algn) &&
-	    !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
-		return 1;
-
 	for (i = 1; i < nr_pages; i++)
 		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
 			return 1;
@@ -320,7 +322,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	phys = *dma_handle;
 	dev_addr = xen_phys_to_bus(phys);
 	if (((dev_addr + size - 1 <= dma_mask)) &&
-	    !range_straddles_page_boundary(phys, size))
+	    !range_straddles_page_boundary(phys, size) &&
+	    !range_requires_alignment(phys, size))
 		*dma_handle = dev_addr;
 	else {
 		if (xen_create_contiguous_region(phys, order,
@@ -360,6 +363,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 
 	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
 		     range_straddles_page_boundary(phys, size)) &&
+	    !range_requires_alignment(phys, size) &&
 	    TestClearPageXenRemapped(page))
 		xen_destroy_contiguous_region(phys, order);
 
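
[Editor's note: by contrast, the check that remains in
range_straddles_page_boundary() only demands machine-level contiguity, which
is all that the .map_page/.map_sg paths need. A companion sketch under the
same assumptions as above (hypothetical linear pfn_to_bfn(), XEN_PAGE_SHIFT
equal to PAGE_SHIFT):]

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	/* Same hypothetical translation as in the earlier sketch. */
	static uint64_t pfn_to_bfn(uint64_t pfn)
	{
		return pfn + 3;
	}

	/* Mirrors range_straddles_page_boundary() after the patch:
	 * report a problem only if two adjacent guest pages map to
	 * non-consecutive machine frames, i.e. the range is not
	 * contiguous at machine level. */
	static bool range_straddles_page_boundary(uint64_t p, size_t size)
	{
		uint64_t xen_pfn = p >> PAGE_SHIFT;
		uint64_t offset = p & (PAGE_SIZE - 1);
		uint64_t nr_pages = (offset + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		uint64_t next_bfn = pfn_to_bfn(xen_pfn);
		uint64_t i;

		for (i = 1; i < nr_pages; i++)
			if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
				return true;
		return false;
	}

	int main(void)
	{
		/* The 16 KiB buffer from the earlier sketch: its machine
		 * frames (7, 8, 9, 10) are consecutive, so mapping it for
		 * DMA is fine even though its bus address is not 16 KiB
		 * aligned. */
		printf("straddles: %d\n",
		       range_straddles_page_boundary(4 * PAGE_SIZE,
						     4 * PAGE_SIZE));
		return 0;
	}

This prints "straddles: 0". Before the patch, the alignment test inside
range_straddles_page_boundary() would have flagged this same buffer on the
map paths and forced an unnecessary bounce; splitting the two predicates
keeps the stricter alignment check on the coherent-allocation paths only.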