author    | Linus Torvalds <torvalds@linux-foundation.org> | 2020-12-22 13:19:43 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-12-22 13:19:43 -0800
commit    | 347d81b68b8f7044c9ce3fefa130a736ca916176 (patch)
tree      | ee07d2f350f34ae0a8bdd904251faa61144cb778 /kernel/dma/mapping.c
parent    | 4f06f210673e6841439a0f91fcde64960cdbeb5c (diff)
parent    | 7679325702c90aecd393cd7cde685576c14489c0 (diff)
Merge tag 'dma-mapping-5.11' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
- support for a partial IOMMU bypass (Alexey Kardashevskiy)
- add a DMA API benchmark (Barry Song); see the usage sketch after the shortlog below
- misc fixes (Tiezhu Yang, tangjianqiang)
* tag 'dma-mapping-5.11' of git://git.infradead.org/users/hch/dma-mapping:
selftests/dma: add test application for DMA_MAP_BENCHMARK
dma-mapping: add benchmark support for streaming DMA APIs
dma-contiguous: fix a typo error in a comment
dma-pool: no need to check return value of debugfs_create functions
powerpc/dma: Fallback to dma_ops when persistent memory present
dma-mapping: Allow mixing bypass and mapped DMA operation
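The benchmark entries above ("dma-mapping: add benchmark support for streaming DMA APIs" and the matching selftest) add a kernel module that times dma_map_page()/dma_unmap_page() from multiple threads and reports the results to userspace through a character device and an ioctl. The C sketch below is a hedged illustration of that flow only: the device node, the DMA_MAP_BENCHMARK ioctl number, and the struct map_benchmark layout shown here are paraphrased assumptions and should be verified against kernel/dma/map_benchmark.c and tools/testing/selftests/dma/dma_map_benchmark.c in the merged tree before relying on them.

/*
 * Hedged sketch: drive the DMA mapping benchmark from userspace.
 * Device node, ioctl command and struct layout are assumptions based on
 * this series; the authoritative definitions are in the kernel sources
 * (kernel/dma/map_benchmark.c and the selftest under
 * tools/testing/selftests/dma/).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ioctl.h>

/* Assumed ioctl number and layout -- check against the kernel headers. */
#define DMA_MAP_BENCHMARK	_IOWR('d', 1, struct map_benchmark)

struct map_benchmark {
	uint64_t avg_map_100ns;   /* reported average map latency, 100ns units */
	uint64_t map_stddev;      /* standard deviation of map latency */
	uint64_t avg_unmap_100ns; /* reported average unmap latency, 100ns units */
	uint64_t unmap_stddev;    /* standard deviation of unmap latency */
	uint32_t threads;         /* how many threads map/unmap in parallel */
	uint32_t seconds;         /* how long the benchmark runs */
	int32_t  node;            /* NUMA node to run on, -1 for any */
	uint32_t dma_bits;        /* DMA addressing capability to test with */
	uint32_t dma_dir;         /* DMA data direction */
	uint8_t  expansion[84];   /* reserved for future fields */
};

int main(void)
{
	struct map_benchmark map = {
		.threads  = 1,
		.seconds  = 10,
		.node     = -1,
		.dma_bits = 32,
	};
	int fd = open("/dev/dma_map_benchmark", O_RDWR);

	if (fd < 0) {
		perror("open /dev/dma_map_benchmark");
		return 1;
	}
	if (ioctl(fd, DMA_MAP_BENCHMARK, &map)) {
		perror("DMA_MAP_BENCHMARK");
		close(fd);
		return 1;
	}
	/* 100ns units: /10 gives whole microseconds, %10 gives tenths. */
	printf("map latency: %lu.%lu us, unmap latency: %lu.%lu us\n",
	       (unsigned long)(map.avg_map_100ns / 10),
	       (unsigned long)(map.avg_map_100ns % 10),
	       (unsigned long)(map.avg_unmap_100ns / 10),
	       (unsigned long)(map.avg_unmap_100ns % 10));
	close(fd);
	return 0;
}

In practice the benchmark is bound to a real device (so its dma_map_ops are exercised) and the selftest wraps the ioctl in command-line options; the sketch only shows the ioctl round trip.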
Diffstat (limited to 'kernel/dma/mapping.c')
-rw-r--r-- | kernel/dma/mapping.c | 12 | ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 51bb8fa8eb89..f87a89d08654 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -149,7 +149,8 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 	if (WARN_ON_ONCE(!dev->dma_mask))
 		return DMA_MAPPING_ERROR;
 
-	if (dma_map_direct(dev, ops))
+	if (dma_map_direct(dev, ops) ||
+	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
 		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
 	else
 		addr = ops->map_page(dev, page, offset, size, dir, attrs);
@@ -165,7 +166,8 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (dma_map_direct(dev, ops))
+	if (dma_map_direct(dev, ops) ||
+	    arch_dma_unmap_page_direct(dev, addr + size))
 		dma_direct_unmap_page(dev, addr, size, dir, attrs);
 	else if (ops->unmap_page)
 		ops->unmap_page(dev, addr, size, dir, attrs);
@@ -188,7 +190,8 @@ int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
 	if (WARN_ON_ONCE(!dev->dma_mask))
 		return 0;
 
-	if (dma_map_direct(dev, ops))
+	if (dma_map_direct(dev, ops) ||
+	    arch_dma_map_sg_direct(dev, sg, nents))
 		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
 	else
 		ents = ops->map_sg(dev, sg, nents, dir, attrs);
@@ -207,7 +210,8 @@ void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
 
 	BUG_ON(!valid_dma_direction(dir));
 	debug_dma_unmap_sg(dev, sg, nents, dir);
-	if (dma_map_direct(dev, ops))
+	if (dma_map_direct(dev, ops) ||
+	    arch_dma_unmap_sg_direct(dev, sg, nents))
 		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
 	else if (ops->unmap_sg)
 		ops->unmap_sg(dev, sg, nents, dir, attrs);
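The hunks above only read sensibly together with the arch_dma_*_direct() hooks introduced elsewhere in this series ("dma-mapping: Allow mixing bypass and mapped DMA operation" and the powerpc persistent-memory patch): an architecture that can only partially bypass the IOMMU implements the hooks to say, per mapping, whether the address still fits the direct-mapped window, while everyone else must get hooks that compile away to false so the old dma_map_direct() decision is unchanged. The fragment below is a hedged sketch of that fallback pattern; the guard name CONFIG_ARCH_HAS_DMA_MAP_DIRECT and the header (include/linux/dma-map-ops.h) are taken from this series but should be confirmed against the merged tree.

/*
 * Hedged sketch of the arch bypass hooks used in the hunks above.
 * Architectures that can only partially bypass the IOMMU (powerpc with
 * persistent memory above the direct-mapped window in this series)
 * select a config option and provide real implementations; everyone
 * else gets constant-false stubs so dma_map_direct() stays the only
 * check.  Guard and header names are assumptions based on this series.
 */
#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
/* Return true if this particular mapping fits the direct-map window. */
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
			    int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
			      int nents);
#else
/* No partial bypass: the hooks fold away and the old behaviour holds. */
#define arch_dma_map_page_direct(d, a)		(false)
#define arch_dma_unmap_page_direct(d, a)	(false)
#define arch_dma_map_sg_direct(d, s, n)		(false)
#define arch_dma_unmap_sg_direct(d, s, n)	(false)
#endif

With the stubs in place the compiler can reduce the new "||" in dma_map_page_attrs() and friends back to the original dma_map_direct() test, so architectures that do not select the option pay nothing for the extra branch.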