FROMGIT: iommu: generalize the batched sync after map interface
For the upcoming IOVA-based DMA API we want to batch the
ops->iotlb_sync_map() call after mapping multiple IOVAs from
dma-iommu without having a scatterlist. Improve the API.

Add iommu_sync_map() as a wrapper for the iotlb_sync_map method so
that callers don't need to poke into the methods directly.

Formalize __iommu_map() into iommu_map_nosync(), which requires the
caller to call iommu_sync_map() after all maps are completed.

Refactor the existing sanity checks from all the different layers
into iommu_map_nosync().
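
For illustration, a hypothetical caller of the new pair could look
like the sketch below. map_batch() and its parameters are made up;
only iommu_map_nosync(), iommu_sync_map() and iommu_unmap() come from
this patch:

/*
 * Sketch of the batched pattern: map several chunks without
 * per-chunk syncs, then issue a single sync over the whole range.
 */
static int map_batch(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t *chunks, unsigned int count,
		     size_t chunk_size)
{
	unsigned int i;
	int ret;

	for (i = 0; i < count; i++) {
		ret = iommu_map_nosync(domain, iova + i * chunk_size,
				       chunks[i], chunk_size,
				       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
		if (ret)
			goto out_unmap;
	}

	/* One iotlb_sync_map call for the whole batch. */
	ret = iommu_sync_map(domain, iova, (size_t)count * chunk_size);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	/* Undo whatever was mapped before the failure. */
	if (i)
		iommu_unmap(domain, iova, (size_t)i * chunk_size);
	return ret;
}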
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Will Deacon <will@kernel.org>
Tested-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
(cherry picked from commit 5c87cffe2d3853cfae61e9373ee98a0409839178
https://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux.git)
Bug: 416098329
[Daniel: Adapted to commit 1a608d2b0d ("BACKPORT: FROMLIST: drivers/iommu:
Add deferred map_sg operations")]
Signed-off-by: Daniel Mentz <danielmentz@google.com>
Change-Id: Iefa5c86c91bc5b89113a68798898a3b5c6896edc
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2438,8 +2438,8 @@ out_set_count:
 	return pgsize;
 }
 
-static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
-		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
+		phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
 	const struct iommu_domain_ops *ops = domain->ops;
 	unsigned long orig_iova = iova;
@@ -2448,12 +2448,19 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
 	phys_addr_t orig_paddr = paddr;
 	int ret = 0;
 
+	might_sleep_if(gfpflags_allow_blocking(gfp));
+
 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
 		return -EINVAL;
 
 	if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL))
 		return -ENODEV;
 
+	/* Discourage passing strange GFP flags */
+	if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
+				__GFP_HIGHMEM)))
+		return -EINVAL;
+
 	/* find out the minimum page size supported */
 	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
 
@@ -2501,31 +2508,27 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
 	return ret;
 }
 
+int iommu_sync_map(struct iommu_domain *domain, unsigned long iova, size_t size)
+{
+	const struct iommu_domain_ops *ops = domain->ops;
+
+	if (!ops->iotlb_sync_map)
+		return 0;
+	return ops->iotlb_sync_map(domain, iova, size);
+}
+
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
-	const struct iommu_domain_ops *ops = domain->ops;
 	int ret;
 
-	might_sleep_if(gfpflags_allow_blocking(gfp));
+	ret = iommu_map_nosync(domain, iova, paddr, size, prot, gfp);
+	if (ret)
+		return ret;
 
-	/* Discourage passing strange GFP flags */
-	if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
-				__GFP_HIGHMEM)))
-		return -EINVAL;
-
-	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
-	if (ret == 0 && ops->iotlb_sync_map) {
-		ret = ops->iotlb_sync_map(domain, iova, size);
-		if (ret)
-			goto out_err;
-	}
-
-	return ret;
-
-out_err:
-	/* undo mappings already done */
-	iommu_unmap(domain, iova, size);
+	ret = iommu_sync_map(domain, iova, size);
+	if (ret)
+		iommu_unmap(domain, iova, size);
 
 	return ret;
 }
@@ -2651,13 +2654,6 @@ ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 		ops->consume_deferred_map_sg;
 	struct iommu_map_cookie_sg *cookie_sg;
 
-	might_sleep_if(gfpflags_allow_blocking(gfp));
-
-	/* Discourage passing strange GFP flags */
-	if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
-				__GFP_HIGHMEM)))
-		return -EINVAL;
-
 	if (deferred_sg) {
 		cookie_sg = ops->alloc_cookie_sg(iova, prot, nents, gfp);
 		if (!cookie_sg) {
@@ -2674,9 +2670,8 @@ ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 		if (deferred_sg)
 			ret = __iommu_add_sg(cookie_sg, iova + mapped, start, len);
 		else
-			ret = __iommu_map(domain, iova + mapped, start,
+			ret = iommu_map_nosync(domain, iova + mapped, start,
 					len, prot, gfp);
-
 		if (ret)
 			goto out_err;
 
@@ -2710,11 +2705,10 @@ next:
 		}
 	}
 
-	if (ops->iotlb_sync_map) {
-		ret = ops->iotlb_sync_map(domain, iova, mapped);
-		if (ret)
-			goto out_err;
-	}
+	ret = iommu_sync_map(domain, iova, mapped);
+	if (ret)
+		goto out_err;
 
 	return mapped;
 
 out_err:
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -821,6 +821,10 @@ extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
 extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
 		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
+		phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+int iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+		size_t size);
 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 			  size_t size);
 extern size_t iommu_unmap_fast(struct iommu_domain *domain,
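
On the driver side the contract is unchanged: iommu_sync_map() calls
the existing ops->iotlb_sync_map(domain, iova, size) method, only now
once per batch instead of once per map call. A hypothetical driver
implementation, with my_flush_range() and to_my_domain() made up for
the sketch:

/* One range flush now covers the whole batch of maps. */
static int my_iotlb_sync_map(struct iommu_domain *domain,
			     unsigned long iova, size_t size)
{
	return my_flush_range(to_my_domain(domain), iova, size);
}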