ANDROID: iommu/iommu: Handle multi-page deferred sg mappings
The `__iommu_add_sg` function currently assumes that a scatter-gather
entry always maps with a single IOMMU page size: it computes one
(pgsize, count) pair and returns after a single call to
`ops->add_deferred_map_sg`. However, one scatter-gather entry can span
multiple IOMMU page sizes (for example when part of it is covered by a
larger page-size mapping), in which case the remainder of the entry is
silently dropped from the deferred mapping.
This patch modifies `__iommu_add_sg` to iteratively call
`ops->add_deferred_map_sg` for each IOMMU page-sized chunk within a
scatter-gather entry, ensuring that all parts of the entry are
correctly handled for deferred mappings.
Fixes: 1a608d2b0d ("BACKPORT: FROMLIST: drivers/iommu: Add deferred map_sg operations")
Change-Id: I2d2edbd5fa3d37c0b1e4a9976824b77a9ef39676
Signed-off-by: Ryan Huang <tzukui@google.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
This commit is contained in:
Committed by: Mostafa Saleh
Parent: 740d42d181
Commit: 672185e575
@@ -1747,14 +1747,22 @@ struct kvm_iommu_sg {
|
||||
unsigned int pgcount;
|
||||
};
|
||||
|
||||
|
||||
#define kvm_iommu_sg_nents_size(n) (PAGE_ALIGN((n) * sizeof(struct kvm_iommu_sg)))
|
||||
|
||||
static inline unsigned int kvm_iommu_sg_nents_round(unsigned int nents)
|
||||
{
|
||||
return kvm_iommu_sg_nents_size(nents) / sizeof(struct kvm_iommu_sg);
|
||||
}
|
||||
|
||||
/*
 * Allocate a physically contiguous, page-aligned array for @nents
 * scatter-gather entries.  Sized via kvm_iommu_sg_nents_size() so it
 * agrees with kvm_iommu_sg_nents_round() and kvm_iommu_sg_free().
 * Returns NULL on allocation failure; pair with kvm_iommu_sg_free().
 */
static inline struct kvm_iommu_sg *kvm_iommu_sg_alloc(unsigned int nents, gfp_t gfp)
{
	return alloc_pages_exact(kvm_iommu_sg_nents_size(nents), gfp);
}
|
||||
|
||||
/*
 * Free an array obtained from kvm_iommu_sg_alloc().  @nents must be the
 * same count passed at allocation time so the computed size matches;
 * both sides use kvm_iommu_sg_nents_size() to guarantee that.
 */
static inline void kvm_iommu_sg_free(struct kvm_iommu_sg *sg, unsigned int nents)
{
	free_pages_exact(sg, kvm_iommu_sg_nents_size(nents));
}
|
||||
|
||||
|
||||
|
||||
@@ -411,6 +411,7 @@ struct kvm_arm_smmu_map_sg {
|
||||
int prot;
|
||||
gfp_t gfp;
|
||||
unsigned int nents;
|
||||
size_t total_mapped;
|
||||
};
|
||||
|
||||
static struct iommu_map_cookie_sg *kvm_arm_smmu_alloc_cookie_sg(unsigned long iova,
|
||||
@@ -424,16 +425,17 @@ static struct iommu_map_cookie_sg *kvm_arm_smmu_alloc_cookie_sg(unsigned long io
|
||||
if (!map_sg)
|
||||
return NULL;
|
||||
|
||||
map_sg->sg = kvm_iommu_sg_alloc(nents, gfp);
|
||||
/* Rounds nents to allocate to page aligned size. */
|
||||
map_sg->nents = kvm_iommu_sg_nents_round(nents);
|
||||
map_sg->sg = kvm_iommu_sg_alloc(map_sg->nents, gfp);
|
||||
if (!map_sg->sg)
|
||||
return NULL;
|
||||
map_sg->iova = iova;
|
||||
map_sg->prot = prot;
|
||||
map_sg->gfp = gfp;
|
||||
map_sg->nents = nents;
|
||||
ret = kvm_iommu_share_hyp_sg(map_sg->sg, nents);
|
||||
ret = kvm_iommu_share_hyp_sg(map_sg->sg, map_sg->nents);
|
||||
if (ret) {
|
||||
kvm_iommu_sg_free(map_sg->sg, nents);
|
||||
kvm_iommu_sg_free(map_sg->sg, map_sg->nents);
|
||||
kfree(map_sg);
|
||||
return NULL;
|
||||
}
|
||||
@@ -447,6 +449,17 @@ static int kvm_arm_smmu_add_deferred_map_sg(struct iommu_map_cookie_sg *cookie,
|
||||
struct kvm_arm_smmu_map_sg *map_sg = container_of(cookie, struct kvm_arm_smmu_map_sg,
|
||||
cookie);
|
||||
struct kvm_iommu_sg *sg = map_sg->sg;
|
||||
struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(map_sg->cookie.domain);
|
||||
size_t mapped;
|
||||
|
||||
/* Out of space, flush the list. */
|
||||
if (map_sg->nents == map_sg->ptr) {
|
||||
mapped = kvm_iommu_map_sg(kvm_smmu_domain->id, sg, map_sg->iova,
|
||||
map_sg->ptr, map_sg->prot, map_sg->gfp);
|
||||
map_sg->ptr = 0;
|
||||
map_sg->iova += mapped;
|
||||
map_sg->total_mapped += mapped;
|
||||
}
|
||||
|
||||
sg[map_sg->ptr].phys = paddr;
|
||||
sg[map_sg->ptr].pgsize = pgsize;
|
||||
@@ -461,11 +474,10 @@ static size_t kvm_arm_smmu_consume_deferred_map_sg(struct iommu_map_cookie_sg *c
|
||||
cookie);
|
||||
struct kvm_iommu_sg *sg = map_sg->sg;
|
||||
struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(map_sg->cookie.domain);
|
||||
size_t total_mapped;
|
||||
|
||||
total_mapped = kvm_iommu_map_sg(kvm_smmu_domain->id, sg, map_sg->iova, map_sg->ptr,
|
||||
map_sg->prot, map_sg->gfp);
|
||||
size_t total_mapped = map_sg->total_mapped;
|
||||
|
||||
total_mapped += kvm_iommu_map_sg(kvm_smmu_domain->id, sg, map_sg->iova,
|
||||
map_sg->ptr, map_sg->prot, map_sg->gfp);
|
||||
kvm_iommu_unshare_hyp_sg(sg, map_sg->nents);
|
||||
kvm_iommu_sg_free(sg, map_sg->nents);
|
||||
kfree(map_sg);
|
||||
|
||||
@@ -2638,7 +2638,7 @@ static int __iommu_add_sg(struct iommu_map_cookie_sg *cookie_sg,
|
||||
struct iommu_domain *domain = cookie_sg->domain;
|
||||
const struct iommu_domain_ops *ops = domain->ops;
|
||||
unsigned int min_pagesz;
|
||||
size_t pgsize, count;
|
||||
int ret = 0;
|
||||
|
||||
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
|
||||
return -EINVAL;
|
||||
@@ -2659,8 +2659,22 @@ static int __iommu_add_sg(struct iommu_map_cookie_sg *cookie_sg,
|
||||
iova, &paddr, size, min_pagesz);
|
||||
return -EINVAL;
|
||||
}
|
||||
pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
|
||||
return ops->add_deferred_map_sg(cookie_sg, paddr, pgsize, count);
|
||||
|
||||
while (size) {
|
||||
size_t pgsize, count, added;
|
||||
|
||||
pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
|
||||
ret = ops->add_deferred_map_sg(cookie_sg, paddr, pgsize, count);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
added = pgsize * count;
|
||||
size -= added;
|
||||
iova += added;
|
||||
paddr += added;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
|
||||
@@ -2720,7 +2734,7 @@ next:
|
||||
size_t consumed;
|
||||
|
||||
consumed = ops->consume_deferred_map_sg(cookie_sg);
|
||||
if (consumed != mapped) {
|
||||
if (WARN_ON(consumed != mapped)) {
|
||||
mapped = consumed;
|
||||
ret = -EINVAL;
|
||||
goto out_err;
|
||||
|
||||
Reference in New Issue
Block a user