diff --git a/arch/arm64/kvm/hyp/include/nvhe/iommu.h b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
index f112d61044ed..e6e0f4b28dbc 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/iommu.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
@@ -69,7 +69,7 @@ struct kvm_iommu_ops {
 	phys_addr_t (*iova_to_phys)(struct kvm_hyp_iommu_domain *domain, unsigned long iova);
 	void (*iotlb_sync)(struct kvm_hyp_iommu_domain *domain,
 			   struct iommu_iotlb_gather *gather);
-	bool (*dabt_handler)(struct kvm_cpu_context *host_ctxt, u64 esr, u64 addr);
+	bool (*dabt_handler)(struct user_pt_regs *regs, u64 esr, u64 addr);
 	void (*host_stage2_idmap)(struct kvm_hyp_iommu_domain *domain,
 				  phys_addr_t start, phys_addr_t end, int prot);
 	void (*host_stage2_idmap_complete)(bool map);
diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
index 2b7a44eae040..e8e1ed32c38a 100644
--- a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
+++ b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
@@ -609,7 +609,7 @@ bool kvm_iommu_host_dabt_handler(struct kvm_cpu_context *host_ctxt, u64 esr, u64
 	bool ret = false;
 
 	if (kvm_iommu_ops && kvm_iommu_ops->dabt_handler)
-		ret = kvm_iommu_ops->dabt_handler(host_ctxt, esr, addr);
+		ret = kvm_iommu_ops->dabt_handler(&host_ctxt->regs, esr, addr);
 
 	if (ret)
 		kvm_skip_host_instr();
diff --git a/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
index 0e43706c23e9..fb0c0c57cebc 100644
--- a/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
@@ -1503,7 +1503,7 @@ static phys_addr_t smmu_iova_to_phys(struct kvm_hyp_iommu_domain *domain,
 }
 
 static bool smmu_dabt_device(struct hyp_arm_smmu_v3_device *smmu,
-			     struct kvm_cpu_context *host_ctxt,
+			     struct user_pt_regs *regs,
 			     u64 esr, u32 off)
 {
 	bool is_write = esr & ESR_ELx_WNR;
@@ -1539,9 +1539,9 @@ static bool smmu_dabt_device(struct hyp_arm_smmu_v3_device *smmu,
 	if (!mask)
 		return false;
 	if (is_write)
-		writel_relaxed(cpu_reg(host_ctxt, rd) & mask, smmu->base + off);
+		writel_relaxed(regs->regs[rd] & mask, smmu->base + off);
 	else
-		cpu_reg(host_ctxt, rd) = readl_relaxed(smmu->base + off);
+		regs->regs[rd] = readl_relaxed(smmu->base + off);
 
 	return true;
 }
@@ -1601,14 +1601,14 @@ static int smmu_dev_block_dma(struct kvm_hyp_iommu *iommu, u32 sid, bool is_host
 	return ret;
 }
 
-static bool smmu_dabt_handler(struct kvm_cpu_context *host_ctxt, u64 esr, u64 addr)
+static bool smmu_dabt_handler(struct user_pt_regs *regs, u64 esr, u64 addr)
 {
 	struct hyp_arm_smmu_v3_device *smmu;
 
 	for_each_smmu(smmu) {
 		if (addr < smmu->mmio_addr || addr >= smmu->mmio_addr + smmu->mmio_size)
 			continue;
-		return smmu_dabt_device(smmu, host_ctxt, esr, addr - smmu->mmio_addr);
+		return smmu_dabt_device(smmu, regs, esr, addr - smmu->mmio_addr);
 	}
 	return false;
 }