ANDROID: KVM: arm64: Add __pkvm_host_donate_sglist_hyp
This module-only function allows the caller to donate an array of discontiguous pages. It intends to batch IOMMU unmap operations. This is similar to __pkvm_host_donate_sglist_guest(). Bug: 357781595 Bug: 409860221 Change-Id: I097994fc7317258cd173af1960b1dca09435b570 Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
This commit is contained in:
committed by
Carlos Llamas
parent
a7667808d9
commit
887fb3f16c
@@ -20,6 +20,11 @@ enum pkvm_psci_notification {
|
||||
PKVM_PSCI_CPU_ENTRY,
|
||||
};
|
||||
|
||||
/*
 * One entry of a scatter-gather list: a physically contiguous run of pages,
 * described by its base pfn and its size as a page order (the run covers
 * PAGE_SIZE << order bytes).
 *
 * pfn is a 40-bit bitfield; __packed keeps an array of entries densely laid
 * out, as the list is passed across the host/hyp boundary.
 */
struct pkvm_sglist_page {
	u64 pfn : 40;
	u8 order;
} __packed;
|
||||
|
||||
/**
|
||||
* struct pkvm_module_ops - pKVM modules callbacks
|
||||
* @create_private_mapping: Map a memory region into the hypervisor private
|
||||
@@ -132,6 +137,9 @@ enum pkvm_psci_notification {
|
||||
* full control is given to the hypervisor.
|
||||
 * @host_donate_hyp_prot:	As host_donate_hyp, but this variant sets
|
||||
* the prot of the hyp.
|
||||
 * @host_donate_sglist_hyp:	Similar to host_donate_hyp but takes an array of PFNs
|
||||
 *				(pkvm_sglist_page) as an argument. This intends to
|
||||
* batch IOMMU updates.
|
||||
* @hyp_donate_host: The page @pfn whom control has previously been
|
||||
* given to the hypervisor (@host_donate_hyp) is
|
||||
* given back to the host.
|
||||
@@ -225,6 +233,7 @@ struct pkvm_module_ops {
|
||||
int (*register_unmask_serror)(bool (*unmask)(void), void (*mask)(void));
|
||||
int (*host_donate_hyp)(u64 pfn, u64 nr_pages, bool accept_mmio);
|
||||
int (*host_donate_hyp_prot)(u64 pfn, u64 nr_pages, bool accept_mmio, enum kvm_pgtable_prot prot);
|
||||
int (*host_donate_sglist_hyp)(struct pkvm_sglist_page *sglist, size_t nr_pages);
|
||||
int (*hyp_donate_host)(u64 pfn, u64 nr_pages);
|
||||
int (*host_share_hyp)(u64 pfn);
|
||||
int (*host_unshare_hyp)(u64 pfn);
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
#include <asm/kvm_hyp.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
#include <asm/kvm_pgtable.h>
|
||||
#include <asm/kvm_pkvm_module.h>
|
||||
#include <asm/virt.h>
|
||||
#include <nvhe/memory.h>
|
||||
#include <nvhe/pkvm.h>
|
||||
@@ -45,6 +46,7 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
|
||||
int ___pkvm_host_donate_hyp(u64 pfn, u64 nr_pages, bool accept_mmio);
|
||||
int ___pkvm_host_donate_hyp_prot(u64 pfn, u64 nr_pages,
|
||||
bool accept_mmio, enum kvm_pgtable_prot prot);
|
||||
int __pkvm_host_donate_sglist_hyp(struct pkvm_sglist_page *sglist, size_t nr_pages);
|
||||
int __pkvm_host_donate_hyp_locked(u64 pfn, u64 nr_pages, enum kvm_pgtable_prot prot);
|
||||
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
|
||||
int __pkvm_guest_share_hyp_page(struct pkvm_hyp_vcpu *vcpu, u64 ipa, u64 *hyp_va);
|
||||
|
||||
@@ -2354,6 +2354,83 @@ unlock:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * __pkvm_host_donate_sglist_hyp() - Donate an array of discontiguous host
 * pages to the hypervisor.
 *
 * @sglist:	array of @nr_pages entries, each describing a physically
 *		contiguous run as a pfn plus a page order. The array itself
 *		must live in hyp-private memory (checked under
 *		CONFIG_NVHE_EL2_DEBUG).
 * @nr_pages:	number of entries in @sglist.
 *
 * Similar to ___pkvm_host_donate_hyp() but operates on a scatter-gather list
 * so IOMMU stage-2 updates can be batched. Validation of every entry happens
 * up-front; if a mapping fails mid-way (only -ENOMEM is expected), the
 * already-donated entries are rolled back to host ownership.
 *
 * Return: 0 on success (including nr_pages == 0), -EINVAL for an order that
 * overflows or a non-memory address, or the error from the page-state checks.
 */
int __pkvm_host_donate_sglist_hyp(struct pkvm_sglist_page *sglist, size_t nr_pages)
{
	int p, ret = 0;	/* init: ret must be 0 when nr_pages == 0 */

	host_lock_component();
	hyp_lock_component();

	/* Check that the sglist itself lives in hyp-private memory. */
	if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG))
		WARN_ON(__hyp_check_page_state_range((u64)sglist, nr_pages * sizeof(*sglist),
						     PKVM_PAGE_OWNED));

	/* First pass: validate every entry before touching any mapping. */
	for (p = 0; p < nr_pages; p++) {
		u64 phys = hyp_pfn_to_phys(sglist[p].pfn);
		size_t size;

		if (check_shl_overflow(PAGE_SIZE, sglist[p].order, &size)) {
			ret = -EINVAL;
			goto unlock;
		}

		if (!addr_is_memory(phys)) {
			ret = -EINVAL;
			goto unlock;
		}

		ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
		if (ret)
			goto unlock;

		if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
			ret = __hyp_check_page_state_range((u64)__hyp_va(phys), size, PKVM_NOPAGE);
			if (ret)
				goto unlock;
		}
	}

	/* Second pass: map each run into hyp and transfer ownership. */
	for (p = 0; p < nr_pages; p++) {
		size_t size = PAGE_SIZE << sglist[p].order;
		u64 phys = hyp_pfn_to_phys(sglist[p].pfn);
		enum kvm_pgtable_prot prot;

		prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
		ret = pkvm_create_mappings_locked(__hyp_va(phys), __hyp_va(phys) + size, prot);
		if (ret) {
			/* Only an allocation failure is expected here. */
			WARN_ON(ret != -ENOMEM);

			kvm_iommu_host_stage2_idmap_complete(false);

			/* Rollback entries [0..p], restoring host ownership. */
			for (; p >= 0; p--) {
				phys = hyp_pfn_to_phys(sglist[p].pfn);
				size = PAGE_SIZE << sglist[p].order;

				WARN_ON(host_stage2_idmap_locked(phys, size,
								 PKVM_HOST_MEM_PROT, false));
				kvm_iommu_host_stage2_idmap(phys, phys + size, PKVM_HOST_MEM_PROT);
				pkvm_remove_mappings_locked(__hyp_va(phys), __hyp_va(phys) + size);
			}
			kvm_iommu_host_stage2_idmap_complete(true);

			break;
		}

		WARN_ON(__host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP, true, 0, false));
		kvm_iommu_host_stage2_idmap(phys, phys + size, 0);
	}

	/*
	 * Flush the batched IOMMU updates. NOTE(review): on the rollback path
	 * this runs after idmap_complete(true) as in the original flow —
	 * confirm the bool argument's semantics allow the double completion.
	 */
	kvm_iommu_host_stage2_idmap_complete(false);

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}
|
||||
|
||||
void hyp_poison_page(phys_addr_t phys, size_t size)
|
||||
{
|
||||
WARN_ON(!PAGE_ALIGNED(size));
|
||||
|
||||
@@ -236,6 +236,7 @@ const struct pkvm_module_ops module_ops = {
|
||||
.register_unmask_serror = __pkvm_register_unmask_serror,
|
||||
.host_donate_hyp = ___pkvm_host_donate_hyp,
|
||||
.host_donate_hyp_prot = ___pkvm_host_donate_hyp_prot,
|
||||
.host_donate_sglist_hyp = __pkvm_host_donate_sglist_hyp,
|
||||
.hyp_donate_host = __pkvm_hyp_donate_host,
|
||||
.host_share_hyp = __pkvm_host_share_hyp,
|
||||
.host_unshare_hyp = __pkvm_host_unshare_hyp,
|
||||
|
||||
Reference in New Issue
Block a user