ANDROID: KVM: arm64: Implement MEM_RELINQUISH SMCCC hypercall

This allows a protected VM running under pKVM to notify the hypervisor
(and, through it, the host) that it is relinquishing pages back to host
ownership.

Bug: 357781595
Change-Id: I4644736db04afacd7da4c6f465130c73c2e44b93
Signed-off-by: Keir Fraser <keirf@google.com>
This commit is contained in:
Keir Fraser
2022-11-08 11:04:49 +00:00
parent dfe53de3e1
commit 88b4844da3
10 changed files with 176 additions and 11 deletions

View File

@@ -24,6 +24,7 @@ int pkvm_init_host_vm(struct kvm *kvm, unsigned long type);
int pkvm_create_hyp_vm(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);
bool pkvm_is_hyp_created(struct kvm *kvm);
void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa);
/*
* Definitions for features to be allowed or restricted for guest virtual

View File

@@ -78,6 +78,8 @@ int __pkvm_guest_unshare_host(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa);
int __pkvm_install_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa);
int __pkvm_remove_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa);
bool __pkvm_check_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu);
int __pkvm_guest_relinquish_to_host(struct pkvm_hyp_vcpu *vcpu,
u64 ipa, u64 *ppa);
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);

View File

@@ -104,6 +104,7 @@ int kvm_check_pvm_sysreg_table(void);
void pkvm_reset_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_hyp_handle_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code);
struct pkvm_hyp_vcpu *pkvm_mpidr_to_hyp_vcpu(struct pkvm_hyp_vm *vm, u64 mpidr);

View File

@@ -99,6 +99,8 @@ static void handle_pvm_entry_hvc64(struct pkvm_hyp_vcpu *hyp_vcpu)
case ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID:
fallthrough;
case ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID:
fallthrough;
case ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID:
vcpu_set_reg(&hyp_vcpu->vcpu, 0, SMCCC_RET_SUCCESS);
break;
default:
@@ -267,6 +269,8 @@ static void handle_pvm_exit_hvc64(struct pkvm_hyp_vcpu *hyp_vcpu)
case ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID:
fallthrough;
case ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID:
fallthrough;
case ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID:
n = 4;
break;

View File

@@ -271,6 +271,7 @@ static int reclaim_walker(const struct kvm_pgtable_visit_ctx *ctx,
{
kvm_pte_t pte = *ctx->ptep;
struct hyp_page *page;
u64 *pa = ctx->arg;
if (!kvm_pte_valid(pte))
return 0;
@@ -282,6 +283,8 @@ static int reclaim_walker(const struct kvm_pgtable_visit_ctx *ctx,
fallthrough;
case PKVM_PAGE_SHARED_BORROWED:
case PKVM_PAGE_SHARED_OWNED:
if (pa)
*pa = kvm_pte_to_phys(pte);
page->flags |= HOST_PAGE_PENDING_RECLAIM;
break;
default:
@@ -321,6 +324,36 @@ void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
}
}
/*
 * Relinquish the guest page mapped at @ipa back to the host.
 *
 * Walks the guest stage-2 table for @ipa: if a valid mapping exists,
 * reclaim_walker() marks the backing hyp_page for host reclaim and reports
 * the physical address through @ppa, after which the stage-2 PTE is zapped.
 *
 * @vcpu: the relinquishing vCPU (used to locate the owning hyp VM).
 * @ipa:  guest intermediate physical address of the page.
 * @ppa:  out parameter; set to the physical address of the relinquished
 *        page, or left 0 if nothing was mapped at @ipa. May be NULL if the
 *        caller does not need the PA (reclaim_walker already tolerates this).
 *
 * Returns 0 on success (including the no-mapping case) or a negative error
 * from the page-table walk.
 */
int __pkvm_guest_relinquish_to_host(struct pkvm_hyp_vcpu *vcpu,
				    u64 ipa, u64 *ppa)
{
	struct kvm_pgtable_walker walker = {
		.cb	= reclaim_walker,
		.arg	= ppa,
		.flags	= KVM_PGTABLE_WALK_LEAF
	};
	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
	int ret;

	/* Lock order: host component first, then the guest VM. */
	host_lock_component();
	guest_lock_component(vm);

	/*
	 * Default to "not found"; guard against NULL for consistency with
	 * the `if (pa)` check inside reclaim_walker().
	 */
	if (ppa)
		*ppa = 0;

	/* If ipa is mapped: sets page flags, and gets the pa. */
	ret = kvm_pgtable_walk(&vm->pgt, ipa, PAGE_SIZE, &walker);

	/* Zap the guest stage2 pte. */
	if (!ret)
		kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE);

	guest_unlock_component(vm);
	host_unlock_component();

	return ret;
}
int __pkvm_prot_finalize(void)
{
struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;

View File

@@ -1402,6 +1402,54 @@ static bool pkvm_install_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 *exit_
return true;
}
/*
 * Handle the HYP_MEMINFO hypercall: report the hypervisor page-granule
 * size to the guest. The call takes no arguments, so any non-zero
 * argument register is rejected as invalid.
 *
 * Always returns true: the exit is fully handled here and control goes
 * straight back to the guest.
 */
static bool pkvm_meminfo_call(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;

	if (smccc_get_arg1(vcpu) || smccc_get_arg2(vcpu) ||
	    smccc_get_arg3(vcpu))
		smccc_set_retval(vcpu, SMCCC_RET_INVALID_PARAMETER, 0, 0, 0);
	else
		smccc_set_retval(vcpu, PAGE_SIZE, 0, 0, 0);

	return true;
}
/*
 * Handle the MEM_RELINQUISH hypercall from a guest: unmap the page at the
 * IPA in arg1 from the guest stage-2 and hand it back to the host.
 *
 * Returns true when the exit has been fully handled at EL2 (error, or the
 * IPA had no mapping), or false when a page was actually relinquished and
 * the hypercall must be forwarded to the host for final reclaim.
 */
static bool pkvm_memrelinquish_call(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	u64 ipa = smccc_get_arg1(vcpu);
	u64 pa = 0;

	/* Only arg1 (the IPA) may carry a value. */
	if (smccc_get_arg2(vcpu) || smccc_get_arg3(vcpu)) {
		smccc_set_retval(vcpu, SMCCC_RET_INVALID_PARAMETER, 0, 0, 0);
		return true;
	}

	if (__pkvm_guest_relinquish_to_host(hyp_vcpu, ipa, &pa)) {
		smccc_set_retval(vcpu, SMCCC_RET_INVALID_PARAMETER, 0, 0, 0);
		return true;
	}

	/* A page was unmapped: let the host complete the reclaim. */
	if (pa != 0)
		return false;

	/* Nothing was mapped at the IPA; the call is a NOP. */
	smccc_set_retval(vcpu, 0, 0, 0, 0);
	return true;
}
/*
* Handler for protected VM HVC calls.
*
@@ -1436,6 +1484,7 @@ bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_ENROLL);
val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_MAP);
val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_UNMAP);
val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MEM_RELINQUISH);
break;
case ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_ENROLL_FUNC_ID:
set_bit(KVM_ARCH_FLAG_MMIO_GUARD, &vcpu->kvm->arch.flags);
@@ -1451,18 +1500,13 @@ bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
break;
case ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_INFO_FUNC_ID:
case ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID:
if (smccc_get_arg1(vcpu) ||
smccc_get_arg2(vcpu) ||
smccc_get_arg3(vcpu)) {
val[0] = SMCCC_RET_INVALID_PARAMETER;
} else {
val[0] = PAGE_SIZE;
}
break;
return pkvm_meminfo_call(hyp_vcpu);
case ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID:
return pkvm_memshare_call(hyp_vcpu, exit_code);
case ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID:
return pkvm_memunshare_call(hyp_vcpu);
case ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID:
return pkvm_memrelinquish_call(hyp_vcpu);
default:
return pkvm_handle_psci(hyp_vcpu);
}
@@ -1470,3 +1514,26 @@ bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
smccc_set_retval(vcpu, val[0], val[1], val[2], val[3]);
return true;
}
/*
* Handler for non-protected VM HVC calls.
*
* Returns true if the hypervisor has handled the exit, and control should go
* back to the guest, or false if it hasn't.
*/
/*
 * Handler for non-protected VM HVC calls.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
bool kvm_hyp_handle_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	struct pkvm_hyp_vcpu *hyp_vcpu =
		container_of(vcpu, struct pkvm_hyp_vcpu, vcpu);
	u32 fn = smccc_get_function(vcpu);

	/* Only the memory-info and relinquish calls are handled at EL2. */
	if (fn == ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID)
		return pkvm_meminfo_call(hyp_vcpu);
	if (fn == ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID)
		return pkvm_memrelinquish_call(hyp_vcpu);

	return false;
}

View File

@@ -211,6 +211,7 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
[ESR_ELx_EC_HVC64] = kvm_hyp_handle_hvc64,
[ESR_ELx_EC_SYS64] = kvm_hyp_handle_sysreg,
[ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,

View File

@@ -6,6 +6,7 @@
#include <asm/kvm_emulate.h>
#include <asm/stage2_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <kvm/arm_hypercalls.h>
#include <kvm/arm_psci.h>
@@ -14,8 +15,15 @@
GENMASK(KVM_REG_ARM_STD_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_STD_HYP_FEATURES \
GENMASK(KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES \
GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES ({ \
unsigned long f; \
f = GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT - 1, 0); \
if (is_protected_kvm_enabled()) { \
f |= BIT(ARM_SMCCC_KVM_FUNC_HYP_MEMINFO); \
f |= BIT(ARM_SMCCC_KVM_FUNC_MEM_RELINQUISH); \
} \
f; \
})
static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
{
@@ -118,6 +126,9 @@ static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id)
case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP,
&smccc_feat->vendor_hyp_bmap);
case ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID:
return test_bit(ARM_SMCCC_KVM_FUNC_MEM_RELINQUISH,
&smccc_feat->vendor_hyp_bmap);
default:
return false;
}
@@ -370,6 +381,10 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
if (kvm_vm_is_protected(vcpu->kvm) && !topup_hyp_memcache(vcpu))
val[0] = SMCCC_RET_SUCCESS;
break;
case ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID:
pkvm_host_reclaim_page(vcpu->kvm, smccc_get_arg1(vcpu));
val[0] = SMCCC_RET_SUCCESS;
break;
case ARM_SMCCC_TRNG_VERSION:
case ARM_SMCCC_TRNG_FEATURES:
case ARM_SMCCC_TRNG_GET_UUID:

View File

@@ -31,6 +31,14 @@ static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);
phys_addr_t hyp_mem_base;
phys_addr_t hyp_mem_size;
/*
 * rb_find() comparator: order pinned-page records by guest IPA.
 * The search key is the IPA itself, smuggled through the void pointer.
 */
static int rb_ppage_cmp(const void *key, const struct rb_node *node)
{
	struct kvm_pinned_page *ppage =
		container_of(node, struct kvm_pinned_page, node);
	phys_addr_t ipa = (phys_addr_t)key;

	if (ipa < ppage->ipa)
		return -1;
	if (ipa > ppage->ipa)
		return 1;
	return 0;
}
static int cmp_hyp_memblock(const void *p1, const void *p2)
{
const struct memblock_region *r1 = p1;
@@ -343,6 +351,33 @@ static int __init finalize_pkvm(void)
}
device_initcall_sync(finalize_pkvm);
/*
 * Host-side completion of a guest MEM_RELINQUISH: the guest gave up the
 * page at @ipa, so drop our pinned-page bookkeeping, ask the hypervisor
 * to return the page to host ownership, then unpin and free the record.
 */
void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa)
{
	struct kvm_pinned_page *ppage;
	struct mm_struct *mm = current->mm;
	struct rb_node *node;

	/* Find and detach the tracking node under the MMU write lock. */
	write_lock(&host_kvm->mmu_lock);
	node = rb_find((void *)ipa, &host_kvm->arch.pkvm.pinned_pages,
		       rb_ppage_cmp);
	if (node)
		rb_erase(node, &host_kvm->arch.pkvm.pinned_pages);
	write_unlock(&host_kvm->mmu_lock);

	/* A relinquish for an untracked IPA is a guest/host state bug. */
	WARN_ON(!node);
	if (!node)
		return;

	ppage = container_of(node, struct kvm_pinned_page, node);

	/* Hand the PFN back to the hypervisor; failure is only warned on. */
	WARN_ON(kvm_call_hyp_nvhe(__pkvm_host_reclaim_page,
				  page_to_pfn(ppage->page)));

	/*
	 * Undo the pinning done at map time: release the locked-VM
	 * accounting (presumably the 'false' arg means decrement — matches
	 * the pin-side increment; confirm against account_locked_vm()),
	 * unpin the page marking it dirty, and free the record.
	 */
	account_locked_vm(mm, 1, false);
	unpin_user_pages_dirty_lock(&ppage->page, 1, true);
	kfree(ppage);
}
static int __init pkvm_firmware_rmem_err(struct reserved_mem *rmem,
const char *reason)
{

View File

@@ -123,7 +123,7 @@
#define ARM_SMCCC_KVM_FUNC_MMIO_GUARD_ENROLL 6
#define ARM_SMCCC_KVM_FUNC_MMIO_GUARD_MAP 7
#define ARM_SMCCC_KVM_FUNC_MMIO_GUARD_UNMAP 8
#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_9 9
#define ARM_SMCCC_KVM_FUNC_MEM_RELINQUISH 9
#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_10 10
#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_11 11
#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_12 12
@@ -219,6 +219,12 @@
ARM_SMCCC_OWNER_VENDOR_HYP, \
ARM_SMCCC_KVM_FUNC_MEM_UNSHARE)
#define ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
ARM_SMCCC_SMC_64, \
ARM_SMCCC_OWNER_VENDOR_HYP, \
ARM_SMCCC_KVM_FUNC_MEM_RELINQUISH)
/* ptp_kvm counter type ID */
#define KVM_PTP_VIRT_COUNTER 0
#define KVM_PTP_PHYS_COUNTER 1