ANDROID: KVM: arm64: Add smc64 trap handling for protected guests
Introduce a new interface that allows vendor modules to register an SMC handler for a given protected guest. pKVM will trap the SMC instruction issued by the protected guest and delegate control flow to the registered SMC handler. Bug: 360102915 Change-Id: I928a8ad890de264ce376ad174a2f4205c7c24688 Signed-off-by: Nikita Ioffe <ioffe@google.com> Signed-off-by: Sebastian Ene <sebastianene@google.com>
This commit is contained in:
committed by
Carlos Llamas
parent
2c1385ae0e
commit
128144da22
@@ -299,6 +299,7 @@ struct kvm_protected_vm {
|
||||
gpa_t pvmfw_load_addr;
|
||||
bool enabled;
|
||||
u32 ffa_support;
|
||||
bool smc_forwarded;
|
||||
};
|
||||
|
||||
struct kvm_mpidr_data {
|
||||
|
||||
@@ -26,7 +26,7 @@ void pkvm_destroy_hyp_vm(struct kvm *kvm);
|
||||
bool pkvm_is_hyp_created(struct kvm *kvm);
|
||||
int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);
|
||||
void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa);
|
||||
|
||||
/* Fix typo: the definition is pkvm_enable_smc_forwarding(), not pvkm_. */
int pkvm_enable_smc_forwarding(struct file *kvm_file);
|
||||
/*
|
||||
* This functions as an allow-list of protected VM capabilities.
|
||||
* Features not explicitly allowed by this function are denied.
|
||||
|
||||
@@ -112,6 +112,10 @@ struct pkvm_sglist_page {
|
||||
* order depends on the registration order. If no
* handler returns true, the SMC is forwarded to
* EL3.
|
||||
* @register_guest_smc_handler: @cb is called when guest identified by the
|
||||
* pkvm_handle issues an SMC that pKVM couldn't
|
||||
* handle. If @cb returns false, then unsupported
|
||||
* operation error is returned back to the guest.
|
||||
* @register_default_trap_handler:
|
||||
* @cb is called whenever EL2 traps EL1 and pKVM
|
||||
* has not handled it. If @cb returns false, the
|
||||
@@ -226,6 +230,9 @@ struct pkvm_module_ops {
|
||||
int (*host_stage2_enable_lazy_pte)(u64 addr, u64 nr_pages);
|
||||
int (*host_stage2_disable_lazy_pte)(u64 addr, u64 nr_pages);
|
||||
int (*register_host_smc_handler)(bool (*cb)(struct user_pt_regs *));
|
||||
int (*register_guest_smc_handler)(bool (*cb)(struct arm_smccc_1_2_regs *regs,
|
||||
struct arm_smccc_1_2_regs *res,
|
||||
pkvm_handle_t handle));
|
||||
int (*register_default_trap_handler)(bool (*cb)(struct user_pt_regs *));
|
||||
int (*register_illegal_abt_notifier)(void (*cb)(struct user_pt_regs *));
|
||||
int (*register_psci_notifier)(void (*cb)(enum pkvm_psci_notification, struct user_pt_regs *));
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
#include <asm/kvm_pgtable.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/arm-smccc.h>
|
||||
|
||||
#define HCALL_HANDLED 0
|
||||
#define HCALL_UNHANDLED -1
|
||||
@@ -19,6 +21,8 @@ int handle_host_dynamic_hcall(struct user_pt_regs *regs, int id);
|
||||
void __pkvm_close_module_registration(void);
|
||||
bool module_handle_host_perm_fault(struct user_pt_regs *regs, u64 esr, u64 addr);
|
||||
bool module_handle_host_smc(struct user_pt_regs *regs);
|
||||
bool module_handle_guest_smc(struct arm_smccc_1_2_regs *regs, struct arm_smccc_1_2_regs *res,
|
||||
pkvm_handle_t handle);
|
||||
#else
|
||||
static inline int __pkvm_init_module(void *module_init) { return -EOPNOTSUPP; }
|
||||
static inline int
|
||||
@@ -31,4 +35,9 @@ handle_host_dynamic_hcall(struct kvm_cpu_context *host_ctxt, int id)
|
||||
static inline void __pkvm_close_module_registration(void) { }
|
||||
bool module_handle_host_perm_fault(struct user_pt_regs *regs, u64 esr, u64 addr) { return false; }
|
||||
bool module_handle_host_smc(struct user_pt_regs *regs) { return false; }
|
||||
bool module_handle_guest_smc(struct arm_smccc_1_2_regs *regs, struct arm_smccc_1_2_regs *res,
|
||||
pkvm_handle_t handle)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -143,6 +143,8 @@ void pkvm_reset_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
|
||||
bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code);
|
||||
bool kvm_hyp_handle_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code);
|
||||
|
||||
bool kvm_handle_pvm_smc64(struct kvm_vcpu *vcpu, u64 *exit_code);
|
||||
|
||||
struct pkvm_hyp_vcpu *pkvm_mpidr_to_hyp_vcpu(struct pkvm_hyp_vm *vm, u64 mpidr);
|
||||
|
||||
static inline bool pkvm_hyp_vm_has_pvmfw(struct pkvm_hyp_vm *vm)
|
||||
|
||||
@@ -134,6 +134,7 @@ static int __hyp_smp_processor_id(void)
|
||||
/*
 * Categories of trap handlers a vendor module may register with pKVM.
 * NUM_MOD_HANDLER_TYPES must stay last: it sizes the handler table
 * (presumably indexed by these values — the table itself is outside this
 * view; confirm before reordering).
 */
enum mod_handler_type {
	HOST_FAULT_HANDLER = 0,	/* host stage-2 permission faults */
	HOST_SMC_HANDLER,	/* SMCs issued by the host */
	GUEST_SMC_HANDLER,	/* SMCs issued by a protected guest */
	NUM_MOD_HANDLER_TYPES,	/* keep last: number of handler slots */
};
|
||||
|
||||
@@ -180,6 +181,13 @@ static int __register_host_smc_handler(bool (*cb)(struct user_pt_regs *))
|
||||
return mod_handler_register(HOST_SMC_HANDLER, cb);
|
||||
}
|
||||
|
||||
/*
 * Register @cb to be offered SMCs issued by protected guests that pKVM
 * itself could not handle. @cb receives the guest's SMCCC argument
 * registers, a result-register block to fill in, and the pkvm_handle_t
 * identifying the issuing guest; it returns true to claim the call.
 * Thin wrapper delegating registration to the GUEST_SMC_HANDLER slot.
 */
static int __register_guest_smc_handler(bool (*cb)(struct arm_smccc_1_2_regs *regs,
						   struct arm_smccc_1_2_regs *res,
						   pkvm_handle_t handle))
{
	return mod_handler_register(GUEST_SMC_HANDLER, cb);
}
|
||||
|
||||
bool module_handle_host_perm_fault(struct user_pt_regs *regs, u64 esr, u64 addr)
|
||||
{
|
||||
int (*cb)(struct user_pt_regs *regs, u64 esr, u64 addr);
|
||||
@@ -206,6 +214,21 @@ bool module_handle_host_smc(struct user_pt_regs *regs)
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
 * Offer a guest SMC to each registered GUEST_SMC_HANDLER callback in
 * turn (invocation order depends on registration order, per the
 * pkvm_module_ops documentation).
 *
 * @regs:   guest SMCCC argument registers at the time of the SMC.
 * @res:    filled in by the handler that claims the call.
 * @handle: pKVM handle identifying the issuing guest.
 *
 * Returns true as soon as one callback claims the SMC; false if no
 * registered handler (or none at all) handled it.
 */
bool module_handle_guest_smc(struct arm_smccc_1_2_regs *regs, struct arm_smccc_1_2_regs *res,
			     pkvm_handle_t handle)
{
	bool (*cb)(struct arm_smccc_1_2_regs *regs, struct arm_smccc_1_2_regs *res,
		   pkvm_handle_t handle);
	int i;

	for_each_mod_handler(GUEST_SMC_HANDLER, cb, i) {
		if (cb(regs, res, handle))
			return true;
	}

	return false;
}
|
||||
|
||||
const struct pkvm_module_ops module_ops = {
|
||||
.create_private_mapping = __pkvm_create_private_mapping,
|
||||
.alloc_module_va = __pkvm_alloc_module_va,
|
||||
@@ -229,6 +252,7 @@ const struct pkvm_module_ops module_ops = {
|
||||
.host_stage2_enable_lazy_pte = host_stage2_enable_lazy_pte,
|
||||
.host_stage2_disable_lazy_pte = host_stage2_disable_lazy_pte,
|
||||
.register_host_smc_handler = __register_host_smc_handler,
|
||||
.register_guest_smc_handler = __register_guest_smc_handler,
|
||||
.register_default_trap_handler = __pkvm_register_default_trap_handler,
|
||||
.register_illegal_abt_notifier = __pkvm_register_illegal_abt_notifier,
|
||||
.register_psci_notifier = __pkvm_register_psci_notifier,
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
#include <nvhe/ffa.h>
|
||||
#include <nvhe/mem_protect.h>
|
||||
#include <nvhe/memory.h>
|
||||
#include <nvhe/modules.h>
|
||||
#include <nvhe/mm.h>
|
||||
#include <nvhe/pkvm.h>
|
||||
#include <nvhe/pviommu.h>
|
||||
@@ -593,6 +594,7 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
|
||||
hyp_vm->kvm.arch.pkvm.pvmfw_load_addr = pvmfw_load_addr;
|
||||
|
||||
hyp_vm->kvm.arch.pkvm.ffa_support = READ_ONCE(host_kvm->arch.pkvm.ffa_support);
|
||||
hyp_vm->kvm.arch.pkvm.smc_forwarded = READ_ONCE(host_kvm->arch.pkvm.smc_forwarded);
|
||||
hyp_vm->kvm.arch.mmu.last_vcpu_ran = (int __percpu *)last_ran;
|
||||
memset(last_ran, -1, pkvm_get_last_ran_size());
|
||||
pkvm_init_features_from_host(hyp_vm, host_kvm);
|
||||
@@ -1676,6 +1678,43 @@ static bool pkvm_forward_trng(struct kvm_vcpu *vcpu)
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
 * Return true when @func_id belongs to the standard secure service
 * ranges: from the 32-bit PSCI base up through the ARM CCA 32-bit IDs,
 * or the equivalent 64-bit range. Such calls are not eligible for
 * forwarding to vendor-module SMC handlers.
 */
static bool is_standard_secure_service_call(u64 func_id)
{
	bool in_smc32_range = (func_id >= PSCI_0_2_FN_BASE) &&
			      (func_id <= ARM_CCA_FUNC_END);
	bool in_smc64_range = (func_id >= PSCI_0_2_FN64_BASE) &&
			      (func_id <= ARM_CCA_64BIT_FUNC_END);

	return in_smc32_range || in_smc64_range;
}
|
||||
|
||||
/*
 * SMC64 exit handler for protected guests.
 *
 * Standard secure services (PSCI through ARM CCA ID ranges) are never
 * forwarded to modules: return false to let the normal exit path deal
 * with them. The same applies when SMC forwarding was not enabled for
 * this VM (see pkvm_enable_smc_forwarding()).
 *
 * Otherwise the guest's SMCCC registers are snapshotted and offered to
 * the registered module handlers. If a handler claims the call, its
 * result registers are copied back into the guest context starting at
 * x0; if not, x0 is set to -1 (SMCCC "not supported").
 *
 * NOTE(review): the guest PC is advanced past the SMC via
 * __kvm_skip_instr() in both the handled and unhandled cases, yet the
 * function still returns `handled` — presumably a false return here
 * means "exit to host" rather than "replay the instruction"; confirm
 * against the exit-handler dispatch in switch.c.
 */
bool kvm_handle_pvm_smc64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	bool handled = false;
	struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
	struct pkvm_hyp_vm *vm;
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct arm_smccc_1_2_regs regs;
	struct arm_smccc_1_2_regs res;
	DECLARE_REG(u64, func_id, ctxt, 0);

	hyp_vcpu = container_of(vcpu, struct pkvm_hyp_vcpu, vcpu);
	vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	if (is_standard_secure_service_call(func_id))
		return false;

	if (!vm->kvm.arch.pkvm.smc_forwarded)
		return false;

	/* Snapshot the guest GPRs so handlers cannot scribble on ctxt. */
	memcpy(&regs, &ctxt->regs, sizeof(regs));
	handled = module_handle_guest_smc(&regs, &res, vm->kvm.arch.pkvm.handle);
	if (handled)
		/* Copy the handler's result block back starting at x0. */
		memcpy(&ctxt->regs.regs[0], &res, sizeof(res));
	else
		ctxt->regs.regs[0] = -1;

	__kvm_skip_instr(vcpu);

	return handled;
}
|
||||
|
||||
/*
|
||||
* Handler for protected VM HVC calls.
|
||||
*
|
||||
|
||||
@@ -318,6 +318,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
|
||||
static const exit_handler_fn pvm_exit_handlers[] = {
|
||||
[0 ... ESR_ELx_EC_MAX] = NULL,
|
||||
[ESR_ELx_EC_HVC64] = kvm_handle_pvm_hvc64,
|
||||
[ESR_ELx_EC_SMC64] = kvm_handle_pvm_smc64,
|
||||
[ESR_ELx_EC_SYS64] = kvm_handle_pvm_sys64,
|
||||
[ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd,
|
||||
[ESR_ELx_EC_SME] = kvm_handle_pvm_restricted,
|
||||
|
||||
@@ -753,6 +753,25 @@ void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa)
|
||||
kfree(ppage);
|
||||
}
|
||||
|
||||
int pkvm_enable_smc_forwarding(struct file *kvm_file)
|
||||
{
|
||||
struct kvm *host_kvm;
|
||||
|
||||
if (!file_is_kvm(kvm_file))
|
||||
return -EINVAL;
|
||||
|
||||
if (!kvm_get_kvm_safe(kvm_file->private_data))
|
||||
return -EINVAL;
|
||||
|
||||
host_kvm = kvm_file->private_data;
|
||||
if (!host_kvm)
|
||||
return -EINVAL;
|
||||
|
||||
host_kvm->arch.pkvm.smc_forwarded = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init pkvm_firmware_rmem_err(struct reserved_mem *rmem,
|
||||
const char *reason)
|
||||
{
|
||||
|
||||
@@ -343,6 +343,9 @@
|
||||
ARM_SMCCC_OWNER_STANDARD, \
|
||||
0x53)
|
||||
|
||||
#define ARM_CCA_FUNC_END 0x840001CF
|
||||
#define ARM_CCA_64BIT_FUNC_END 0xC40001CF
|
||||
|
||||
/*
|
||||
* Return codes defined in ARM DEN 0070A
|
||||
* ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C
|
||||
|
||||
Reference in New Issue
Block a user