ANDROID: KVM: arm64: Eagerly restore host ZCR_EL2 after vcpu run in pKVM

Eagerly restore the host ZCR_EL2 value after every vcpu run in protected
mode if the fpsimd/sve unit was used by the guest, instead of setting
the fpsimd/sve traps and waiting until the host triggers them. The rest
of the fpsimd/sve state is already restored eagerly in Android 16, and
coming (backported) patches will restore ZCR_EL2 eagerly in
non-protected mode.

Bug: 357781595
Bug: 411040189
Change-Id: I5702590331093937c1cd0d08ac754c634054c7f7
Signed-off-by: Fuad Tabba <tabba@google.com>
This commit is contained in:
Fuad Tabba
2025-05-01 10:21:31 +01:00
committed by Will Deacon
parent 86622b5452
commit 5f1b9561a1

View File

@@ -900,30 +900,6 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu, u64 *exit_code)
hyp_vcpu->exit_code = *exit_code;
}
/*
 * Restore the host's FP/SIMD (and, where supported, SVE) context at EL2
 * after a guest has used the unit, and hand ownership of the FP registers
 * back to the host.
 *
 * Typically invoked from the host's FP/SIMD/SVE trap path (see the
 * ESR_ELx_EC_FP_ASIMD/SVE/SME cases in handle_trap() below), and directly
 * in pKVM mode from vcpu put/sync and host-SMC handling.
 */
static void fpsimd_host_restore(void)
{
/* Disable EL2 FP/SVE trapping so we can touch the registers ourselves. */
cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
/* Context synchronization: the CPACR update must take effect first. */
isb();
if (unlikely(is_protected_kvm_enabled())) {
struct pkvm_hyp_vcpu *hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
/*
 * Save the guest's live FP state before clobbering it with the
 * host's: full SVE state if the vcpu has SVE, plain FPSIMD
 * otherwise.
 */
if (vcpu_has_sve(&hyp_vcpu->vcpu))
__hyp_sve_save_guest(&hyp_vcpu->vcpu);
else
__fpsimd_save_state(&hyp_vcpu->vcpu.arch.ctxt.fp_regs);
/* Reload the host's FPSIMD state and mark the host as FP owner. */
__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
}
/*
 * Restore the host's SVE vector length in ZCR_EL2 (only written if it
 * differs from the current value — see sve_cond_update_zcr_vq()).
 */
if (system_supports_sve())
sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
SYS_ZCR_EL2);
}
static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
@@ -952,8 +928,6 @@ static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
*last_ran = hyp_vcpu->vcpu.vcpu_id;
}
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
if (pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
/* Propagate WFx trapping flags */
hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWE | HCR_TWI);
@@ -972,9 +946,6 @@ static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)
if (hyp_vcpu) {
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
if (guest_owns_fp_regs())
fpsimd_host_restore();
if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu) &&
!vcpu_get_flag(host_vcpu, PKVM_HOST_STATE_DIRTY)) {
__sync_hyp_vcpu(hyp_vcpu);
@@ -995,9 +966,6 @@ static void handle___pkvm_vcpu_sync_state(struct kvm_cpu_context *host_ctxt)
if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
return;
if (guest_owns_fp_regs())
fpsimd_host_restore();
__sync_hyp_vcpu(hyp_vcpu);
}
@@ -1064,14 +1032,8 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
goto out;
flush_hyp_vcpu(hyp_vcpu);
ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
sync_hyp_vcpu(hyp_vcpu, &ret);
/* Trap host fpsimd/sve if the guest has used fpsimd/sve. */
if (guest_owns_fp_regs())
cpacr_clear_set(CPACR_ELx_FPEN | CPACR_ELx_ZEN, 0);
} else {
/* The host is fully trusted, run its vCPU directly. */
ret = __kvm_vcpu_run(host_vcpu);
@@ -2038,13 +2000,8 @@ inval:
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(u64, func_id, host_ctxt, 0);
struct pkvm_hyp_vcpu *hyp_vcpu;
bool handled;
hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
if (hyp_vcpu && guest_owns_fp_regs())
fpsimd_host_restore();
func_id &= ~ARM_SMCCC_CALL_HINTS;
handled = kvm_host_psci_handler(host_ctxt, func_id);
@@ -2079,10 +2036,12 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
case ESR_ELx_EC_SMC64:
handle_host_smc(host_ctxt);
break;
case ESR_ELx_EC_FP_ASIMD:
case ESR_ELx_EC_SVE:
case ESR_ELx_EC_SME:
fpsimd_host_restore();
BUG_ON(is_protected_kvm_enabled());
cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
isb();
sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
SYS_ZCR_EL2);
break;
case ESR_ELx_EC_IABT_LOW:
case ESR_ELx_EC_DABT_LOW: