ANDROID: KVM: arm64: Unmap host stage-2 memory on FF-A lend

Add the option to unmap host stage-2 memory on FF-A lend if
Trustzone cannot isolate secure memory from the host. Hide
this behind a command line option to avoid performance
degradation when breaking block mappings from memory donation.
Track the host handles in an array and store the type of
transfer to be able to use the correct share|lend during
the ffa fragmentation calls.

Bug: 352480558
Bug: 278749606
Bug: 381963586
Change-Id: Ifbdc82adb2675f5e7d1ee16ec5356c6f24b031bb
Signed-off-by: Sebastian Ene <sebastianene@google.com>
This commit is contained in:
Sebastian Ene
2024-07-12 11:57:39 +00:00
parent 7368dfbdb8
commit 5d45bc0cb2
6 changed files with 209 additions and 18 deletions

View File

@@ -413,7 +413,8 @@ static inline unsigned long pkvm_selftest_pages(void) { return 32; }
static inline unsigned long pkvm_selftest_pages(void) { return 0; }
#endif
#define KVM_FFA_MBOX_NR_PAGES 1
#define KVM_FFA_MBOX_NR_PAGES 1
#define KVM_FFA_SPM_HANDLE_NR_PAGES 2
/*
* Maximum number of constituents allowed in a descriptor. This number is
@@ -424,6 +425,7 @@ static inline unsigned long pkvm_selftest_pages(void) { return 0; }
static inline unsigned long hyp_ffa_proxy_pages(void)
{
size_t desc_max;
unsigned long num_pages;
/*
* SG_MAX_SEGMENTS is supposed to bound the number of elements in an
@@ -446,7 +448,9 @@ static inline unsigned long hyp_ffa_proxy_pages(void)
KVM_FFA_MAX_NR_CONSTITUENTS * sizeof(struct ffa_mem_region_addr_range);
/* Plus a page each for the hypervisor's RX and TX mailboxes. */
return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
num_pages = (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
return num_pages;
}
static inline size_t pkvm_host_sve_state_size(void)

View File

@@ -149,6 +149,7 @@ KVM_NVHE_ALIAS(__hyp_patchable_function_entries_end);
/* pKVM static key */
KVM_NVHE_ALIAS(kvm_protected_mode_initialized);
KVM_NVHE_ALIAS(kvm_ffa_unmap_on_lend);
#endif /* CONFIG_KVM */
#ifdef CONFIG_EFI_ZBOOT

View File

@@ -65,6 +65,8 @@ int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm, u64 size);
int __pkvm_host_test_clear_young_guest(u64 gfn, u64 size, bool mkold, struct pkvm_hyp_vm *vm);
kvm_pte_t __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu);
int __pkvm_host_split_guest(u64 gfn, u64 size, struct pkvm_hyp_vcpu *vcpu);
int __pkvm_host_donate_ffa(u64 pfn, u64 nr_pages);
int __pkvm_host_reclaim_ffa(u64 pfn, u64 nr_pages);
int __pkvm_guest_share_host(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa,
u64 nr_pages, u64 *nr_shared);
int __pkvm_guest_unshare_host(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa,

View File

@@ -30,6 +30,7 @@
#include <asm/kvm_hypevents.h>
#include <asm/kvm_pkvm.h>
#include <kvm/arm_hypercalls.h>
#include <asm/virt.h>
#include <nvhe/arm-smccc.h>
#include <nvhe/alloc.h>
@@ -41,6 +42,7 @@
#include <nvhe/spinlock.h>
#define VM_FFA_SUPPORTED(vcpu) ((vcpu)->kvm->arch.pkvm.ffa_support)
#define FFA_INVALID_SPM_HANDLE (BIT(63) - 1)
/*
* A buffer to hold the maximum descriptor size we can see from the host,
@@ -60,6 +62,11 @@ struct ffa_translation {
phys_addr_t pa;
};
struct ffa_handle {
u64 handle: 63;
u64 is_lend: 1;
};
/*
* Note that we don't currently lock these buffers explicitly, instead
* relying on the locking of the hyp FFA buffers.
@@ -69,6 +76,9 @@ static struct kvm_ffa_buffers host_buffers;
static u32 hyp_ffa_version;
static bool has_version_negotiated;
static struct ffa_handle *spm_handles, *spm_free_handle;
static u32 num_spm_handles;
static DEFINE_HYP_SPINLOCK(version_lock);
static DEFINE_HYP_SPINLOCK(kvm_ffa_hyp_lock);
@@ -80,6 +90,57 @@ static struct kvm_ffa_buffers *ffa_get_buffers(struct pkvm_hyp_vcpu *hyp_vcpu)
return &pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu)->ffa_buf;
}
DECLARE_STATIC_KEY_FALSE(kvm_ffa_unmap_on_lend);
/*
 * Record a host FF-A memory-transfer handle together with the transfer type
 * (share vs lend), so fragmentation and reclaim paths can pick the matching
 * share/unshare or donate/reclaim operation later.
 *
 * Returns 0 on success (or when handle tracking is disabled), -ENOSPC when
 * the tracking array is full.
 */
static int ffa_host_store_handle(u64 ffa_handle, bool is_lend)
{
	u32 i;
	struct ffa_handle *free_handle = NULL;

	/* Tracking is only needed when lent memory is unmapped from the host. */
	if (!static_branch_unlikely(&kvm_ffa_unmap_on_lend))
		return 0;

	/*
	 * Fast path: reuse the slot cached by ffa_host_clear_handle(), but
	 * only if it is valid AND still free. Otherwise fall back to a
	 * linear scan for an unused entry.
	 */
	if (spm_free_handle >= spm_handles &&
	    spm_free_handle < (spm_handles + num_spm_handles) &&
	    spm_free_handle->handle == FFA_INVALID_SPM_HANDLE) {
		free_handle = spm_free_handle;
	} else {
		for (i = 0; i < num_spm_handles; i++)
			if (spm_handles[i].handle == FFA_INVALID_SPM_HANDLE)
				break;

		if (i == num_spm_handles)
			return -ENOSPC;

		free_handle = &spm_handles[i];
	}

	/*
	 * The cached slot (if any) is about to become occupied; invalidate
	 * the cache so a subsequent store cannot overwrite this entry.
	 */
	spm_free_handle = NULL;

	free_handle->handle = ffa_handle;
	free_handle->is_lend = is_lend;
	return 0;
}
/*
 * Look up a previously stored host FF-A transfer handle.
 * Returns the tracking entry, or NULL if the handle is not tracked.
 */
static struct ffa_handle *ffa_host_get_handle(u64 ffa_handle)
{
	struct ffa_handle *entry = spm_handles;
	struct ffa_handle *end = spm_handles + num_spm_handles;

	for (; entry < end; entry++) {
		if (entry->handle == ffa_handle)
			return entry;
	}

	return NULL;
}
/*
 * Drop a tracked host FF-A handle (e.g. on reclaim) and cache the freed
 * slot so the next store can reuse it without scanning.
 * Returns 0 on success, -EINVAL if the handle was not tracked.
 */
static int ffa_host_clear_handle(u64 ffa_handle)
{
	struct ffa_handle *entry;

	entry = ffa_host_get_handle(ffa_handle);
	if (!entry)
		return -EINVAL;

	entry->handle = FFA_INVALID_SPM_HANDLE;
	spm_free_handle = entry;

	return 0;
}
static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
*res = (struct arm_smccc_res) {
@@ -372,9 +433,10 @@ out:
}
static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
u32 nranges)
u32 nranges, bool is_lend)
{
u32 i;
int ret;
for (i = 0; i < nranges; ++i) {
struct ffa_mem_region_addr_range *range = &ranges[i];
@@ -384,17 +446,27 @@ static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
if (!PAGE_ALIGNED(sz))
break;
if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
if (static_branch_unlikely(&kvm_ffa_unmap_on_lend) && is_lend)
ret = __pkvm_host_donate_ffa(pfn, sz / PAGE_SIZE);
else
ret = __pkvm_host_share_ffa(pfn, sz / PAGE_SIZE);
if (ret)
break;
}
return i;
}
/*
* Verify if the page is lent or shared and unshare it with FF-A.
* On success, return the number of *unshared* pages and store in the
* is_lend argument whether the range was shared or lent.
*/
static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
u32 nranges)
u32 nranges, bool is_lend)
{
u32 i;
int ret;
for (i = 0; i < nranges; ++i) {
struct ffa_mem_region_addr_range *range = &ranges[i];
@@ -404,7 +476,12 @@ static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
if (!PAGE_ALIGNED(sz))
break;
if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
if (static_branch_unlikely(&kvm_ffa_unmap_on_lend) && is_lend)
ret = __pkvm_host_reclaim_ffa(pfn, sz / PAGE_SIZE);
else
ret = __pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE);
if (ret)
break;
}
@@ -489,13 +566,13 @@ unshare:
}
static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
u32 nranges)
u32 nranges, bool is_lend)
{
u32 nshared = __ffa_host_share_ranges(ranges, nranges);
u32 nshared = __ffa_host_share_ranges(ranges, nranges, is_lend);
int ret = 0;
if (nshared != nranges) {
WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
WARN_ON(__ffa_host_unshare_ranges(ranges, nshared, is_lend) != nshared);
ret = -EACCES;
}
@@ -503,13 +580,13 @@ static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
}
static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
u32 nranges)
u32 nranges, bool is_lend)
{
u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
int ret = 0;
u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges, is_lend);
if (nunshared != nranges) {
WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
WARN_ON(__ffa_host_share_ranges(ranges, nunshared, is_lend) != nunshared);
ret = -EACCES;
}
@@ -528,6 +605,9 @@ static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
int ret = FFA_RET_INVALID_PARAMETERS;
u32 nr_ranges;
struct kvm_ffa_buffers *ffa_buf;
bool is_lend = false;
u64 host_handle = PACK_HANDLE(handle_lo, handle_hi);
struct ffa_handle *entry;
if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
goto out;
@@ -544,7 +624,17 @@ static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
memcpy(buf, ffa_buf->tx, fraglen);
nr_ranges = fraglen / sizeof(*buf);
ret = ffa_host_share_ranges(buf, nr_ranges);
if (static_branch_unlikely(&kvm_ffa_unmap_on_lend)) {
entry = ffa_host_get_handle(host_handle);
if (!entry) {
ffa_to_smccc_error(res, FFA_RET_INVALID_PARAMETERS);
goto out_unlock;
}
is_lend = entry->is_lend;
}
ret = ffa_host_share_ranges(buf, nr_ranges, is_lend);
if (ret) {
/*
* We're effectively aborting the transaction, so we need
@@ -558,7 +648,7 @@ static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));
WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges, is_lend));
out_unlock:
hyp_spin_unlock(&kvm_ffa_hyp_lock);
@@ -610,6 +700,7 @@ static int __do_ffa_mem_xfer(const u64 func_id,
int ret = 0;
struct ffa_mem_transfer *transfer = NULL;
u64 ffa_handle;
bool is_lend = func_id == FFA_FN64_MEM_LEND;
if (addr_mbz || npages_mbz || fraglen > len ||
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
@@ -706,7 +797,7 @@ static int __do_ffa_mem_xfer(const u64 func_id,
temp_reg->addr_range_cnt * sizeof(struct ffa_mem_region_addr_range));
}
} else
ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
ret = ffa_host_share_ranges(reg->constituents, nr_ranges, is_lend);
if (ret)
goto out_unlock;
@@ -728,8 +819,11 @@ static int __do_ffa_mem_xfer(const u64 func_id,
if (hyp_vcpu && transfer) {
transfer->ffa_handle = ffa_handle;
list_add(&transfer->node, &ffa_buf->xfer_list);
} else if (!hyp_vcpu) {
ret = ffa_host_store_handle(ffa_handle, is_lend);
if (ret)
goto err_unshare;
}
hyp_spin_unlock(&kvm_ffa_hyp_lock);
return 0;
out_unlock:
@@ -743,7 +837,7 @@ err_unshare:
if (hyp_vcpu)
ffa_guest_unshare_ranges(hyp_vcpu, transfer);
else
WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges, is_lend));
goto out_unlock;
}
@@ -778,6 +872,8 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
u64 handle;
struct ffa_mem_transfer *transfer = NULL;
struct kvm_ffa_buffers *ffa_buf;
struct ffa_handle *entry;
bool is_lend = false;
handle = PACK_HANDLE(handle_lo, handle_hi);
@@ -796,6 +892,16 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
/* Prevent the host from replicating a transfer handle used by the guest */
WARN_ON(transfer);
if (static_branch_unlikely(&kvm_ffa_unmap_on_lend)) {
entry = ffa_host_get_handle(handle);
if (!entry) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out_unlock;
}
is_lend = entry->is_lend;
}
}
buf = hyp_buffers.tx;
@@ -859,7 +965,9 @@ out_reclaim:
else {
reg = (void *)buf + offset;
WARN_ON(ffa_host_unshare_ranges(reg->constituents,
reg->addr_range_cnt));
reg->addr_range_cnt, is_lend));
if (static_branch_unlikely(&kvm_ffa_unmap_on_lend))
ffa_host_clear_handle(handle);
}
if (transfer) {
@@ -1426,6 +1534,14 @@ int hyp_ffa_init(void *pages)
rx = pages;
pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
if (static_branch_unlikely(&kvm_ffa_unmap_on_lend)) {
spm_handles = pages;
pages += KVM_FFA_SPM_HANDLE_NR_PAGES * PAGE_SIZE;
num_spm_handles = KVM_FFA_SPM_HANDLE_NR_PAGES * PAGE_SIZE /
sizeof(struct ffa_handle);
memset(spm_handles, -1, KVM_FFA_SPM_HANDLE_NR_PAGES * PAGE_SIZE);
}
ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
.buf = pages,
.len = PAGE_SIZE *

View File

@@ -1638,6 +1638,60 @@ unlock:
return ret;
}
/*
 * Transfer ownership of a host-owned physical range to FF-A, removing it
 * from the host stage-2. The range must not overflow, must lie entirely
 * within one memory range, and must currently be owned by the host.
 *
 * Returns 0 on success, -EINVAL on arithmetic overflow, -EPERM when the
 * range is not backed by memory, or the page-state check error.
 */
int __pkvm_host_donate_ffa(u64 pfn, u64 nr_pages)
{
	u64 start = hyp_pfn_to_phys(pfn);
	u64 len, limit;
	struct kvm_mem_range mem_range;
	int err;

	if (check_shl_overflow(nr_pages, PAGE_SHIFT, &len) ||
	    check_add_overflow(start, len, &limit))
		return -EINVAL;

	if (!find_mem_range(start, &mem_range) ||
	    !is_in_mem_range(limit - 1, &mem_range))
		return -EPERM;

	host_lock_component();

	err = __host_check_page_state_range(start, len, PKVM_PAGE_OWNED);
	if (!err)
		WARN_ON(host_stage2_set_owner_locked(start, len, PKVM_ID_FFA));

	host_unlock_component();
	return err;
}
/*
 * Return a range previously donated to FF-A back to the host stage-2.
 * The range must not overflow, must lie entirely within one memory range,
 * and must currently be unmapped from the host (PKVM_NOPAGE).
 *
 * Returns 0 on success, -EINVAL on arithmetic overflow, -EPERM when the
 * range is not backed by memory, or the page-state check error.
 */
int __pkvm_host_reclaim_ffa(u64 pfn, u64 nr_pages)
{
	u64 start = hyp_pfn_to_phys(pfn);
	u64 len, limit;
	struct kvm_mem_range mem_range;
	int err;

	if (check_shl_overflow(nr_pages, PAGE_SHIFT, &len) ||
	    check_add_overflow(start, len, &limit))
		return -EINVAL;

	if (!find_mem_range(start, &mem_range) ||
	    !is_in_mem_range(limit - 1, &mem_range))
		return -EPERM;

	host_lock_component();

	err = __host_check_page_state_range(start, len, PKVM_NOPAGE);
	if (!err)
		WARN_ON(host_stage2_set_owner_locked(start, len, PKVM_ID_HOST));

	host_unlock_component();
	return err;
}
#define MODULE_PROT_ALLOWLIST (KVM_PGTABLE_PROT_RWX | \
KVM_PGTABLE_PROT_DEVICE | \
KVM_PGTABLE_PROT_NORMAL_NC | \

View File

@@ -194,6 +194,8 @@ static int __init early_hyp_lm_size_mb_cfg(char *arg)
}
early_param("kvm-arm.hyp_lm_size_mb", early_hyp_lm_size_mb_cfg);
DEFINE_STATIC_KEY_FALSE(kvm_ffa_unmap_on_lend);
void __init kvm_hyp_reserve(void)
{
u64 hyp_mem_pages = 0;
@@ -225,6 +227,10 @@ void __init kvm_hyp_reserve(void)
hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
hyp_mem_pages += pkvm_selftest_pages();
hyp_mem_pages += hyp_ffa_proxy_pages();
if (static_branch_unlikely(&kvm_ffa_unmap_on_lend))
hyp_mem_pages += KVM_FFA_SPM_HANDLE_NR_PAGES;
hyp_mem_pages++; /* hyp_ppages */
/*
@@ -1846,3 +1852,11 @@ int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, void
WARN_ON_ONCE(1);
return -EINVAL;
}
/*
 * "kvm-arm.ffa-unmap-on-lend" early parameter: enable unmapping of host
 * stage-2 memory on FF-A lend. Kept behind a command-line opt-in to avoid
 * the performance cost of breaking block mappings on memory donation.
 * The argument value is ignored; presence of the parameter enables the key.
 */
static int early_ffa_unmap_on_lend_cfg(char *arg)
{
	static_branch_enable(&kvm_ffa_unmap_on_lend);
	return 0;
}
early_param("kvm-arm.ffa-unmap-on-lend", early_ffa_unmap_on_lend_cfg);