ANDROID: KVM: arm64: HVCs to filter Ftrace for pKVM hyp
Introduce two new HVCs to configure which functions to trace. __pkvm_disable_ftrace() disables tracing for all functions. __pkvm_sync_ftrace() takes a host page containing the list of functions to be traced. The host-populated page must have the same order as the original ELF .hyp.patchable_function_entries section. The first bit of each address tells whether the function must be enabled or not. Bug: 357781595 Change-Id: I81b8e1733153936b0bd2850e189224e82cd94f5a Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
This commit is contained in:
@@ -106,6 +106,8 @@ enum __kvm_host_smccc_func {
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_swap_reader_tracing,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_enable_event,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_selftest_event,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_sync_ftrace,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_disable_ftrace,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_refill,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_reclaimable,
|
||||
|
||||
@@ -80,14 +80,22 @@ do { \
|
||||
|
||||
#ifdef CONFIG_PROTECTED_NVHE_FTRACE
/* One-time ftrace init for the hyp core (called during hyp setup). */
void hyp_ftrace_setup_core(void);
/*
 * Apply the enable/disable set described by the host-provided page @func_pg
 * to the range [@funcs, @funcs_end). Returns NULL when every entry of
 * @func_pg has been consumed, otherwise the next entry left to process.
 */
void *hyp_ftrace_sync(unsigned long *func_pg, unsigned long *funcs,
		      unsigned long *funcs_end, unsigned long offset_idx,
		      void *tramp);
/* Register a patchable-function-entry range with its kernel/hyp VA offset. */
int hyp_ftrace_setup(unsigned long *funcs, unsigned long *funcs_end,
		     unsigned long hyp_kern_offset, void *tramp);
void hyp_ftrace_ret_flush(void);
/* Patch every entry in [@funcs, @funcs_end) back to a NOP (tracing off). */
void hyp_ftrace_disable(unsigned long *funcs, unsigned long *funcs_end);
/* HVC entry points; @host_func_pg is a host address, donated for the call. */
int __pkvm_sync_ftrace(unsigned long host_func_pg);
int __pkvm_disable_ftrace(void);
#else
/*
 * Stubs when ftrace support is compiled out. NOTE(review): no stubs are
 * provided for hyp_ftrace_sync()/hyp_ftrace_disable() — presumably they are
 * only ever called from CONFIG_PROTECTED_NVHE_FTRACE code; confirm.
 */
static inline void hyp_ftrace_setup_core(void) { }
static inline void hyp_ftrace_ret_flush(void) { }
static inline int hyp_ftrace_setup(unsigned long *funcs, unsigned long *funcs_end,
				   unsigned long hyp_kern_offset, void *tramp) { return 0; }
static inline int __pkvm_sync_ftrace(unsigned long host_func_pg) { return -EOPNOTSUPP; }
static inline int __pkvm_disable_ftrace(void) { return -EOPNOTSUPP; }
#endif /* CONFIG_PROTECTED_NVHE_FTRACE */
|
||||
#else /* CONFIG_TRACING */
|
||||
static inline void *tracing_reserve_entry(unsigned long length) { return NULL; }
|
||||
@@ -112,5 +120,7 @@ static inline int __pkvm_enable_event(unsigned short id, bool enable) { return
|
||||
|
||||
static inline void hyp_ftrace_setup_core(void) { }
|
||||
static inline void hyp_ftrace_ret_flush(void) { }
|
||||
static inline int __pkvm_sync_ftrace(unsigned long host_func_pg) { return -EOPNOTSUPP; }
|
||||
static inline int __pkvm_disable_ftrace(void) { return -EOPNOTSUPP; }
|
||||
#endif
|
||||
#endif
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
*/
|
||||
|
||||
#include <nvhe/mm.h>
|
||||
#include <nvhe/mem_protect.h>
|
||||
#include <nvhe/trace/trace.h>
|
||||
|
||||
#include <nvhe/trace/define_events.h>
|
||||
@@ -20,6 +21,32 @@ static struct {
|
||||
struct hyp_event_id *end;
|
||||
} event_id_mod[MAX_EVENT_ID_MOD];
|
||||
|
||||
#ifdef CONFIG_PROTECTED_NVHE_FTRACE
|
||||
int __pkvm_sync_ftrace(unsigned long host_funcs_pg)
|
||||
{
|
||||
unsigned long *funcs_pg = (unsigned long *)kern_hyp_va(host_funcs_pg);
|
||||
u64 pfn = hyp_virt_to_pfn(funcs_pg);
|
||||
int ret;
|
||||
|
||||
ret = __pkvm_host_donate_hyp(pfn, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
funcs_pg = hyp_ftrace_sync(funcs_pg, NULL, NULL, 0, NULL);
|
||||
|
||||
WARN_ON(__pkvm_hyp_donate_host(pfn, 1));
|
||||
|
||||
return funcs_pg ? -EINVAL : 0;
|
||||
}
|
||||
|
||||
int __pkvm_disable_ftrace(void)
|
||||
{
|
||||
hyp_ftrace_disable(NULL, NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void hyp_set_key(atomic_t *key, int val)
|
||||
{
|
||||
atomic_t *__key = hyp_fixmap_map(__pkvm_private_range_pa(key));
|
||||
|
||||
@@ -144,6 +144,19 @@ static int __get_offset_idx_ins(unsigned long *func, unsigned long ip, u32 *insn
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * hyp_ftrace_patch() callback: always emit a NOP, i.e. disable tracing for
 * the function regardless of its current patch state. @func, @ip and @args
 * are part of the callback signature but unused here.
 */
static int __get_disable_ins(unsigned long *func, unsigned long ip, u32 *insn,
			     void *args)
{
	/* Generated on first use, then cached for every later call. */
	static u32 nop_insn;

	if (!nop_insn)
		nop_insn = aarch64_insn_gen_nop();
	*insn = cpu_to_le32(nop_insn);
	return 0;
}
|
||||
|
||||
static int __get_enable_ins(unsigned long ip, u32 *insn, void *tramp)
|
||||
{
|
||||
u32 imm, mask;
|
||||
@@ -188,6 +201,54 @@ static int __get_enable_disable_ins_early(unsigned long *func, unsigned long ip,
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* State threaded through hyp_ftrace_patch() during a host-driven sync. */
struct __ftrace_sync_patch_args {
	void *tramp;			/* trampoline used for enabled entries */
	unsigned long offset_idx;	/* hyp->kernel address translation index */
	unsigned long *funcs_pg;	/* cursor into the host-provided page */
};

/*
 * hyp_ftrace_patch() callback: compute the instruction for the entry at @ip
 * from the matching slot of the host-provided page.
 *
 * Returns:
 *  -EAGAIN  the page cursor reached its end marker (nothing consumed),
 *  -EINVAL  the page entry does not name the same function as the ELF
 *           section at this position (ordering mismatch),
 *  -EBUSY   the function is already in the requested state (no patching),
 *  0        *insn has been filled with the new instruction.
 */
static int
__get_enable_disable_ins_from_funcs_pg(unsigned long *func, unsigned long ip,
				       u32 *insn, void *__args)
{
	struct __ftrace_sync_patch_args *args = __args;
	unsigned long kern_addr;
	static u32 nop;		/* cached NOP encoding, generated once */
	u32 cur_insn;
	bool enable;
	int ret = 0;

	if (funcs_pg_is_end(args->funcs_pg))
		return -EAGAIN;

	/* Validate the page entry against the hyp-owned ELF section */
	kern_addr = __kern_addr(args->offset_idx, *func);
	if (get_func(kern_addr) != funcs_pg_func(*args->funcs_pg)) {
		ret = -EINVAL;
		goto end;
	}

	if (!nop)
		nop = aarch64_insn_gen_nop();

	enable = funcs_pg_enabled(*args->funcs_pg);
	/*
	 * NOTE(review): raw u32 load compared against the native-order NOP;
	 * assumes a little-endian hyp so it matches the cpu_to_le32() stores.
	 */
	cur_insn = *(u32 *)ip;

	/* Are we modifying anything? (NOP present <=> currently disabled) */
	if ((cur_insn == nop) != enable) {
		ret = -EBUSY;
		goto end;
	}

	if (funcs_pg_enabled(*args->funcs_pg))
		ret = __get_enable_ins(ip, insn, args->tramp);
	else
		*insn = cpu_to_le32(nop);

end:
	/* Advance the page cursor even on error so both lists stay in step */
	args->funcs_pg++;
	return ret;
}
|
||||
|
||||
phys_addr_t __get_phys(unsigned long addr)
|
||||
{
|
||||
if (addr >= (unsigned long)__hyp_text_start)
|
||||
@@ -287,3 +348,47 @@ void hyp_ftrace_setup_core(void)
|
||||
__hyp_text_start_kern - (unsigned long)__hyp_text_start,
|
||||
__hyp_ftrace_tramp);
|
||||
}
|
||||
|
||||
/*
|
||||
* funcs_pg is the host donated page containing the list of functions to
|
||||
* enable/disable.
|
||||
*
|
||||
* funcs and funcs_end are the hypervisor owned ELF sections. For security
|
||||
* purposes, funcs_pg is validated against funcs/funcs_end and for efficency
|
||||
* purposes, it is expected from funcs_pg to have the same order as
|
||||
* funcs/funcs_end.
|
||||
*
|
||||
* Returns NULL if the entire funcs_pg has been consumed otherwise the next
|
||||
* entry to process if funcs_end has been reached.
|
||||
*/
|
||||
void *hyp_ftrace_sync(unsigned long *funcs_pg, unsigned long *funcs,
|
||||
unsigned long *funcs_end, unsigned long offset_idx,
|
||||
void *tramp)
|
||||
{
|
||||
struct __ftrace_sync_patch_args args = {
|
||||
.tramp = tramp ? tramp : (void *)__hyp_ftrace_tramp,
|
||||
.offset_idx = funcs ? offset_idx : 0,
|
||||
.funcs_pg = funcs_pg,
|
||||
};
|
||||
|
||||
if (!funcs || !funcs_end) {
|
||||
funcs = __hyp_patchable_function_entries_start;
|
||||
funcs_end = __hyp_patchable_function_entries_end;
|
||||
}
|
||||
|
||||
hyp_ftrace_patch(funcs, funcs_end, 2 * AARCH64_INSN_SIZE,
|
||||
__get_enable_disable_ins_from_funcs_pg, (void *)&args);
|
||||
|
||||
return funcs_pg_is_end(args.funcs_pg) ? NULL : args.funcs_pg;
|
||||
}
|
||||
|
||||
void hyp_ftrace_disable(unsigned long *funcs, unsigned long *funcs_end)
|
||||
{
|
||||
if (!funcs || !funcs_end) {
|
||||
funcs = __hyp_patchable_function_entries_start;
|
||||
funcs_end = __hyp_patchable_function_entries_end;
|
||||
}
|
||||
|
||||
hyp_ftrace_patch(funcs, funcs_end, 2 * AARCH64_INSN_SIZE,
|
||||
__get_disable_ins, NULL);
|
||||
}
|
||||
|
||||
@@ -1545,6 +1545,18 @@ static void handle___pkvm_selftest_event(struct kvm_cpu_context *host_ctxt)
|
||||
cpu_reg(host_ctxt, 1) = ret;
|
||||
}
|
||||
|
||||
static void handle___pkvm_sync_ftrace(struct kvm_cpu_context *host_ctxt)
|
||||
{
|
||||
DECLARE_REG(unsigned long, host_func_pg, host_ctxt, 1);
|
||||
|
||||
cpu_reg(host_ctxt, 1) = __pkvm_sync_ftrace(host_func_pg);
|
||||
}
|
||||
|
||||
static void handle___pkvm_disable_ftrace(struct kvm_cpu_context *host_ctxt)
|
||||
{
|
||||
cpu_reg(host_ctxt, 1) = __pkvm_disable_ftrace();
|
||||
}
|
||||
|
||||
static void handle___pkvm_alloc_module_va(struct kvm_cpu_context *host_ctxt)
|
||||
{
|
||||
DECLARE_REG(u64, nr_pages, host_ctxt, 1);
|
||||
@@ -1856,6 +1868,8 @@ static const hcall_t host_hcall[] = {
|
||||
HANDLE_FUNC(__pkvm_swap_reader_tracing),
|
||||
HANDLE_FUNC(__pkvm_enable_event),
|
||||
HANDLE_FUNC(__pkvm_selftest_event),
|
||||
HANDLE_FUNC(__pkvm_sync_ftrace),
|
||||
HANDLE_FUNC(__pkvm_disable_ftrace),
|
||||
HANDLE_FUNC(__pkvm_tlb_flush_vmid),
|
||||
HANDLE_FUNC(__pkvm_hyp_alloc_mgt_refill),
|
||||
HANDLE_FUNC(__pkvm_hyp_alloc_mgt_reclaimable),
|
||||
|
||||
Reference in New Issue
Block a user