Merge android16-6.12 into android16-6.12-lts

This merges the android16-6.12 branch into the -lts branch, catching
it up with the latest changes in there.

It contains the following commits:

2cc84bbe12 ANDROID: iommu/arm-smmu-v3-kvm: Wrap around when writing to cmdq
b32e32bf59 ANDROID: KVM: arm64: pviommu: Fix return value in __pkvm_use_dma_locked
42b01ce154 ANDROID: ABI: Update pixel symbol list
258557effa ANDROID: power: Add vendor hook for suspend
7a02c9f4a3 ANDROID: GKI: Update symbol list for vivo
79376c9869 ANDROID: vendor_hooks: Add hook in try_to_unmap_one()
1a6d07106c ANDROID: vendor_hooks: Add hook in mmap_region()
b165e16c2e ANDROID: vendor_hooks: Add hook in shrink_node_memcgs
8f5f6798e1 ANDROID: vendor_hooks: Add vendor hooks in __swap_writepage
7669d436d7 ANDROID: KVM: arm64: Remove args copy for direct FF-A calls
6ce196d4af ANDROID: Enable PKVM_STACKTRACE
265f4173a2 ANDROID: KVM: arm64: Remove DEBUG dependency to PKVM_STACKTRACE
3b1d8cc5a2 ANDROID: KVM: arm64: NVHE_EL2_DEBUG to PKVM_DEBUG menuconfig
58ca789e21 ANDROID: KVM: arm64: PROTECTED_NVHE_FTRACE to PKVM_FTRACE
386af809e4 ANDROID: KVM: arm64: PROTECTED_NVHE_STACKTRACE to PKVM_STACKTRACE
8b2a8f6b2e ANDROID: KVM: arm64: PROTECTED_NVHE_TESTING to PKVM_SELFTESTS
b526a11319 ANDROID: KVM: arm64: Add PKVM_DISABLE_STAGE2_ON_PANIC
e19e2557a0 ANDROID: KVM: arm64: Always resolve nvhe BUGs file and line
7bc24faa5a ANDROID: KVM: arm64: Add PKVM_STRICT_CHECKS
9cfa11e5f1 ANDROID: KVM: arm64: Add PKVM_DUMP_TRACE_ON_PANIC
a3f61dba68 ANDROID: ring-buffer: Handle external writer reader_page fast-forward
10a77358c7 ANDROID: KVM: arm64: Allow relinqush for p-guest with huge-mappings
05a9e97a23 ANDROID: KVM: arm64: Use unmap for pKVM guests memory relinquish
024d995fb6 ANDROID: KVM: arm64: Add hyp request SPLIT
c8303029c0 ANDROID: KVM: arm64: Add host_split_guest for pKVM
2846906d1e ANDROID: KVM: arm64: Check for existing pinned_pages before THP in pkvm_mem_abort
2e30f9c5c2 ANDROID: KVM: arm64: Disable relinquish for p-guest huge-mappings
56ad021f7a ANDROID: KVM: arm64: Disallow #include trace.h for pKVM.
f503b8a51a ANDROID: GKI: Add symbol to symbol list for vivo.
0142de08c8 ANDROID: vendor_hooks: add hooks to modify pageflags
22cf53077b ANDROID: gunyah: GKI: Fix the wrong return value in gunyah driver

Change-Id: I64fde9df60b85399d6d98aed923b00a7430ae226
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2025-06-06 06:53:26 +00:00
56 changed files with 859 additions and 161 deletions

View File

@@ -97,6 +97,7 @@ CONFIG_ARM_SCMI_CPUFREQ=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
CONFIG_PTDUMP_STAGE2_DEBUGFS=y
CONFIG_PKVM_STACKTRACE=y
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_SHADOW_CALL_STACK=y

View File

@@ -88,6 +88,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_host_wrprotect_guest,
__KVM_HOST_SMCCC_FUNC___pkvm_host_test_clear_young_guest,
__KVM_HOST_SMCCC_FUNC___pkvm_host_mkyoung_guest,
__KVM_HOST_SMCCC_FUNC___pkvm_host_split_guest,
__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,

View File

@@ -728,6 +728,7 @@ struct kvm_hyp_req {
#define KVM_HYP_LAST_REQ 0
#define KVM_HYP_REQ_TYPE_MEM 1
#define KVM_HYP_REQ_TYPE_MAP 2
#define KVM_HYP_REQ_TYPE_SPLIT 3
u8 type;
union {
struct {
@@ -742,6 +743,10 @@ struct kvm_hyp_req {
unsigned long guest_ipa;
size_t size;
} map;
struct {
unsigned long guest_ipa;
size_t size;
} split;
};
};

View File

@@ -11,7 +11,7 @@
* Hypervisor events definitions.
*/
#ifdef CONFIG_PROTECTED_NVHE_FTRACE
#ifdef CONFIG_PKVM_FTRACE
HYP_EVENT(func,
HE_PROTO(unsigned long ip, unsigned long parent),
HE_STRUCT(
@@ -142,7 +142,7 @@ HYP_EVENT(vcpu_illegal_trap,
HE_PRINTK("esr_el2=%llx", __entry->esr)
);
#ifdef CONFIG_PROTECTED_NVHE_TESTING
#ifdef CONFIG_PKVM_SELFTESTS
HYP_EVENT(selftest,
HE_PROTO(void),
HE_STRUCT(),

View File

@@ -186,6 +186,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
int pkvm_mem_abort_range(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, size_t size);
int __pkvm_pgtable_stage2_split(struct kvm_vcpu *vcpu, phys_addr_t ipa, size_t size);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);

View File

@@ -866,8 +866,7 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
* kvm_pgtable_stage2_split() is best effort: it tries to break as many
* blocks in the input range as allowed by @mc_capacity.
*/
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
struct kvm_mmu_memory_cache *mc);
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, void *mc);
/**
* kvm_pgtable_walk() - Walk a page-table.

View File

@@ -534,8 +534,7 @@ int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_
enum kvm_pgtable_walk_flags flags);
kvm_pte_t pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
struct kvm_mmu_memory_cache *mc);
int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, void *mc);
void pkvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops,
struct kvm_pgtable_pte_ops *pte_ops,
void *pgtable, s8 level);

View File

@@ -140,7 +140,7 @@ KVM_NVHE_ALIAS(__hyp_rodata_end);
KVM_NVHE_ALIAS(__hyp_event_ids_start);
KVM_NVHE_ALIAS(__hyp_event_ids_end);
KVM_NVHE_ALIAS(__hyp_printk_fmts_start);
#ifdef CONFIG_PROTECTED_NVHE_FTRACE
#ifdef CONFIG_PKVM_FTRACE
KVM_NVHE_ALIAS(__hyp_patchable_function_entries_start);
KVM_NVHE_ALIAS(__hyp_patchable_function_entries_end);
#endif

View File

@@ -43,42 +43,10 @@ menuconfig KVM
If unsure, say N.
config NVHE_EL2_DEBUG
bool "Debug mode for non-VHE EL2 object"
depends on KVM
select PKVM_SELFTESTS
help
Say Y here to enable the debug mode for the non-VHE KVM EL2 object.
Failure reports will BUG() in the hypervisor. This is intended for
local EL2 hypervisor development.
If unsure, say N.
config PKVM_SELFTESTS
bool "Protected KVM hypervisor selftests"
help
Say Y here to enable Protected KVM (pKVM) hypervisor selftests
during boot. Failure reports will panic the hypervisor. This is
intended for EL2 hypervisor development.
If unsure, say N.
config PROTECTED_NVHE_STACKTRACE
bool "Protected KVM hypervisor stacktraces"
depends on NVHE_EL2_DEBUG
default n
help
Say Y here to enable pKVM hypervisor stacktraces on hyp_panic()
If using protected nVHE mode, but cannot afford the associated
memory cost (less than 0.75 page per CPU) of pKVM stacktraces,
say N.
If unsure, or not using protected nVHE (pKVM), say N.
if KVM
config PTDUMP_STAGE2_DEBUGFS
bool "Present the stage-2 pagetables to debugfs"
depends on KVM
depends on DEBUG_KERNEL
depends on DEBUG_FS
depends on GENERIC_PTDUMP
@@ -93,26 +61,17 @@ config PTDUMP_STAGE2_DEBUGFS
If in doubt, say N.
config PROTECTED_NVHE_TESTING
bool "Protected KVM hypervisor testing infrastructure"
depends on KVM
config PKVM_STACKTRACE
bool "Protected KVM hypervisor stacktraces"
default n
help
Say Y here to enable pKVM hypervisor testing infrastructure.
Say Y here to enable pKVM hypervisor stacktraces on hyp_panic()
If unsure, say N.
If using protected nVHE mode, but cannot afford the associated
memory cost (less than 0.75 page per CPU) of pKVM stacktraces,
say N.
config PROTECTED_NVHE_FTRACE
bool "Protected KVM hypervisor function tracing"
depends on KVM
depends on TRACING
default n
help
Say Y here to enable func and func_ret hypervisor tracing events.
Those events are raised whenever the hypervisor branches to a
function.
If unsure, Say N.
If unsure, or not using protected nVHE (pKVM), say N.
config PKVM_MODULE_PATH
string "Path to pKVM modules"
@@ -121,4 +80,67 @@ config PKVM_MODULE_PATH
Directory where the pKVM modules are found. If empty, the modules
will be searched into the default path /lib/modules/<uname>.
menuconfig PKVM_DEBUG
bool "Debug mode for Protected KVM hypervisor"
help
Say Y here to enable the debug mode for the Protected KVM (pKVM)
hypervisor. Failure reports will BUG() in the hypervisor. This is
intended for local EL2 hypervisor development.
If unsure, say N.
if PKVM_DEBUG
config PKVM_STRICT_CHECKS
bool "Additional checks in the Protected KVM hypervisor"
default y
help
Say Y here to add more checks into the Protected KVM hypervisor.
Those checks have a slight performance cost and will BUG() on a
failure. This is intended for EL2 hypervisor development.
config PKVM_SELFTESTS
bool "Protected KVM hypervisor selftests"
default y
help
Say Y here to enable Protected KVM (pKVM) hypervisor selftests
during boot. Failure reports will panic the hypervisor. This is
intended for EL2 hypervisor development.
If unsure, say N.
config PKVM_DUMP_TRACE_ON_PANIC
bool "Dump Protected KVM hypervisor trace buffer on panic"
default y
help
Say Y here to dump the content of the pKVM hypervisor trace buffer on either
hypervisor or host panic. In the case of a hyp panic, only the tail
page is dumped. It only has an effect if the command line option
hyp_trace_printk is set.
config PKVM_FTRACE
bool "Protected KVM hypervisor function tracing"
depends on FTRACE
default y
help
Say Y here to enable func and func_ret hypervisor tracing events.
Those events are raised whenever the hypervisor branches to a
function.
If unsure, Say N.
config PKVM_DISABLE_STAGE2_ON_PANIC
bool "Disable the host stage-2 on panic"
default n
help
If panic occurs while the host lock is held and if, plagued by bad
luck the panic path triggers a host stage-2 memory abort, a lockup
would happen. The only way out is to disable the stage-2 page-table.
This, however, tampers with the system's security. This option should therefore
solely be enabled to debug specific issues.
Just say N.
endif # PKVM_DEBUG
endif # KVM
endif # VIRTUALIZATION

View File

@@ -404,6 +404,11 @@ static int handle_hyp_req_map(struct kvm_vcpu *vcpu,
return pkvm_mem_abort_range(vcpu, req->map.guest_ipa, req->map.size);
}
static int handle_hyp_req_split(struct kvm_vcpu *vcpu, struct kvm_hyp_req *req)
{
return __pkvm_pgtable_stage2_split(vcpu, req->split.guest_ipa, req->split.size);
}
static int handle_hyp_req(struct kvm_vcpu *vcpu)
{
struct kvm_hyp_req *hyp_req = vcpu->arch.hyp_reqs;
@@ -420,6 +425,9 @@ static int handle_hyp_req(struct kvm_vcpu *vcpu)
case KVM_HYP_REQ_TYPE_MAP:
ret = handle_hyp_req_map(vcpu, hyp_req);
break;
case KVM_HYP_REQ_TYPE_SPLIT:
ret = handle_hyp_req_split(vcpu, hyp_req);
break;
default:
pr_warn("Unknown kvm_hyp_req type: %d\n", hyp_req->type);
ret = -EINVAL;
@@ -545,17 +553,13 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
kvm_err("Invalid host exception to nVHE hyp!\n");
} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
esr_brk_comment(esr) == BUG_BRK_IMM) {
struct bug_entry *bug = find_bug(elr_in_kimg);
const char *file = NULL;
unsigned int line = 0;
/* All hyp bugs, including warnings, are treated as fatal. */
if (!is_protected_kvm_enabled() ||
IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
struct bug_entry *bug = find_bug(elr_in_kimg);
if (bug)
bug_get_file_line(bug, &file, &line);
}
if (bug)
bug_get_file_line(bug, &file, &line);
if (file)
kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);

View File

@@ -263,7 +263,7 @@ SYM_CODE_START(__bp_harden_hyp_vecs)
.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)
#if defined(__KVM_NVHE_HYPERVISOR__) && defined(CONFIG_PROTECTED_NVHE_FTRACE)
#if defined(__KVM_NVHE_HYPERVISOR__) && defined(CONFIG_PKVM_FTRACE)
SYM_FUNC_START(__hyp_ftrace_tramp)
stp x0, x1, [sp, #-16]!
stp x2, x3, [sp, #-16]!

View File

@@ -37,6 +37,7 @@ void __pkvm_teardown_tracing(void);
int __pkvm_enable_tracing(bool enable);
int __pkvm_reset_tracing(unsigned int cpu);
int __pkvm_swap_reader_tracing(unsigned int cpu);
void __pkvm_panic_tracing(void);
int __pkvm_enable_event(unsigned short id, bool enable);
extern struct hyp_printk_fmt __hyp_printk_fmts_start[];
@@ -81,7 +82,7 @@ do { \
#define trace_hyp_printk(fmt, ...) \
__trace_hyp_printk_N(fmt, __VA_ARGS__)
#ifdef CONFIG_PROTECTED_NVHE_FTRACE
#ifdef CONFIG_PKVM_FTRACE
void hyp_ftrace_setup_core(void);
unsigned long *hyp_ftrace_find_host_func(unsigned long host_func,
unsigned long *funcs,
@@ -105,7 +106,7 @@ static inline void hyp_ftrace_enable(unsigned long *funcs, unsigned long *funcs_
bool enable, void *tramp) { }
static inline int __pkvm_sync_ftrace(unsigned long host_func_pg) { return -EOPNOTSUPP; }
static inline int __pkvm_disable_ftrace(void) { return -EOPNOTSUPP; }
#endif /* CONFIG_PROTECTED_NVHE_FTRACE */
#endif /* CONFIG_PKVM_FTRACE */
#else /* CONFIG_TRACING */
static inline int
register_hyp_mod_events(void *event_ids, size_t nr_events, void *funcs, void *funcs_end,
@@ -127,6 +128,7 @@ static inline void __pkvm_teardown_tracing(void) { }
static inline int __pkvm_enable_tracing(bool enable) { return -ENODEV; }
static inline int __pkvm_reset_tracing(unsigned int cpu) { return -ENODEV; }
static inline int __pkvm_swap_reader_tracing(unsigned int cpu) { return -ENODEV; }
static inline void __pkvm_panic_tracing(void) { }
static inline int __pkvm_enable_event(unsigned short id, bool enable) { return -ENODEV; }
#define trace_hyp_printk(fmt, ...)

View File

@@ -64,6 +64,7 @@ int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_
int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm, u64 size);
int __pkvm_host_test_clear_young_guest(u64 gfn, u64 size, bool mkold, struct pkvm_hyp_vm *vm);
kvm_pte_t __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu);
int __pkvm_host_split_guest(u64 gfn, u64 size, struct pkvm_hyp_vcpu *vcpu);
int __pkvm_guest_share_host(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa,
u64 nr_pages, u64 *nr_shared);
int __pkvm_guest_unshare_host(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa,

View File

@@ -41,9 +41,9 @@ int __pkvm_map_module_page(u64 pfn, void *va, enum kvm_pgtable_prot prot, bool i
void __pkvm_unmap_module_page(u64 pfn, void *va);
void *__pkvm_alloc_module_va(u64 nr_pages);
int pkvm_remap_range(void *va, int nr_pages, bool nc);
#ifdef CONFIG_NVHE_EL2_DEBUG
#ifdef CONFIG_PKVM_STRICT_CHECKS
void assert_in_mod_range(unsigned long addr);
#else
static inline void assert_in_mod_range(unsigned long addr) { }
#endif /* CONFIG_NVHE_EL2_DEBUG */
#endif /* CONFIG_PKVM_STRICT_CHECKS */
#endif /* __KVM_HYP_MM_H */

View File

@@ -117,7 +117,7 @@ static inline void hyp_read_unlock(hyp_rwlock_t *lock)
: "memory");
}
#ifdef CONFIG_NVHE_EL2_DEBUG
#ifdef CONFIG_PKVM_STRICT_CHECKS
static inline void hyp_assert_write_lock_held(hyp_rwlock_t *lock)
{
BUG_ON(!(READ_ONCE(lock->__val) & BIT(__HYP_RWLOCK_WRITER_BIT)));

View File

@@ -105,7 +105,7 @@ static inline bool hyp_spin_is_locked(hyp_spinlock_t *lock)
return lockval.owner != lockval.next;
}
#ifdef CONFIG_NVHE_EL2_DEBUG
#ifdef CONFIG_PKVM_STRICT_CHECKS
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock)
{
/*

View File

@@ -14,7 +14,7 @@ hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../../../kernel/smccc-call.o
hyp-obj-$(CONFIG_LIST_HARDENED) += list_debug.o
hyp-obj-$(CONFIG_TRACING) += clock.o events.o trace.o
hyp-obj-$(CONFIG_PROTECTED_NVHE_FTRACE) += ftrace.o
hyp-obj-$(CONFIG_PKVM_FTRACE) += ftrace.o
hyp-obj-$(CONFIG_MODULES) += modules.o
hyp-obj-y += $(lib-objs)

View File

@@ -5,9 +5,6 @@ $(obj)/hyp.lds: arch/arm64/kvm/hyp/nvhe/module.lds.S FORCE
include $(srctree)/arch/arm64/kvm/hyp/nvhe/Makefile.nvhe
# Deprecated; subject to removal.
ccflags-y += -I$(srctree)/arch/arm64/kvm/hyp/include/module/nvhe
# LINT.IfChange(includes)
ccflags-y += -I$(srctree)/arch/arm64/kvm/hyp/include/module
# LINT.ThenChange(/BUILD.bazel:pkvm_includes)

View File

@@ -14,7 +14,7 @@ ccflags-y += -fno-stack-protector \
-DDISABLE_BRANCH_PROFILING \
$(DISABLE_STACKLEAK_PLUGIN)
ifeq ($(CONFIG_PROTECTED_NVHE_FTRACE), y)
ifeq ($(CONFIG_PKVM_FTRACE), y)
ccflags-y += -fpatchable-function-entry=3
endif

View File

@@ -29,7 +29,7 @@ static struct {
} funcs;
} hyp_mod_events[MAX_MOD_EVENTS];
#ifdef CONFIG_PROTECTED_NVHE_FTRACE
#ifdef CONFIG_PKVM_FTRACE
int __pkvm_sync_ftrace(unsigned long host_funcs_pg)
{
unsigned long *funcs_pg = (unsigned long *)kern_hyp_va(host_funcs_pg);

View File

@@ -1111,47 +1111,21 @@ out_unlock:
static void do_ffa_direct_msg(struct kvm_cpu_context *ctxt,
u64 vm_handle)
{
DECLARE_REG(u32, func_id, ctxt, 0);
DECLARE_REG(u32, endp, ctxt, 1);
DECLARE_REG(u32, msg_flags, ctxt, 2);
DECLARE_REG(u32, w3, ctxt, 3);
DECLARE_REG(u32, w4, ctxt, 4);
DECLARE_REG(u32, w5, ctxt, 5);
DECLARE_REG(u32, w6, ctxt, 6);
DECLARE_REG(u32, w7, ctxt, 7);
struct arm_smccc_1_2_regs req, resp;
struct arm_smccc_1_2_regs *reg = (void *)&ctxt->regs.regs[0];
if (FIELD_GET(FFA_SRC_ENDPOINT_MASK, endp) != vm_handle) {
resp = (struct arm_smccc_1_2_regs) {
.a0 = FFA_ERROR,
.a2 = FFA_RET_INVALID_PARAMETERS,
};
struct arm_smccc_res res;
ffa_to_smccc_error(&res, FFA_RET_INVALID_PARAMETERS);
ffa_set_retval(ctxt, &res);
return;
}
req = (struct arm_smccc_1_2_regs) {
.a0 = func_id,
.a1 = endp,
.a2 = msg_flags,
.a3 = w3,
.a4 = w4,
.a5 = w5,
.a6 = w6,
.a7 = w7,
};
/*
* In case SMCCC 1.2 is not supported we should preserve the
* host registers.
*/
memcpy(&resp, &ctxt->regs.regs[0], sizeof(resp));
__hyp_exit();
arm_smccc_1_2_smc(&req, &resp);
arm_smccc_1_2_smc(reg, reg);
__hyp_enter();
memcpy(&ctxt->regs.regs[0], &resp, sizeof(resp));
}
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)

View File

@@ -120,7 +120,7 @@ SYM_FUNC_START(__hyp_do_panic)
mov x29, x0
#ifdef CONFIG_NVHE_EL2_DEBUG
#ifdef CONFIG_PKVM_DISABLE_STAGE2_ON_PANIC
/* Ensure host stage-2 is disabled */
mrs x0, hcr_el2
bic x0, x0, #HCR_VM

View File

@@ -1217,6 +1217,29 @@ out:
cpu_reg(host_ctxt, 1) = pte;
}
static void handle___pkvm_host_split_guest(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(u64, gfn, host_ctxt, 1);
DECLARE_REG(u64, size, host_ctxt, 2);
struct pkvm_hyp_vcpu *hyp_vcpu;
int ret = -EINVAL;
if (!is_protected_kvm_enabled())
goto out;
hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
if (!hyp_vcpu)
goto out;
if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu))
goto out;
ret = __pkvm_host_split_guest(gfn, size, hyp_vcpu);
out:
cpu_reg(host_ctxt, 1) = ret;
}
static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
{
struct pkvm_hyp_vcpu *hyp_vcpu;
@@ -1571,7 +1594,7 @@ static void handle___pkvm_selftest_event(struct kvm_cpu_context *host_ctxt)
{
int smc_ret = SMCCC_RET_NOT_SUPPORTED, ret = -EOPNOTSUPP;
#ifdef CONFIG_PROTECTED_NVHE_TESTING
#ifdef CONFIG_PKVM_SELFTESTS
trace_selftest();
smc_ret = SMCCC_RET_SUCCESS;
ret = 0;
@@ -1912,6 +1935,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_host_wrprotect_guest),
HANDLE_FUNC(__pkvm_host_test_clear_young_guest),
HANDLE_FUNC(__pkvm_host_mkyoung_guest),
HANDLE_FUNC(__pkvm_host_split_guest),
HANDLE_FUNC(__kvm_adjust_pc),
HANDLE_FUNC(__kvm_vcpu_run),
HANDLE_FUNC(__kvm_timer_set_cntvoff),

View File

@@ -410,6 +410,12 @@ int __pkvm_guest_relinquish_to_host(struct pkvm_hyp_vcpu *vcpu,
if (ret || !kvm_pte_valid(pte))
goto end;
/* We don't support splitting non-leaf mappings */
if (level != KVM_PGTABLE_LAST_LEVEL) {
ret = -E2BIG;
goto end;
}
state = guest_get_page_state(pte, ipa);
if (state != PKVM_PAGE_OWNED) {
ret = -EPERM;
@@ -429,8 +435,7 @@ int __pkvm_guest_relinquish_to_host(struct pkvm_hyp_vcpu *vcpu,
psci_mem_protect_dec(1);
/* Zap the guest stage2 pte and return ownership to the host */
ret = kvm_pgtable_stage2_annotate(&vm->pgt, ipa, PAGE_SIZE,
&vcpu->vcpu.arch.stage2_mc, 0);
ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE);
if (ret)
goto end;
@@ -1260,7 +1265,7 @@ int __pkvm_host_share_hyp(u64 pfn)
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
if (ret)
goto unlock;
if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
if (IS_ENABLED(CONFIG_PKVM_STRICT_CHECKS)) {
ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
if (ret)
goto unlock;
@@ -1581,7 +1586,7 @@ int __pkvm_host_donate_hyp_locked(u64 pfn, u64 nr_pages, enum kvm_pgtable_prot p
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
if (ret)
goto unlock;
if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
if (IS_ENABLED(CONFIG_PKVM_STRICT_CHECKS)) {
ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
if (ret)
goto unlock;
@@ -1618,7 +1623,7 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_OWNED);
if (ret)
goto unlock;
if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
if (IS_ENABLED(CONFIG_PKVM_STRICT_CHECKS)) {
ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE);
if (ret)
goto unlock;
@@ -1881,7 +1886,7 @@ static int __pkvm_use_dma_locked(phys_addr_t phys_addr, size_t size,
enum kvm_pgtable_prot prot;
if (hyp_vcpu)
return EINVAL;
return -EINVAL;
ret = ___host_check_page_state_range(phys_addr, size,
PKVM_PAGE_TAINTED,
@@ -2188,6 +2193,30 @@ kvm_pte_t __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu)
return pte;
}
int __pkvm_host_split_guest(u64 gfn, u64 size, struct pkvm_hyp_vcpu *vcpu)
{
struct kvm_hyp_memcache *mc = &vcpu->vcpu.arch.stage2_mc;
struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
u64 ipa = hyp_pfn_to_phys(gfn);
int ret;
if (size != PMD_SIZE)
return -EINVAL;
guest_lock_component(vm);
/*
* stage2_split() already checks the existing mapping is valid and PMD-level.
* No other check is necessary.
*/
ret = kvm_pgtable_stage2_split(&vm->pgt, ipa, size, mc);
guest_unlock_component(vm);
return ret;
}
static int __host_set_owner_guest(struct pkvm_hyp_vcpu *vcpu, u64 phys, u64 ipa,
size_t size, bool is_memory)
{

View File

@@ -111,7 +111,7 @@ int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
return err;
}
#ifdef CONFIG_NVHE_EL2_DEBUG
#ifdef CONFIG_PKVM_STRICT_CHECKS
static unsigned long mod_range_start = ULONG_MAX;
static unsigned long mod_range_end;
static DEFINE_HYP_SPINLOCK(mod_range_lock);

View File

@@ -41,7 +41,7 @@ SECTIONS {
*(__patchable_function_entries __patchable_function_entries.*)
}
#ifdef CONFIG_PROTECTED_NVHE_FTRACE
#ifdef CONFIG_PKVM_FTRACE
.hyp.text.ftrace_tramp : {
. = . +20;
}

View File

@@ -1626,9 +1626,19 @@ static bool pkvm_memrelinquish_call(struct pkvm_hyp_vcpu *hyp_vcpu,
goto out_guest_err;
ret = __pkvm_guest_relinquish_to_host(hyp_vcpu, ipa, &pa);
if (ret == -ENOMEM) {
if (pkvm_handle_empty_memcache(hyp_vcpu, exit_code))
if (ret == -E2BIG) {
struct kvm_hyp_req *req = pkvm_hyp_req_reserve(hyp_vcpu, KVM_HYP_REQ_TYPE_SPLIT);
if (!req) {
ret = -ENOMEM;
goto out_guest_err;
}
req->split.guest_ipa = ALIGN_DOWN(ipa, PMD_SIZE);
req->split.size = PMD_SIZE;
write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
*exit_code = ARM_EXCEPTION_HYP_REQ;
return false;
} else if (ret) {

View File

@@ -34,7 +34,7 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
stacktrace_info->pc = pc;
}
#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
#ifdef CONFIG_PKVM_STACKTRACE
#include <asm/stacktrace/nvhe.h>
DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
@@ -134,11 +134,11 @@ static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
unwind(&state, pkvm_save_backtrace_entry, &idx);
}
#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
#else /* !CONFIG_PKVM_STACKTRACE */
static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
{
}
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
#endif /* CONFIG_PKVM_STACKTRACE */
/*
* kvm_nvhe_prepare_backtrace - prepare to dump the nVHE backtrace

View File

@@ -524,6 +524,8 @@ asmlinkage void __noreturn hyp_panic(void)
kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0),
_THIS_IP_);
__pkvm_panic_tracing();
__hyp_do_panic(host_ctxt, spsr, elr, par);
unreachable();
}

View File

@@ -41,6 +41,7 @@ struct hyp_rb_per_cpu {
#define HYP_RB_UNAVAILABLE 0
#define HYP_RB_READY 1
#define HYP_RB_WRITING 2
#define HYP_RB_PANIC 3
DEFINE_PER_CPU(struct hyp_rb_per_cpu, trace_rb);
DEFINE_HYP_SPINLOCK(trace_rb_lock);
@@ -254,7 +255,7 @@ void *tracing_reserve_entry(unsigned long length)
struct ring_buffer_event *rb_event;
if (atomic_cmpxchg(&cpu_buffer->status, HYP_RB_READY, HYP_RB_WRITING)
== HYP_RB_UNAVAILABLE)
!= HYP_RB_READY)
return NULL;
rb_event = rb_reserve_next(cpu_buffer, length);
@@ -321,12 +322,15 @@ static int rb_cpu_disable_writing(struct hyp_rb_per_cpu *cpu_buffer)
static int rb_cpu_enable_writing(struct hyp_rb_per_cpu *cpu_buffer)
{
int prev_status;
if (!rb_cpu_loaded(cpu_buffer))
return -ENODEV;
atomic_cmpxchg(&cpu_buffer->status, HYP_RB_UNAVAILABLE, HYP_RB_READY);
prev_status = atomic_cmpxchg(&cpu_buffer->status, HYP_RB_UNAVAILABLE,
HYP_RB_READY);
return 0;
return prev_status == HYP_RB_UNAVAILABLE ? 0 : -ENODEV;
}
static int rb_cpu_reset(struct hyp_rb_per_cpu *cpu_buffer)
@@ -614,3 +618,49 @@ int __pkvm_reset_tracing(unsigned int cpu)
return ret;
}
void __pkvm_panic_tracing(void)
{
#ifdef CONFIG_PKVM_DUMP_TRACE_ON_PANIC
int cpu;
hyp_spin_lock(&trace_rb_lock);
for (cpu = 0; cpu < hyp_nr_cpus; cpu++) {
struct hyp_rb_per_cpu *cpu_buffer = per_cpu_ptr(&trace_rb, cpu);
int prev_status, skipped = 0;
if (!rb_cpu_loaded(cpu_buffer))
continue;
do {
prev_status = atomic_cmpxchg_acquire(&cpu_buffer->status, HYP_RB_READY,
HYP_RB_PANIC);
} while (prev_status == HYP_RB_WRITING);
/* Allow the host to read the very last events */
while (cpu_buffer->tail_page != cpu_buffer->reader_page) {
struct hyp_buffer_page *prev_reader = cpu_buffer->reader_page;
if (rb_swap_reader_page(cpu_buffer))
break;
/*
* The reader is still on the previous reader page and events there can
* still be read.
*/
if (++skipped == 1)
continue;
WRITE_ONCE(cpu_buffer->meta->overrun,
cpu_buffer->meta->overrun + prev_reader->entries);
cpu_buffer->meta->reader.lost_events = cpu_buffer->meta->overrun -
cpu_buffer->last_overrun;
WRITE_ONCE(meta_pages_lost(cpu_buffer->meta),
meta_pages_lost(cpu_buffer->meta) + 1);
}
}
hyp_spin_unlock(&trace_rb_lock);
#endif
}

View File

@@ -1112,7 +1112,7 @@ static void debug_check_table_before_coalescing(
struct stage2_map_data *data,
kvm_pte_t *ptep, u64 pa)
{
#ifdef CONFIG_NVHE_EL2_DEBUG
#ifdef CONFIG_PKVM_STRICT_CHECKS
u64 granule = kvm_granule_size(ctx->level + 1);
int i;
@@ -1786,13 +1786,48 @@ static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
return 0;
}
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
struct kvm_mmu_memory_cache *mc)
static int pkvm_stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
enum kvm_pgtable_walk_flags visit)
{
struct stage2_map_data *data = ctx->arg;
struct kvm_pgtable *pgt = data->mmu->pgt;
struct kvm_hyp_memcache *mc = data->memcache;
enum kvm_pgtable_prot prot;
kvm_pte_t pte = ctx->old;
kvm_pte_t *childp;
if (ctx->level == KVM_PGTABLE_LAST_LEVEL)
return 0;
/* We can only split PMD-level blocks */
if (!kvm_pte_valid(pte) || ctx->level != KVM_PGTABLE_LAST_LEVEL - 1)
return -EINVAL;
prot = kvm_pgtable_stage2_pte_prot(pte);
childp = kvm_pgtable_stage2_create_unlinked(pgt, kvm_pte_to_phys(pte),
ctx->level, prot, mc, true);
if (IS_ERR(childp))
return PTR_ERR(childp);
WARN_ON(!stage2_try_break_pte(ctx, data->mmu));
stage2_make_pte(ctx, kvm_init_table_pte(childp, ctx->mm_ops));
return 0;
}
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, void *mc)
{
struct stage2_map_data data = {
.mmu = pgt->mmu,
.memcache = mc,
};
struct kvm_pgtable_walker walker = {
.cb = stage2_split_walker,
.cb = static_branch_unlikely(&kvm_protected_mode_initialized) ?
pkvm_stage2_split_walker : stage2_split_walker,
.arg = static_branch_unlikely(&kvm_protected_mode_initialized) ?
&data : mc,
.flags = KVM_PGTABLE_WALK_LEAF,
.arg = mc,
};
int ret;

View File

@@ -101,7 +101,7 @@ static const char *hyp_printk_fmt_from_id(u8 fmt_id)
return fmt ? fmt->fmt : "Unknown Format";
}
#ifdef CONFIG_PROTECTED_NVHE_FTRACE
#ifdef CONFIG_PKVM_FTRACE
extern unsigned long __hyp_patchable_function_entries_start[];
extern unsigned long __hyp_patchable_function_entries_end[];
extern unsigned long kvm_nvhe_sym(__hyp_text_start_kern);
@@ -835,7 +835,7 @@ void hyp_trace_init_event_tracefs(struct dentry *parent)
{
int nr_events = nr_entries(__hyp_events_start, __hyp_events_end);
#ifdef CONFIG_PROTECTED_NVHE_FTRACE
#ifdef CONFIG_PKVM_FTRACE
tracefs_create_file("set_ftrace_filter", 0600, parent, (void *)true,
&hyp_ftrace_filter_fops);
tracefs_create_file("set_ftrace_notrace", 0200, parent, (void *)false,

View File

@@ -6,6 +6,7 @@
#include <linux/arm-smccc.h>
#include <linux/percpu-defs.h>
#include <linux/panic_notifier.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>
@@ -55,6 +56,8 @@ static struct hyp_trace_buffer {
static size_t hyp_trace_buffer_size = 7 << 10;
static bool hyp_trace_panic __read_mostly;
/* Number of pages the ring-buffer requires to accommodate for size */
#define NR_PAGES(size) \
((PAGE_ALIGN(size) >> PAGE_SHIFT) + 1)
@@ -174,6 +177,10 @@ static void hyp_clock_wait(struct hyp_trace_buffer *hyp_buffer)
static int __get_reader_page(int cpu)
{
/* we'd better not try to call the hyp if it has panicked */
if (hyp_trace_panic)
return 0;
return kvm_call_hyp_nvhe(__pkvm_swap_reader_tracing, cpu);
}
@@ -877,7 +884,7 @@ static int hyp_trace_clock_show(struct seq_file *m, void *v)
}
DEFINE_SHOW_ATTRIBUTE(hyp_trace_clock);
#ifdef CONFIG_PROTECTED_NVHE_TESTING
#ifdef CONFIG_PKVM_SELFTESTS
static int selftest_event_open(struct inode *inode, struct file *file)
{
if (file->f_mode & FMODE_WRITE)
@@ -949,6 +956,28 @@ static void hyp_trace_buffer_printk(struct hyp_trace_buffer *hyp_buffer)
}
}
static int hyp_trace_panic_handler(struct notifier_block *self,
unsigned long ev, void *v)
{
#ifdef CONFIG_PKVM_DUMP_TRACE_ON_PANIC
if (!hyp_trace_buffer_loaded(&hyp_trace_buffer) ||
!hyp_trace_buffer.printk_iter)
return NOTIFY_DONE;
if (!strncmp("HYP panic:", v, 10))
hyp_trace_panic = true;
ring_buffer_poll_writer(hyp_trace_buffer.trace_buffer, RING_BUFFER_ALL_CPUS);
hyp_trace_buffer_printk(&hyp_trace_buffer);
#endif
return NOTIFY_DONE;
}
static struct notifier_block hyp_trace_panic_notifier = {
.notifier_call = hyp_trace_panic_handler,
.priority = INT_MAX - 1,
};
void hyp_trace_enable_event_early(void)
{
if (hyp_event_early_probe()) {
@@ -1026,5 +1055,7 @@ int hyp_trace_init_tracefs(void)
hyp_trace_buffer_printk_init(&hyp_trace_buffer))
pr_warn("Failed to init ht_printk");
atomic_notifier_chain_register(&panic_notifier_list, &hyp_trace_panic_notifier);
return 0;
}

View File

@@ -1730,9 +1730,8 @@ __pkvm_pages_to_ppages(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t g
read_lock(&kvm->mmu_lock);
while (p < nr_pages) {
phys_addr_t ipa = gfn << PAGE_SHIFT;
long skip, page_size = PAGE_SIZE;
struct page *page = pages[p];
long page_size, skip;
unsigned long hva;
u64 pfn;
ppage = kvm_pinned_pages_iter_first(&kvm->arch.pkvm.pinned_pages,
@@ -1743,8 +1742,14 @@ __pkvm_pages_to_ppages(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t g
}
pfn = page_to_pfn(page);
hva = gfn_to_hva_memslot_prot(memslot, gfn, NULL);
page_size = transparent_hugepage_adjust(kvm, memslot, hva, &pfn, &ipa);
if (!kvm_pinned_pages_iter_first(&kvm->arch.pkvm.pinned_pages,
ALIGN_DOWN(ipa, PMD_SIZE),
ALIGN(ipa + 1, PMD_SIZE) - 1)){
unsigned long hva = gfn_to_hva_memslot_prot(memslot, gfn, NULL);
page_size = transparent_hugepage_adjust(kvm, memslot, hva, &pfn, &ipa);
}
/* Pop a ppage from the pre-allocated list */
ppage = list_first_entry(&ppage_prealloc, struct kvm_pinned_page, list_node);
@@ -2014,6 +2019,107 @@ int pkvm_mem_abort_prefault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return pkvm_mem_abort(vcpu, fault_ipa, size, memslot);
}
/*
* Splitting is only expected on the back of a relinquish guest HVC in the pKVM case, while
* pkvm_pgtable_stage2_split() can be called with dirty logging.
*/
int __pkvm_pgtable_stage2_split(struct kvm_vcpu *vcpu, phys_addr_t ipa, size_t size)
{
struct list_head ppage_prealloc = LIST_HEAD_INIT(ppage_prealloc);
struct kvm_hyp_memcache *hyp_memcache = &vcpu->arch.stage2_mc;
struct kvm_pinned_page *ppage, *tmp;
struct kvm_memory_slot *memslot;
struct kvm *kvm = vcpu->kvm;
int idx, p, ret, nr_pages;
struct page **pages;
kvm_pfn_t pfn;
gfn_t gfn;
if (WARN_ON(!kvm->arch.pkvm.enabled))
return -EINVAL;
if (!IS_ALIGNED(ipa, PMD_SIZE) || size != PMD_SIZE)
return -EINVAL;
ret = topup_hyp_memcache_account(vcpu->kvm, hyp_memcache, 1, 0);
if (ret)
return ret;
/* We already have 1 pin on the Huge Page */
nr_pages = (size >> PAGE_SHIFT) - 1;
gfn = (ipa >> PAGE_SHIFT) + 1;
/* Pre-allocate kvm_pinned_page before acquiring the mmu_lock */
for (p = 0; p < nr_pages; p++) {
ppage = kzalloc(sizeof(*ppage), GFP_KERNEL_ACCOUNT);
if (!ppage) {
ret = -ENOMEM;
goto free_pinned_pages;
}
list_add(&ppage->list_node, &ppage_prealloc);
}
idx = srcu_read_lock(&vcpu->kvm->srcu);
memslot = gfn_to_memslot(vcpu->kvm, gfn);
ret = __pkvm_pin_user_pages(kvm, memslot, gfn, nr_pages, &pages);
if (ret)
goto unlock_srcu;
write_lock(&kvm->mmu_lock);
ppage = find_ppage(kvm, ipa);
if (!ppage) {
ret = -EPERM;
goto end;
} else if (!ppage->order) {
ret = 0;
goto end;
}
ret = kvm_call_hyp_nvhe(__pkvm_host_split_guest, ipa >> PAGE_SHIFT, size);
if (ret)
goto end;
ppage->order = 0;
ppage->pins = 1;
pfn = page_to_pfn(ppage->page) + 1;
ipa = ipa + PAGE_SIZE;
while (nr_pages--) {
/* Pop a ppage from the pre-allocated list */
ppage = list_first_entry(&ppage_prealloc, struct kvm_pinned_page, list_node);
list_del_init(&ppage->list_node);
ppage->page = pfn_to_page(pfn);
ppage->ipa = ipa;
ppage->order = 0;
ppage->pins = 1;
insert_ppage(kvm, ppage);
pfn += 1;
ipa += PAGE_SIZE;
}
end:
write_unlock(&kvm->mmu_lock);
if (ret)
unpin_user_pages(pages, nr_pages);
kfree(pages);
unlock_srcu:
srcu_read_unlock(&vcpu->kvm->srcu, idx);
free_pinned_pages:
/* Free unused pre-allocated kvm_pinned_page */
list_for_each_entry_safe(ppage, tmp, &ppage_prealloc, list_node) {
list_del(&ppage->list_node);
kfree(ppage);
}
return ret;
}
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_trans *nested,
struct kvm_memory_slot *memslot,

View File

@@ -733,10 +733,10 @@ void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa)
ppage = kvm_pinned_pages_iter_first(&host_kvm->arch.pkvm.pinned_pages,
ipa, ipa + PAGE_SIZE - 1);
if (ppage) {
WARN_ON_ONCE(ppage->pins != 1);
if (ppage->pins)
ppage->pins--;
else
WARN_ON(1);
pins = ppage->pins;
if (!pins)
@@ -1113,7 +1113,7 @@ static struct module *pkvm_el2_mod_to_module(struct pkvm_el2_module *hyp_mod)
return container_of(arch, struct module, arch);
}
#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
#ifdef CONFIG_PKVM_STACKTRACE
unsigned long pkvm_el2_mod_kern_va(unsigned long addr)
{
struct pkvm_el2_module *mod;
@@ -1485,7 +1485,7 @@ EXPORT_SYMBOL(__pkvm_register_el2_call);
void pkvm_el2_mod_frob_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings)
{
#ifdef CONFIG_PROTECTED_NVHE_FTRACE
#ifdef CONFIG_PKVM_FTRACE
int i;
for (i = 0; i < ehdr->e_shnum; i++) {
@@ -1841,8 +1841,7 @@ kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys
return NULL;
}
int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
struct kvm_mmu_memory_cache *mc)
int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, void *mc)
{
WARN_ON_ONCE(1);
return -EINVAL;

View File

@@ -206,7 +206,7 @@ static void hyp_dump_backtrace(unsigned long hyp_offset)
kvm_nvhe_dump_backtrace_end();
}
#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
#ifdef CONFIG_PKVM_STACKTRACE
DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
pkvm_stacktrace);
@@ -234,12 +234,12 @@ static void pkvm_dump_backtrace(unsigned long hyp_offset)
kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
kvm_nvhe_dump_backtrace_end();
}
#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
#else /* !CONFIG_PKVM_STACKTRACE */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PKVM_STACKTRACE\n");
}
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
#endif /* CONFIG_PKVM_STACKTRACE */
/*
* kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.

View File

@@ -72,6 +72,7 @@
#include <trace/hooks/fsnotify.h>
#include <trace/hooks/fuse.h>
#include <trace/hooks/ogki_honor.h>
#include <trace/hooks/suspend.h>
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
@@ -545,3 +546,15 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_oom_swapmem_gather_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_oom_swapmem_gather_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_gfp_zone_flags);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_readahead_gfp_mask);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shmem_swapin_folio);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_wp_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_swap_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_anonymous_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_uprobes_replace_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swap_writepage);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_node_memcgs);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmap_region);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_try_to_unmap_one);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_resume_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_resume_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_early_resume_begin);

View File

@@ -209,8 +209,6 @@ static int smmu_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
static int smmu_issue_cmds(struct hyp_arm_smmu_v3_device *smmu,
u64 *cmds, int n)
{
int idx = Q_IDX(smmu, smmu->cmdq_prod);
u64 *slot = smmu->cmdq_base + idx * CMDQ_ENT_DWORDS;
int i;
int ret;
u32 prod;
@@ -219,8 +217,14 @@ static int smmu_issue_cmds(struct hyp_arm_smmu_v3_device *smmu,
if (ret)
return ret;
for (i = 0; i < CMDQ_ENT_DWORDS * n; i++)
slot[i] = cpu_to_le64(cmds[i]);
for (i = 0; i < n; i++) {
int j;
int idx = Q_IDX(smmu, smmu->cmdq_prod + i);
u64 *slot = smmu->cmdq_base + idx * CMDQ_ENT_DWORDS;
for (j = 0; j < CMDQ_ENT_DWORDS; j++)
slot[j] = cpu_to_le64(cmds[i * CMDQ_ENT_DWORDS + j]);
}
prod = (Q_WRAP(smmu, smmu->cmdq_prod) | Q_IDX(smmu, smmu->cmdq_prod)) + n;
smmu->cmdq_prod = Q_OVF(smmu->cmdq_prod) | Q_WRAP(smmu, prod) | Q_IDX(smmu, prod);

View File

@@ -3,7 +3,7 @@
#define __PKVM_SMC_FILTER_HYPEVENTS_H_
#ifdef __KVM_NVHE_HYPERVISOR__
#include <trace.h>
#include <nvhe/trace.h>
#endif
HYP_EVENT(filtered_smc,

View File

@@ -11,7 +11,7 @@
#include "events.h"
#define HYP_EVENT_FILE ../../../../drivers/misc/pkvm-smc/pkvm/events.h
#include <define_events.h>
#include <nvhe/define_events.h>
const struct pkvm_module_ops *pkvm_ops;
bool permissive;

View File

@@ -209,7 +209,7 @@ static long gunyah_qtvm_attach(struct gunyah_vm *ghvm, struct gunyah_auth_desc *
list_add(&vm->list, &gunyah_qtvm_list);
mutex_unlock(&gunyah_qtvm_lock);
return -EINVAL;
return 0;
}
static void gunyah_qtvm_detach(struct gunyah_vm *ghvm)

View File

@@ -352475,6 +352475,15 @@ function {
parameter_id: 0x3e10b518
parameter_id: 0x6720d32f
}
function {
id: 0x9b2612dc
return_type_id: 0x6720d32f
parameter_id: 0x18bd6530
parameter_id: 0x2170d06d
parameter_id: 0x32bee099
parameter_id: 0x3360dff4
parameter_id: 0x27162aac
}
function {
id: 0x9b263487
return_type_id: 0x6720d32f
@@ -352515,6 +352524,16 @@ function {
parameter_id: 0x6720d32f
parameter_id: 0x11cfee5a
}
function {
id: 0x9b2837bd
return_type_id: 0x6720d32f
parameter_id: 0x18bd6530
parameter_id: 0x2170d06d
parameter_id: 0x0a134144
parameter_id: 0x33756485
parameter_id: 0x18bd6530
parameter_id: 0x6d7f5ff6
}
function {
id: 0x9b2899c2
return_type_id: 0x6720d32f
@@ -354358,6 +354377,13 @@ function {
parameter_id: 0x1a8d1bcb
parameter_id: 0x2a5a21f9
}
function {
id: 0x9b8b2a3f
return_type_id: 0x6720d32f
parameter_id: 0x18bd6530
parameter_id: 0x0a134144
parameter_id: 0x33756485
}
function {
id: 0x9b8bfa37
return_type_id: 0x6720d32f
@@ -354427,6 +354453,13 @@ function {
parameter_id: 0x18bd6530
parameter_id: 0x082be49e
}
function {
id: 0x9b8fab52
return_type_id: 0x6720d32f
parameter_id: 0x18bd6530
parameter_id: 0x0a134144
parameter_id: 0x2170d06d
}
function {
id: 0x9b91093a
return_type_id: 0x6720d32f
@@ -355256,6 +355289,13 @@ function {
parameter_id: 0x064d6086
parameter_id: 0x064d6086
}
function {
id: 0x9bb72f36
return_type_id: 0x6720d32f
parameter_id: 0x18bd6530
parameter_id: 0x064d6086
parameter_id: 0x06835e9c
}
function {
id: 0x9bb75fa6
return_type_id: 0x6720d32f
@@ -356759,6 +356799,13 @@ function {
parameter_id: 0x6720d32f
parameter_id: 0x6720d32f
}
function {
id: 0x9bdea720
return_type_id: 0x6720d32f
parameter_id: 0x18bd6530
parameter_id: 0x1d5bae2a
parameter_id: 0x11cfee5a
}
function {
id: 0x9bdf4df8
return_type_id: 0x6720d32f
@@ -375947,6 +375994,15 @@ elf_symbol {
type_id: 0x9b69f545
full_name: "__traceiter_android_vh_dma_heap_buffer_alloc_start"
}
elf_symbol {
id: 0x5e9ed424
name: "__traceiter_android_vh_do_anonymous_page"
is_defined: true
symbol_type: FUNCTION
crc: 0x451cad94
type_id: 0x9b8fab52
full_name: "__traceiter_android_vh_do_anonymous_page"
}
elf_symbol {
id: 0xb7d9e03a
name: "__traceiter_android_vh_do_async_mmap_readahead"
@@ -376010,6 +376066,15 @@ elf_symbol {
type_id: 0x9b3c0938
full_name: "__traceiter_android_vh_do_shrink_slab"
}
elf_symbol {
id: 0x54bc5972
name: "__traceiter_android_vh_do_swap_page"
is_defined: true
symbol_type: FUNCTION
crc: 0xd506cb7f
type_id: 0x9b2612dc
full_name: "__traceiter_android_vh_do_swap_page"
}
elf_symbol {
id: 0x9dbd7b92
name: "__traceiter_android_vh_do_wake_up_sync"
@@ -376019,6 +376084,15 @@ elf_symbol {
type_id: 0x9bf7b86e
full_name: "__traceiter_android_vh_do_wake_up_sync"
}
elf_symbol {
id: 0x2576f1c7
name: "__traceiter_android_vh_do_wp_page"
is_defined: true
symbol_type: FUNCTION
crc: 0xba178168
type_id: 0x9b2a7922
full_name: "__traceiter_android_vh_do_wp_page"
}
elf_symbol {
id: 0xf80eb64b
name: "__traceiter_android_vh_drain_all_pages_bypass"
@@ -376046,6 +376120,15 @@ elf_symbol {
type_id: 0x9bdc9aae
full_name: "__traceiter_android_vh_dup_task_struct"
}
elf_symbol {
id: 0x02c8f91b
name: "__traceiter_android_vh_early_resume_begin"
is_defined: true
symbol_type: FUNCTION
crc: 0x1e2afff7
type_id: 0x9bcd4ff7
full_name: "__traceiter_android_vh_early_resume_begin"
}
elf_symbol {
id: 0xdcaa59a3
name: "__traceiter_android_vh_enable_thermal_genl_check"
@@ -376910,6 +376993,15 @@ elf_symbol {
type_id: 0x9b52c397
full_name: "__traceiter_android_vh_mm_may_oom_exit"
}
elf_symbol {
id: 0xe44dacb1
name: "__traceiter_android_vh_mmap_region"
is_defined: true
symbol_type: FUNCTION
crc: 0x077369e0
type_id: 0x9b8b2a3f
full_name: "__traceiter_android_vh_mmap_region"
}
elf_symbol {
id: 0x5612c9d1
name: "__traceiter_android_vh_mmput"
@@ -377369,6 +377461,15 @@ elf_symbol {
type_id: 0x9bb1c4e2
full_name: "__traceiter_android_vh_reserve_highatomic_bypass"
}
elf_symbol {
id: 0xf10ce16f
name: "__traceiter_android_vh_resume_end"
is_defined: true
symbol_type: FUNCTION
crc: 0x1e2afff7
type_id: 0x9bcd4ff7
full_name: "__traceiter_android_vh_resume_end"
}
elf_symbol {
id: 0xdd585356
name: "__traceiter_android_vh_reweight_entity"
@@ -377747,6 +377848,15 @@ elf_symbol {
type_id: 0x9bc6b348
full_name: "__traceiter_android_vh_shmem_mod_swapped"
}
elf_symbol {
id: 0x70e34072
name: "__traceiter_android_vh_shmem_swapin_folio"
is_defined: true
symbol_type: FUNCTION
crc: 0xba178168
type_id: 0x9b2a7922
full_name: "__traceiter_android_vh_shmem_swapin_folio"
}
elf_symbol {
id: 0x53263f73
name: "__traceiter_android_vh_should_alloc_pages_retry"
@@ -377837,6 +377947,15 @@ elf_symbol {
type_id: 0x9b30973c
full_name: "__traceiter_android_vh_shrink_folio_list"
}
elf_symbol {
id: 0x709279fd
name: "__traceiter_android_vh_shrink_node_memcgs"
is_defined: true
symbol_type: FUNCTION
crc: 0x9110a90c
type_id: 0x9bdea720
full_name: "__traceiter_android_vh_shrink_node_memcgs"
}
elf_symbol {
id: 0x08ba7287
name: "__traceiter_android_vh_shrink_node_memcgs_bypass"
@@ -377972,6 +378091,15 @@ elf_symbol {
type_id: 0x9be885da
full_name: "__traceiter_android_vh_split_large_folio_bypass"
}
elf_symbol {
id: 0xb993d4e0
name: "__traceiter_android_vh_swap_writepage"
is_defined: true
symbol_type: FUNCTION
crc: 0x99b8b548
type_id: 0x9bb72f36
full_name: "__traceiter_android_vh_swap_writepage"
}
elf_symbol {
id: 0x445e3749
name: "__traceiter_android_vh_swapmem_gather_add_bypass"
@@ -378233,6 +378361,15 @@ elf_symbol {
type_id: 0x9b6eb253
full_name: "__traceiter_android_vh_try_fixup_sea"
}
elf_symbol {
id: 0xd9f43028
name: "__traceiter_android_vh_try_to_unmap_one"
is_defined: true
symbol_type: FUNCTION
crc: 0xcb6e6604
type_id: 0x9b2837bd
full_name: "__traceiter_android_vh_try_to_unmap_one"
}
elf_symbol {
id: 0xc0482ac3
name: "__traceiter_android_vh_tune_mmap_readaround"
@@ -378476,6 +378613,15 @@ elf_symbol {
type_id: 0x9bdaaaaa
full_name: "__traceiter_android_vh_uplink_send_msg"
}
elf_symbol {
id: 0x0266a7bc
name: "__traceiter_android_vh_uprobes_replace_page"
is_defined: true
symbol_type: FUNCTION
crc: 0xc3602e86
type_id: 0x9b222516
full_name: "__traceiter_android_vh_uprobes_replace_page"
}
elf_symbol {
id: 0x297c974d
name: "__traceiter_android_vh_use_vm_swappiness"
@@ -381716,6 +381862,15 @@ elf_symbol {
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_dma_heap_buffer_alloc_start"
}
elf_symbol {
id: 0x325e746a
name: "__tracepoint_android_vh_do_anonymous_page"
is_defined: true
symbol_type: OBJECT
crc: 0x7c5aa8a7
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_do_anonymous_page"
}
elf_symbol {
id: 0x066e59a8
name: "__tracepoint_android_vh_do_async_mmap_readahead"
@@ -381779,6 +381934,15 @@ elf_symbol {
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_do_shrink_slab"
}
elf_symbol {
id: 0xeb9f1c78
name: "__tracepoint_android_vh_do_swap_page"
is_defined: true
symbol_type: OBJECT
crc: 0x7c5aa8a7
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_do_swap_page"
}
elf_symbol {
id: 0xe2d7542c
name: "__tracepoint_android_vh_do_wake_up_sync"
@@ -381788,6 +381952,15 @@ elf_symbol {
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_do_wake_up_sync"
}
elf_symbol {
id: 0x15374b6d
name: "__tracepoint_android_vh_do_wp_page"
is_defined: true
symbol_type: OBJECT
crc: 0x7c5aa8a7
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_do_wp_page"
}
elf_symbol {
id: 0x8405c9a1
name: "__tracepoint_android_vh_drain_all_pages_bypass"
@@ -381815,6 +381988,15 @@ elf_symbol {
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_dup_task_struct"
}
elf_symbol {
id: 0xe5deb919
name: "__tracepoint_android_vh_early_resume_begin"
is_defined: true
symbol_type: OBJECT
crc: 0x7c5aa8a7
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_early_resume_begin"
}
elf_symbol {
id: 0x54b2cd01
name: "__tracepoint_android_vh_enable_thermal_genl_check"
@@ -382679,6 +382861,15 @@ elf_symbol {
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_mm_may_oom_exit"
}
elf_symbol {
id: 0xb6da564f
name: "__tracepoint_android_vh_mmap_region"
is_defined: true
symbol_type: OBJECT
crc: 0x7c5aa8a7
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_mmap_region"
}
elf_symbol {
id: 0x41c8d09b
name: "__tracepoint_android_vh_mmput"
@@ -383138,6 +383329,15 @@ elf_symbol {
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_reserve_highatomic_bypass"
}
elf_symbol {
id: 0xd6904c6d
name: "__tracepoint_android_vh_resume_end"
is_defined: true
symbol_type: OBJECT
crc: 0x7c5aa8a7
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_resume_end"
}
elf_symbol {
id: 0x3cdecaec
name: "__tracepoint_android_vh_reweight_entity"
@@ -383516,6 +383716,15 @@ elf_symbol {
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_shmem_mod_swapped"
}
elf_symbol {
id: 0x6ed615c4
name: "__tracepoint_android_vh_shmem_swapin_folio"
is_defined: true
symbol_type: OBJECT
crc: 0x7c5aa8a7
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_shmem_swapin_folio"
}
elf_symbol {
id: 0xd860c719
name: "__tracepoint_android_vh_should_alloc_pages_retry"
@@ -383606,6 +383815,15 @@ elf_symbol {
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_shrink_folio_list"
}
elf_symbol {
id: 0x87db1583
name: "__tracepoint_android_vh_shrink_node_memcgs"
is_defined: true
symbol_type: OBJECT
crc: 0x7c5aa8a7
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_shrink_node_memcgs"
}
elf_symbol {
id: 0xec82f33d
name: "__tracepoint_android_vh_shrink_node_memcgs_bypass"
@@ -383741,6 +383959,15 @@ elf_symbol {
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_split_large_folio_bypass"
}
elf_symbol {
id: 0xd5f8f162
name: "__tracepoint_android_vh_swap_writepage"
is_defined: true
symbol_type: OBJECT
crc: 0x7c5aa8a7
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_swap_writepage"
}
elf_symbol {
id: 0x052557d7
name: "__tracepoint_android_vh_swapmem_gather_add_bypass"
@@ -384002,6 +384229,15 @@ elf_symbol {
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_try_fixup_sea"
}
elf_symbol {
id: 0xf9580976
name: "__tracepoint_android_vh_try_to_unmap_one"
is_defined: true
symbol_type: OBJECT
crc: 0x7c5aa8a7
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_try_to_unmap_one"
}
elf_symbol {
id: 0x0f6579e9
name: "__tracepoint_android_vh_tune_mmap_readaround"
@@ -384245,6 +384481,15 @@ elf_symbol {
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_uplink_send_msg"
}
elf_symbol {
id: 0x7d0336a2
name: "__tracepoint_android_vh_uprobes_replace_page"
is_defined: true
symbol_type: OBJECT
crc: 0x7c5aa8a7
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_vh_uprobes_replace_page"
}
elf_symbol {
id: 0x6440d1d7
name: "__tracepoint_android_vh_use_vm_swappiness"
@@ -450074,6 +450319,7 @@ interface {
symbol_id: 0x5f33b294
symbol_id: 0x94b48bb0
symbol_id: 0xbcd1a27c
symbol_id: 0x5e9ed424
symbol_id: 0xb7d9e03a
symbol_id: 0xd593b3ef
symbol_id: 0xb5020a7f
@@ -450081,10 +450327,13 @@ interface {
symbol_id: 0xea6452e1
symbol_id: 0x1cc3aec5
symbol_id: 0xb12728da
symbol_id: 0x54bc5972
symbol_id: 0x9dbd7b92
symbol_id: 0x2576f1c7
symbol_id: 0xf80eb64b
symbol_id: 0x42312ccc
symbol_id: 0xf432d1c9
symbol_id: 0x02c8f91b
symbol_id: 0xdcaa59a3
symbol_id: 0xf586d5b6
symbol_id: 0x9073c9ee
@@ -450181,6 +450430,7 @@ interface {
symbol_id: 0x29c67d40
symbol_id: 0x6f5c8275
symbol_id: 0xf182fb15
symbol_id: 0xe44dacb1
symbol_id: 0x5612c9d1
symbol_id: 0xa77effd1
symbol_id: 0x306a6019
@@ -450232,6 +450482,7 @@ interface {
symbol_id: 0xe064853d
symbol_id: 0xe2d75052
symbol_id: 0xd0707ed1
symbol_id: 0xf10ce16f
symbol_id: 0xdd585356
symbol_id: 0xa8cef421
symbol_id: 0xd6100bd9
@@ -450274,6 +450525,7 @@ interface {
symbol_id: 0x98a943de
symbol_id: 0xb6a93669
symbol_id: 0x7a1f2e3e
symbol_id: 0x70e34072
symbol_id: 0x53263f73
symbol_id: 0xe828d15b
symbol_id: 0xda8424a2
@@ -450284,6 +450536,7 @@ interface {
symbol_id: 0xd83423df
symbol_id: 0x6d9f8b13
symbol_id: 0x21e28aab
symbol_id: 0x709279fd
symbol_id: 0x08ba7287
symbol_id: 0xdfd06b97
symbol_id: 0x69c3749b
@@ -450299,6 +450552,7 @@ interface {
symbol_id: 0x08f4cbb0
symbol_id: 0x57cb67b6
symbol_id: 0x7068c1fb
symbol_id: 0xb993d4e0
symbol_id: 0x445e3749
symbol_id: 0x3f14ea30
symbol_id: 0x64e397a9
@@ -450328,6 +450582,7 @@ interface {
symbol_id: 0xe334c57d
symbol_id: 0xfeff2e7f
symbol_id: 0xeac5f4dd
symbol_id: 0xd9f43028
symbol_id: 0xc0482ac3
symbol_id: 0x351ae4d8
symbol_id: 0x39155e73
@@ -450355,6 +450610,7 @@ interface {
symbol_id: 0x6a8145ff
symbol_id: 0x008832a3
symbol_id: 0xc2a5e712
symbol_id: 0x0266a7bc
symbol_id: 0x297c974d
symbol_id: 0x0002a5ea
symbol_id: 0xcbec9d66
@@ -450715,6 +450971,7 @@ interface {
symbol_id: 0xe829284e
symbol_id: 0x30442e2e
symbol_id: 0x9d32ef5e
symbol_id: 0x325e746a
symbol_id: 0x066e59a8
symbol_id: 0x9fe99d05
symbol_id: 0xa628b8f1
@@ -450722,10 +450979,13 @@ interface {
symbol_id: 0xff7bbbff
symbol_id: 0x82ce823f
symbol_id: 0x474d211c
symbol_id: 0xeb9f1c78
symbol_id: 0xe2d7542c
symbol_id: 0x15374b6d
symbol_id: 0x8405c9a1
symbol_id: 0x988719fa
symbol_id: 0x732a182b
symbol_id: 0xe5deb919
symbol_id: 0x54b2cd01
symbol_id: 0xe7584e1c
symbol_id: 0x50b74da0
@@ -450822,6 +451082,7 @@ interface {
symbol_id: 0xddcff44a
symbol_id: 0x0f593caf
symbol_id: 0x47bcd15f
symbol_id: 0xb6da564f
symbol_id: 0x41c8d09b
symbol_id: 0xe2ad11db
symbol_id: 0x65023f2f
@@ -450873,6 +451134,7 @@ interface {
symbol_id: 0xf540e9a7
symbol_id: 0x13b2fb38
symbol_id: 0xa8bc195b
symbol_id: 0xd6904c6d
symbol_id: 0x3cdecaec
symbol_id: 0xcad24ea3
symbol_id: 0xfebada43
@@ -450915,6 +451177,7 @@ interface {
symbol_id: 0x5cfdecb8
symbol_id: 0x0d23f467
symbol_id: 0x392adba0
symbol_id: 0x6ed615c4
symbol_id: 0xd860c719
symbol_id: 0x5b6bc3a9
symbol_id: 0x88acddf0
@@ -450925,6 +451188,7 @@ interface {
symbol_id: 0xc62d5cd5
symbol_id: 0x8712dd01
symbol_id: 0xbd6dfce1
symbol_id: 0x87db1583
symbol_id: 0xec82f33d
symbol_id: 0x3e70324d
symbol_id: 0xc8a7ac69
@@ -450940,6 +451204,7 @@ interface {
symbol_id: 0x547e3912
symbol_id: 0xb11763a0
symbol_id: 0x02bb68bd
symbol_id: 0xd5f8f162
symbol_id: 0x052557d7
symbol_id: 0x6fb8113a
symbol_id: 0xab09fdb7
@@ -450969,6 +451234,7 @@ interface {
symbol_id: 0xfcee39f3
symbol_id: 0x69721329
symbol_id: 0x0896a337
symbol_id: 0xf9580976
symbol_id: 0x0f6579e9
symbol_id: 0xfa2b4906
symbol_id: 0x49b955bd
@@ -450996,6 +451262,7 @@ interface {
symbol_id: 0xc09d36c9
symbol_id: 0x14b85591
symbol_id: 0x56b11d7c
symbol_id: 0x7d0336a2
symbol_id: 0x6440d1d7
symbol_id: 0xaf3a3494
symbol_id: 0x55476a7c

View File

@@ -1989,6 +1989,7 @@
regmap_async_complete
regmap_bulk_read
regmap_bulk_write
regmap_check_range_table
regmap_field_read
regmap_field_update_bits_base
regmap_get_device
@@ -2269,6 +2270,8 @@
snd_soc_set_runtime_hwparams
snd_soc_unregister_card
snd_soc_unregister_component
snd_usb_register_platform_ops
snd_usb_unregister_platform_ops
snprintf
soc_device_register
sock_alloc_send_pskb
@@ -2477,6 +2480,7 @@
__traceiter_android_vh_check_new_page
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_early_resume_begin
__traceiter_android_vh_enable_thermal_genl_check
__traceiter_android_vh_filemap_get_folio
__traceiter_android_vh_free_pages_prepare_init
@@ -2485,6 +2489,7 @@
__traceiter_android_vh_mm_compaction_end
__traceiter_android_vh_mm_kcompactd_cpu_online
__traceiter_android_vh_post_alloc_hook
__traceiter_android_vh_resume_end
__traceiter_android_vh_rmqueue
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_si_meminfo_adjust
@@ -2540,6 +2545,7 @@
__tracepoint_android_vh_check_new_page
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_early_resume_begin
__tracepoint_android_vh_enable_thermal_genl_check
__tracepoint_android_vh_filemap_get_folio
__tracepoint_android_vh_free_pages_prepare_init
@@ -2548,6 +2554,7 @@
__tracepoint_android_vh_mm_compaction_end
__tracepoint_android_vh_mm_kcompactd_cpu_online
__tracepoint_android_vh_post_alloc_hook
__tracepoint_android_vh_resume_end
__tracepoint_android_vh_rmqueue
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_si_meminfo_adjust
@@ -2804,6 +2811,7 @@
virtqueue_kick
virtqueue_kick_prepare
virtqueue_notify
virtqueue_set_dma_premapped
vmalloc_noprof
vmalloc_nr_pages
vmalloc_to_page
@@ -2867,14 +2875,21 @@
xas_pause
__xa_store
xa_store
__xfrm_state_destroy
xfrm_state_alloc
xfrm_state_delete
__xfrm_state_destroy
xfrm_state_lookup_byspi
xfrm_state_migrate
xfrm_stateonly_find
xhci_gen_setup
xhci_get_endpoint_index
xhci_init_driver
xhci_resume
xhci_run
xhci_sideband_add_endpoint
xhci_sideband_create_interrupter
xhci_sideband_register
xhci_sideband_remove_endpoint
xhci_sideband_remove_interrupter
xhci_sideband_unregister
xhci_suspend

View File

@@ -113,9 +113,12 @@
__traceiter_android_vh_do_send_sig_info
__traceiter_android_vh_clear_rwsem_reader_owned
__traceiter_android_vh_clear_rwsem_writer_owned
__traceiter_android_vh_do_anonymous_page
__traceiter_android_vh_do_async_mmap_readahead
__traceiter_android_vh_do_group_exit
__traceiter_android_vh_do_new_mount_fc
__traceiter_android_vh_do_swap_page
__traceiter_android_vh_do_wp_page
__traceiter_android_vh_dup_task_struct
__traceiter_android_vh_f2fs_file_open
__traceiter_android_vh_f2fs_ra_op_flags
@@ -131,6 +134,7 @@
__traceiter_android_vh_lruvec_add_folio
__traceiter_android_vh_lruvec_del_folio
__traceiter_android_vh_mglru_aging_bypass
__traceiter_android_vh_mmap_region
__traceiter_android_vh_mutex_unlock_slowpath
__traceiter_android_vh_mutex_unlock_slowpath_before_wakeq
__traceiter_android_vh_mutex_wait_finish
@@ -148,18 +152,23 @@
__traceiter_android_vh_rwsem_write_wait_start
__traceiter_android_vh_rwsem_read_trylock_failed
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_shmem_swapin_folio
__traceiter_android_vh_shrink_folio_list
__traceiter_android_vh_shrink_node_memcgs
__traceiter_android_vh_shrink_node_memcgs_bypass
__traceiter_android_vh_sk_alloc
__traceiter_android_vh_sk_free
__traceiter_android_vh_swapmem_gather_add_bypass
__traceiter_android_vh_swapmem_gather_finish
__traceiter_android_vh_swapmem_gather_init
__traceiter_android_vh_swap_writepage
__traceiter_android_vh_percpu_rwsem_down_read
__traceiter_android_vh_percpu_rwsem_up_write
__traceiter_android_vh_sync_txn_recvd
__traceiter_android_vh_tcp_rtt_estimator
__traceiter_android_vh_try_to_unmap_one
__traceiter_android_vh_udp_enqueue_schedule_skb
__traceiter_android_vh_uprobes_replace_page
__traceiter_cpu_frequency_limits
__traceiter_sched_switch
__traceiter_sched_waking
@@ -213,9 +222,12 @@
__tracepoint_android_vh_do_send_sig_info
__tracepoint_android_vh_clear_rwsem_reader_owned
__tracepoint_android_vh_clear_rwsem_writer_owned
__tracepoint_android_vh_do_anonymous_page
__tracepoint_android_vh_do_async_mmap_readahead
__tracepoint_android_vh_do_group_exit
__tracepoint_android_vh_do_new_mount_fc
__tracepoint_android_vh_do_swap_page
__tracepoint_android_vh_do_wp_page
__tracepoint_android_vh_dup_task_struct
__tracepoint_android_vh_f2fs_file_open
__tracepoint_android_vh_f2fs_ra_op_flags
@@ -231,6 +243,7 @@
__tracepoint_android_vh_lruvec_add_folio
__tracepoint_android_vh_lruvec_del_folio
__tracepoint_android_vh_mglru_aging_bypass
__tracepoint_android_vh_mmap_region
__tracepoint_android_vh_mutex_unlock_slowpath
__tracepoint_android_vh_mutex_unlock_slowpath_before_wakeq
__tracepoint_android_vh_mutex_wait_finish
@@ -248,18 +261,23 @@
__tracepoint_android_vh_rwsem_write_wait_start
__tracepoint_android_vh_rwsem_read_trylock_failed
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_shmem_swapin_folio
__tracepoint_android_vh_shrink_folio_list
__tracepoint_android_vh_shrink_node_memcgs
__tracepoint_android_vh_shrink_node_memcgs_bypass
__tracepoint_android_vh_sk_alloc
__tracepoint_android_vh_sk_free
__tracepoint_android_vh_swapmem_gather_add_bypass
__tracepoint_android_vh_swapmem_gather_finish
__tracepoint_android_vh_swapmem_gather_init
__tracepoint_android_vh_swap_writepage
__tracepoint_android_vh_sync_txn_recvd
__tracepoint_android_vh_tcp_rtt_estimator
__tracepoint_android_vh_try_to_unmap_one
__tracepoint_android_vh_udp_enqueue_schedule_skb
__tracepoint_android_vh_percpu_rwsem_down_read
__tracepoint_android_vh_percpu_rwsem_up_write
__tracepoint_android_vh_uprobes_replace_page
__tracepoint_cpu_frequency_limits
__tracepoint_sched_switch
__tracepoint_sched_waking

View File

@@ -497,6 +497,32 @@ DECLARE_HOOK(android_vh_oom_swapmem_gather_init,
DECLARE_HOOK(android_vh_oom_swapmem_gather_finish,
TP_PROTO(struct mm_struct *mm),
TP_ARGS(mm));
DECLARE_HOOK(android_vh_do_anonymous_page,
TP_PROTO(struct vm_area_struct *vma, struct folio *folio),
TP_ARGS(vma, folio));
DECLARE_HOOK(android_vh_do_swap_page,
TP_PROTO(struct folio *folio, pte_t *pte, struct vm_fault *vmf,
swp_entry_t entry),
TP_ARGS(folio, pte, vmf, entry));
DECLARE_HOOK(android_vh_do_wp_page,
TP_PROTO(struct folio *folio),
TP_ARGS(folio));
DECLARE_HOOK(android_vh_uprobes_replace_page,
TP_PROTO(struct folio *new_folio, struct folio *old_folio),
TP_ARGS(new_folio, old_folio));
DECLARE_HOOK(android_vh_shmem_swapin_folio,
TP_PROTO(struct folio *folio),
TP_ARGS(folio));
DECLARE_HOOK(android_vh_swap_writepage,
TP_PROTO(unsigned long *sis_flags, struct page *page),
TP_ARGS(sis_flags, page));
DECLARE_HOOK(android_vh_mmap_region,
TP_PROTO(struct vm_area_struct *vma, unsigned long addr),
TP_ARGS(vma, addr));
DECLARE_HOOK(android_vh_try_to_unmap_one,
TP_PROTO(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg, bool ret),
TP_ARGS(folio, vma, addr, arg, ret));
#endif /* _TRACE_HOOK_MM_H */
/* This part must be outside protection */

View File

@@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM suspend
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_SUSPEND_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_SUSPEND_H
#include <trace/hooks/vendor_hooks.h>
DECLARE_HOOK(android_vh_resume_begin,
TP_PROTO(void *unused),
TP_ARGS(unused));
DECLARE_HOOK(android_vh_resume_end,
TP_PROTO(void *unused),
TP_ARGS(unused));
DECLARE_HOOK(android_vh_early_resume_begin,
TP_PROTO(void *unused),
TP_ARGS(unused));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_SUSPEND_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@@ -101,7 +101,9 @@ DECLARE_HOOK(android_vh_direct_reclaim_end,
DECLARE_HOOK(android_vh_throttle_direct_reclaim_bypass,
TP_PROTO(bool *bypass),
TP_ARGS(bypass));
DECLARE_HOOK(android_vh_shrink_node_memcgs,
TP_PROTO(struct mem_cgroup *memcg, bool *skip),
TP_ARGS(memcg, skip));
#endif /* _TRACE_HOOK_VMSCAN_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@@ -29,6 +29,9 @@
#include <linux/uprobes.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>
#define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE
@@ -202,6 +205,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
folio_get(new_folio);
folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE);
folio_add_lru_vma(new_folio, vma);
trace_android_vh_uprobes_replace_page(new_folio, old_folio);
} else
/* no new page, just dec_mm_counter for old_page */
dec_mm_counter(mm, MM_ANONPAGES);

View File

@@ -31,6 +31,7 @@
#include <linux/compiler.h>
#include <linux/moduleparam.h>
#include <linux/wakeup_reason.h>
#include <trace/hooks/suspend.h>
#include "power.h"
@@ -466,6 +467,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
error = suspend_ops->enter(state);
trace_suspend_resume(TPS("machine_suspend"),
state, false);
trace_android_vh_early_resume_begin(NULL);
} else if (*wakeup) {
error = -EBUSY;
}
@@ -534,6 +536,7 @@ int suspend_devices_and_enter(suspend_state_t state)
} while (!error && !wakeup && platform_suspend_again(state));
Resume_devices:
trace_android_vh_resume_begin(NULL);
suspend_test_start();
dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices");
@@ -544,6 +547,7 @@ int suspend_devices_and_enter(suspend_state_t state)
Close:
platform_resume_end(state);
pm_suspend_target_state = PM_SUSPEND_ON;
trace_android_vh_resume_end(NULL);
return error;
Recover_platform:

View File

@@ -5288,8 +5288,9 @@ __rb_get_reader_page_from_writer(struct ring_buffer_per_cpu *cpu_buffer)
}
prev_reader = cpu_buffer->meta_page->reader.id;
if (cpu_buffer->meta_page->reader.id == cpu_buffer->reader_page->id)
WARN_ON(cpu_buffer->writer->get_reader_page(cpu_buffer->cpu));
WARN_ON(cpu_buffer->writer->get_reader_page(cpu_buffer->cpu));
/* nr_pages doesn't include the reader page */
if (cpu_buffer->meta_page->reader.id > cpu_buffer->nr_pages) {
WARN_ON(1);
@@ -5303,7 +5304,8 @@ __rb_get_reader_page_from_writer(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
cpu_buffer->lost_events = cpu_buffer->meta_page->reader.lost_events;
WARN_ON(prev_reader == cpu_buffer->meta_page->reader.id);
WARN_ON(!IS_ENABLED(CONFIG_PKVM_DUMP_TRACE_ON_PANIC) &&
prev_reader == cpu_buffer->meta_page->reader.id);
if (!rb_page_size(cpu_buffer->reader_page))
return NULL;

View File

@@ -3739,6 +3739,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
return wp_page_shared(vmf, folio);
}
trace_android_vh_do_wp_page(folio);
/*
* Private mapping: create an exclusive anonymous page copy if reuse
* is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
@@ -4564,6 +4566,7 @@ check_folio:
pte = pte_mksoft_dirty(pte);
if (pte_swp_uffd_wp(vmf->orig_pte))
pte = pte_mkuffd_wp(pte);
trace_android_vh_do_swap_page(folio, &pte, vmf, entry);
/*
* Same logic as in do_wp_page(); however, optimize for pages that are
@@ -4828,6 +4831,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
*/
__folio_mark_uptodate(folio);
trace_android_vh_do_anonymous_page(vma, folio);
entry = mk_pte(&folio->page, vma->vm_page_prot);
entry = pte_sw_mkyoung(entry);
if (vma->vm_flags & VM_WRITE)

View File

@@ -57,6 +57,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/mmap.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>
#include "internal.h"
@@ -1580,6 +1582,8 @@ expanded:
vma_set_page_prot(vma);
trace_android_vh_mmap_region(vma, addr);
return addr;
unmap_and_free_file_vma:

View File

@@ -27,6 +27,9 @@
#include <linux/zswap.h>
#include "swap.h"
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>
static void __end_swap_bio_write(struct bio *bio)
{
struct folio *folio = bio_first_folio_all(bio);
@@ -452,6 +455,7 @@ static void swap_writepage_bdev_async(struct folio *folio,
void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
{
struct swap_info_struct *sis = swp_swap_info(folio->swap);
unsigned long sis_flags = 0;
VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
/*
@@ -459,14 +463,16 @@ void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
* but that will never affect SWP_FS_OPS, so the data_race
* is safe.
*/
if (data_race(sis->flags & SWP_FS_OPS))
sis_flags = data_race(sis->flags);
trace_android_vh_swap_writepage(&sis_flags, &folio->page);
if (sis_flags & SWP_FS_OPS)
swap_writepage_fs(folio, wbc);
/*
* ->flags can be updated non-atomicially (scan_swap_map_slots),
* but that will never affect __SWP_WRITE_SYNCHRONOUS_IO, so the data_race
* is safe.
*/
else if (data_race(sis->flags & __SWP_WRITE_SYNCHRONOUS_IO))
else if (sis_flags & __SWP_WRITE_SYNCHRONOUS_IO)
swap_writepage_bdev_sync(folio, wbc, sis);
else
swap_writepage_bdev_async(folio, wbc, sis);

View File

@@ -82,6 +82,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>
#include <trace/events/migrate.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>
@@ -1957,6 +1959,7 @@ walk_done:
}
mmu_notifier_invalidate_range_end(&range);
trace_android_vh_try_to_unmap_one(folio, vma, address, arg, ret);
return ret;
}

View File

@@ -2178,6 +2178,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
/* We have to do this with folio locked to prevent races */
folio_lock(folio);
trace_android_vh_shmem_swapin_folio(folio);
if (!folio_test_swapcache(folio) ||
folio->swap.val != swap.val ||
!shmem_confirm_swap(mapping, index, swap)) {

View File

@@ -6036,6 +6036,7 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
unsigned long reclaimed;
unsigned long scanned;
bool bypass = false;
bool skip = false;
/*
* This loop can become CPU-bound when target memcgs
@@ -6045,6 +6046,10 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
*/
cond_resched();
trace_android_vh_shrink_node_memcgs(memcg, &skip);
if (skip)
continue;
mem_cgroup_calculate_protection(target_memcg, memcg);
if (mem_cgroup_below_min(target_memcg, memcg)) {