ANDROID: KVM: arm64: Add PKVM_DUMP_TRACE_ON_PANIC

If the system panics and hyp_trace_printk is on, dump what can be
dumped — that is, the current reader page and the tail page.

Bug: 357781595
Bug: 391974813
Change-Id: Ib77c4c6feaddac6eff35280dc59b35117044ebe7
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
This commit is contained in:
Vincent Donnefort
2025-01-24 15:05:03 +00:00
parent a3f61dba68
commit 9cfa11e5f1
5 changed files with 96 additions and 3 deletions

View File

@@ -63,6 +63,14 @@ config PKVM_SELFTESTS
If unsure, say N.
config PKVM_DUMP_TRACE_ON_PANIC
bool "Dump Protected KVM hypervisor trace buffer on panic"
help
Say Y here to dump the content of the pKVM hypervisor trace buffer on either
hypervisor or host panic. In the case of a hyp panic, only the tail
page is dumped. It only has an effect if the command line option
hyp_trace_printk is set.
config PROTECTED_NVHE_STACKTRACE
bool "Protected KVM hypervisor stacktraces"
depends on NVHE_EL2_DEBUG

View File

@@ -37,6 +37,7 @@ void __pkvm_teardown_tracing(void);
int __pkvm_enable_tracing(bool enable);
int __pkvm_reset_tracing(unsigned int cpu);
int __pkvm_swap_reader_tracing(unsigned int cpu);
void __pkvm_panic_tracing(void);
int __pkvm_enable_event(unsigned short id, bool enable);
extern struct hyp_printk_fmt __hyp_printk_fmts_start[];
@@ -127,6 +128,7 @@ static inline void __pkvm_teardown_tracing(void) { }
static inline int __pkvm_enable_tracing(bool enable) { return -ENODEV; }
static inline int __pkvm_reset_tracing(unsigned int cpu) { return -ENODEV; }
static inline int __pkvm_swap_reader_tracing(unsigned int cpu) { return -ENODEV; }
static inline void __pkvm_panic_tracing(void) { }
static inline int __pkvm_enable_event(unsigned short id, bool enable) { return -ENODEV; }
#define trace_hyp_printk(fmt, ...)

View File

@@ -524,6 +524,8 @@ asmlinkage void __noreturn hyp_panic(void)
kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0),
_THIS_IP_);
__pkvm_panic_tracing();
__hyp_do_panic(host_ctxt, spsr, elr, par);
unreachable();
}

View File

@@ -41,6 +41,7 @@ struct hyp_rb_per_cpu {
#define HYP_RB_UNAVAILABLE 0
#define HYP_RB_READY 1
#define HYP_RB_WRITING 2
#define HYP_RB_PANIC 3
DEFINE_PER_CPU(struct hyp_rb_per_cpu, trace_rb);
DEFINE_HYP_SPINLOCK(trace_rb_lock);
@@ -254,7 +255,7 @@ void *tracing_reserve_entry(unsigned long length)
struct ring_buffer_event *rb_event;
if (atomic_cmpxchg(&cpu_buffer->status, HYP_RB_READY, HYP_RB_WRITING)
== HYP_RB_UNAVAILABLE)
!= HYP_RB_READY)
return NULL;
rb_event = rb_reserve_next(cpu_buffer, length);
@@ -321,12 +322,15 @@ static int rb_cpu_disable_writing(struct hyp_rb_per_cpu *cpu_buffer)
/*
 * NOTE(review): this diff hunk shows both the removed and the added lines
 * interleaved (the bare atomic_cmpxchg()/return 0 pair is the old code;
 * the prev_status variant is the new code). Only the prev_status version
 * survives in the real file — TODO confirm against the applied patch.
 */
static int rb_cpu_enable_writing(struct hyp_rb_per_cpu *cpu_buffer)
{
int prev_status;
if (!rb_cpu_loaded(cpu_buffer))
return -ENODEV;
atomic_cmpxchg(&cpu_buffer->status, HYP_RB_UNAVAILABLE, HYP_RB_READY);
/* Flip UNAVAILABLE -> READY; any other prior state is reported as an error. */
prev_status = atomic_cmpxchg(&cpu_buffer->status, HYP_RB_UNAVAILABLE,
HYP_RB_READY);
return 0;
return prev_status == HYP_RB_UNAVAILABLE ? 0 : -ENODEV;
}
static int rb_cpu_reset(struct hyp_rb_per_cpu *cpu_buffer)
@@ -614,3 +618,49 @@ int __pkvm_reset_tracing(unsigned int cpu)
return ret;
}
/*
 * Freeze every loaded per-cpu trace ring-buffer on (hypervisor) panic and
 * drain the remaining pages so the host can still read the tail of the
 * trace. Compiles to an empty body unless CONFIG_PKVM_DUMP_TRACE_ON_PANIC.
 */
void __pkvm_panic_tracing(void)
{
#ifdef CONFIG_PKVM_DUMP_TRACE_ON_PANIC
int cpu;
hyp_spin_lock(&trace_rb_lock);
for (cpu = 0; cpu < hyp_nr_cpus; cpu++) {
struct hyp_rb_per_cpu *cpu_buffer = per_cpu_ptr(&trace_rb, cpu);
int prev_status, skipped = 0;
if (!rb_cpu_loaded(cpu_buffer))
continue;
/*
 * Move READY -> PANIC so no new writer can enter. If a writer is
 * mid-flight (WRITING) spin until it restores READY, then win the
 * cmpxchg; acquire ordering pairs with the writer's release.
 */
do {
prev_status = atomic_cmpxchg_acquire(&cpu_buffer->status, HYP_RB_READY,
HYP_RB_PANIC);
} while (prev_status == HYP_RB_WRITING);
/* Allow the host to read the very last events */
while (cpu_buffer->tail_page != cpu_buffer->reader_page) {
struct hyp_buffer_page *prev_reader = cpu_buffer->reader_page;
if (rb_swap_reader_page(cpu_buffer))
break;
/*
 * The reader is still on the previous reader page and events there can
 * still be read.
 */
if (++skipped == 1)
continue;
/*
 * Every page skipped beyond the first is unreachable by the
 * host reader: account its entries as overrun/lost so the
 * host-side statistics stay consistent.
 */
WRITE_ONCE(cpu_buffer->meta->overrun,
cpu_buffer->meta->overrun + prev_reader->entries);
cpu_buffer->meta->reader.lost_events = cpu_buffer->meta->overrun -
cpu_buffer->last_overrun;
WRITE_ONCE(meta_pages_lost(cpu_buffer->meta),
meta_pages_lost(cpu_buffer->meta) + 1);
}
}
hyp_spin_unlock(&trace_rb_lock);
#endif
}

View File

@@ -6,6 +6,7 @@
#include <linux/arm-smccc.h>
#include <linux/percpu-defs.h>
#include <linux/panic_notifier.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>
@@ -55,6 +56,8 @@ static struct hyp_trace_buffer {
static size_t hyp_trace_buffer_size = 7 << 10;
static bool hyp_trace_panic __read_mostly;
/* Number of pages the ring-buffer requires to accommodate the requested size */
#define NR_PAGES(size) \
((PAGE_ALIGN(size) >> PAGE_SHIFT) + 1)
@@ -174,6 +177,10 @@ static void hyp_clock_wait(struct hyp_trace_buffer *hyp_buffer)
/*
 * Ask the hypervisor to swap in the next reader page for @cpu.
 * Returns the hypervisor call result, or 0 (nothing to read) when the
 * hypervisor has already panicked and must not be called again.
 */
static int __get_reader_page(int cpu)
{
/* we'd better not try to call the hyp if it has panic'ed */
if (hyp_trace_panic)
return 0;
return kvm_call_hyp_nvhe(__pkvm_swap_reader_tracing, cpu);
}
@@ -949,6 +956,28 @@ static void hyp_trace_buffer_printk(struct hyp_trace_buffer *hyp_buffer)
}
}
/*
 * Panic-notifier callback: dump the hypervisor trace buffer contents.
 *
 * @v is the panic message string; one starting with "HYP panic:" means the
 * hypervisor itself died, so hyp_trace_panic is set BEFORE polling — this
 * makes __get_reader_page() skip any further calls into the hypervisor.
 */
static int hyp_trace_panic_handler(struct notifier_block *self,
unsigned long ev, void *v)
{
#ifdef CONFIG_PKVM_DUMP_TRACE_ON_PANIC
/* Nothing to dump without a loaded buffer and a printk iterator. */
if (!hyp_trace_buffer_loaded(&hyp_trace_buffer) ||
!hyp_trace_buffer.printk_iter)
return NOTIFY_DONE;
if (!strncmp("HYP panic:", v, 10))
hyp_trace_panic = true;
/* Pull the latest writer pages, then print everything readable. */
ring_buffer_poll_writer(hyp_trace_buffer.trace_buffer, RING_BUFFER_ALL_CPUS);
hyp_trace_buffer_printk(&hyp_trace_buffer);
#endif
return NOTIFY_DONE;
}
/*
 * Near-highest priority (INT_MAX - 1) so the trace dump runs early in the
 * panic notifier chain — presumably before notifiers that might reboot or
 * wedge the machine; TODO confirm ordering vs. other INT_MAX users.
 */
static struct notifier_block hyp_trace_panic_notifier = {
.notifier_call = hyp_trace_panic_handler,
.priority = INT_MAX - 1,
};
void hyp_trace_enable_event_early(void)
{
if (hyp_event_early_probe()) {
@@ -1026,5 +1055,7 @@ int hyp_trace_init_tracefs(void)
hyp_trace_buffer_printk_init(&hyp_trace_buffer))
pr_warn("Failed to init ht_printk");
atomic_notifier_chain_register(&panic_notifier_list, &hyp_trace_panic_notifier);
return 0;
}