diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index a8b57ba825db..1378f2dc4601 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -63,6 +63,14 @@ config PKVM_SELFTESTS If unsure, say N. +config PKVM_DUMP_TRACE_ON_PANIC + bool "Dump Protected KVM hypervisor trace buffer on panic" + help + Say Y here to dump the content of the pKVM hypervisor trace buffer on + either hypervisor or host panic. In the case of a hyp panic, only the + tail page is dumped. It only has an effect if the command line option + hyp_trace_printk is set. + config PROTECTED_NVHE_STACKTRACE bool "Protected KVM hypervisor stacktraces" depends on NVHE_EL2_DEBUG diff --git a/arch/arm64/kvm/hyp/include/module/nvhe/trace.h b/arch/arm64/kvm/hyp/include/module/nvhe/trace.h index 28b68f43895d..bec5ef2ed28e 100644 --- a/arch/arm64/kvm/hyp/include/module/nvhe/trace.h +++ b/arch/arm64/kvm/hyp/include/module/nvhe/trace.h @@ -37,6 +37,7 @@ void __pkvm_teardown_tracing(void); int __pkvm_enable_tracing(bool enable); int __pkvm_reset_tracing(unsigned int cpu); int __pkvm_swap_reader_tracing(unsigned int cpu); +void __pkvm_panic_tracing(void); int __pkvm_enable_event(unsigned short id, bool enable); extern struct hyp_printk_fmt __hyp_printk_fmts_start[]; @@ -127,6 +128,7 @@ static inline void __pkvm_teardown_tracing(void) { } static inline int __pkvm_enable_tracing(bool enable) { return -ENODEV; } static inline int __pkvm_reset_tracing(unsigned int cpu) { return -ENODEV; } static inline int __pkvm_swap_reader_tracing(unsigned int cpu) { return -ENODEV; } +static inline void __pkvm_panic_tracing(void) { } static inline int __pkvm_enable_event(unsigned short id, bool enable) { return -ENODEV; } #define trace_hyp_printk(fmt, ...) 
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index bcc85cc81799..db397b6a112c 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -524,6 +524,8 @@ asmlinkage void __noreturn hyp_panic(void) kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0), _THIS_IP_); + __pkvm_panic_tracing(); + __hyp_do_panic(host_ctxt, spsr, elr, par); unreachable(); } diff --git a/arch/arm64/kvm/hyp/nvhe/trace.c b/arch/arm64/kvm/hyp/nvhe/trace.c index 3799dd91fd2c..4b993c992ea7 100644 --- a/arch/arm64/kvm/hyp/nvhe/trace.c +++ b/arch/arm64/kvm/hyp/nvhe/trace.c @@ -41,6 +41,7 @@ struct hyp_rb_per_cpu { #define HYP_RB_UNAVAILABLE 0 #define HYP_RB_READY 1 #define HYP_RB_WRITING 2 +#define HYP_RB_PANIC 3 DEFINE_PER_CPU(struct hyp_rb_per_cpu, trace_rb); DEFINE_HYP_SPINLOCK(trace_rb_lock); @@ -254,7 +255,7 @@ void *tracing_reserve_entry(unsigned long length) struct ring_buffer_event *rb_event; if (atomic_cmpxchg(&cpu_buffer->status, HYP_RB_READY, HYP_RB_WRITING) - == HYP_RB_UNAVAILABLE) + != HYP_RB_READY) return NULL; rb_event = rb_reserve_next(cpu_buffer, length); @@ -321,12 +322,15 @@ static int rb_cpu_disable_writing(struct hyp_rb_per_cpu *cpu_buffer) static int rb_cpu_enable_writing(struct hyp_rb_per_cpu *cpu_buffer) { + int prev_status; + if (!rb_cpu_loaded(cpu_buffer)) return -ENODEV; - atomic_cmpxchg(&cpu_buffer->status, HYP_RB_UNAVAILABLE, HYP_RB_READY); + prev_status = atomic_cmpxchg(&cpu_buffer->status, HYP_RB_UNAVAILABLE, + HYP_RB_READY); - return 0; + return prev_status == HYP_RB_UNAVAILABLE ? 
0 : -ENODEV; } static int rb_cpu_reset(struct hyp_rb_per_cpu *cpu_buffer) @@ -614,3 +618,49 @@ int __pkvm_reset_tracing(unsigned int cpu) return ret; } + +void __pkvm_panic_tracing(void) +{ +#ifdef CONFIG_PKVM_DUMP_TRACE_ON_PANIC + int cpu; + + hyp_spin_lock(&trace_rb_lock); + + for (cpu = 0; cpu < hyp_nr_cpus; cpu++) { + struct hyp_rb_per_cpu *cpu_buffer = per_cpu_ptr(&trace_rb, cpu); + int prev_status, skipped = 0; + + if (!rb_cpu_loaded(cpu_buffer)) + continue; + + do { + prev_status = atomic_cmpxchg_acquire(&cpu_buffer->status, HYP_RB_READY, + HYP_RB_PANIC); + } while (prev_status == HYP_RB_WRITING); + + /* Allow the host to read the very last events */ + while (cpu_buffer->tail_page != cpu_buffer->reader_page) { + struct hyp_buffer_page *prev_reader = cpu_buffer->reader_page; + + if (rb_swap_reader_page(cpu_buffer)) + break; + + /* + * The reader is still on the previous reader page and events there can + * still be read. + */ + if (++skipped == 1) + continue; + + WRITE_ONCE(cpu_buffer->meta->overrun, + cpu_buffer->meta->overrun + prev_reader->entries); + cpu_buffer->meta->reader.lost_events = cpu_buffer->meta->overrun - + cpu_buffer->last_overrun; + WRITE_ONCE(meta_pages_lost(cpu_buffer->meta), + meta_pages_lost(cpu_buffer->meta) + 1); + } + } + + hyp_spin_unlock(&trace_rb_lock); +#endif +} diff --git a/arch/arm64/kvm/hyp_trace.c b/arch/arm64/kvm/hyp_trace.c index 327f525ab9e0..06ebdf11a930 100644 --- a/arch/arm64/kvm/hyp_trace.c +++ b/arch/arm64/kvm/hyp_trace.c @@ -6,6 +6,7 @@ #include #include +#include #include #include @@ -55,6 +56,8 @@ static struct hyp_trace_buffer { static size_t hyp_trace_buffer_size = 7 << 10; +static bool hyp_trace_panic __read_mostly; + /* Number of pages the ring-buffer requires to accommodate for size */ #define NR_PAGES(size) \ ((PAGE_ALIGN(size) >> PAGE_SHIFT) + 1) @@ -174,6 +177,10 @@ static void hyp_clock_wait(struct hyp_trace_buffer *hyp_buffer) static int __get_reader_page(int cpu) { + /* we'd better not try to call the hyp 
if it has panic'ed */ + if (hyp_trace_panic) + return 0; + return kvm_call_hyp_nvhe(__pkvm_swap_reader_tracing, cpu); } @@ -949,6 +956,28 @@ static void hyp_trace_buffer_printk(struct hyp_trace_buffer *hyp_buffer) } } +static int hyp_trace_panic_handler(struct notifier_block *self, + unsigned long ev, void *v) +{ +#ifdef CONFIG_PKVM_DUMP_TRACE_ON_PANIC + if (!hyp_trace_buffer_loaded(&hyp_trace_buffer) || + !hyp_trace_buffer.printk_iter) + return NOTIFY_DONE; + + if (!strncmp("HYP panic:", v, 10)) + hyp_trace_panic = true; + + ring_buffer_poll_writer(hyp_trace_buffer.trace_buffer, RING_BUFFER_ALL_CPUS); + hyp_trace_buffer_printk(&hyp_trace_buffer); +#endif + return NOTIFY_DONE; +} + +static struct notifier_block hyp_trace_panic_notifier = { + .notifier_call = hyp_trace_panic_handler, + .priority = INT_MAX - 1, +}; + void hyp_trace_enable_event_early(void) { if (hyp_event_early_probe()) { @@ -1026,5 +1055,7 @@ int hyp_trace_init_tracefs(void) hyp_trace_buffer_printk_init(&hyp_trace_buffer)) pr_warn("Failed to init ht_printk"); + atomic_notifier_chain_register(&panic_notifier_list, &hyp_trace_panic_notifier); + return 0; }