/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM nVHE hypervisor stack tracing support.
 *
 * The unwinder implementation depends on the nVHE mode:
 *
 *   1) Non-protected nVHE mode - the host can directly access the
 *      HYP stack pages and unwind the HYP stack in EL1. This saves having
 *      to allocate shared buffers for the host to read the unwound
 *      stacktrace.
 *
 *   2) pKVM (protected nVHE) mode - the host cannot directly access
 *      the HYP memory. The stack is unwound in EL2 and dumped to a shared
 *      buffer where the host can read and print the stacktrace.
 *
 * Copyright (C) 2022 Google LLC
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/stacktrace/nvhe.h>

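/*
 * The HYP VA range of this CPU's overflow stack, taken from the base
 * address the hypervisor publishes in its per-CPU kvm_stacktrace_info.
 */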
static struct stack_info stackinfo_get_overflow(void)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info
				= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
	unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return (struct stack_info) {
		.low = low,
		.high = high,
	};
}

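/*
 * The kernel VA range of this CPU's overflow stack, derived from the
 * host's own mapping of the hypervisor's per-CPU overflow_stack array.
 */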
static struct stack_info stackinfo_get_overflow_kern_va(void)
{
	unsigned long low = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return (struct stack_info) {
		.low = low,
		.high = high,
	};
}

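/*
 * The HYP VA range of this CPU's hypervisor stack (a single page), as
 * published by the hypervisor in its per-CPU kvm_stacktrace_info.
 */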
static struct stack_info stackinfo_get_hyp(void)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info
				= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
	unsigned long low = (unsigned long)stacktrace_info->stack_base;
	unsigned long high = low + PAGE_SIZE;

	return (struct stack_info) {
		.low = low,
		.high = high,
	};
}

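/*
 * The kernel VA range of this CPU's hypervisor stack page, which the
 * host allocated and therefore has directly mapped.
 */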
static struct stack_info stackinfo_get_hyp_kern_va(void)
{
	unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
	unsigned long high = low + PAGE_SIZE;

	return (struct stack_info) {
		.low = low,
		.high = high,
	};
}

/*
 * kvm_nvhe_stack_kern_va - Convert a KVM nVHE HYP stack address to a kernel VA
 *
 * The nVHE hypervisor stack is mapped in the flexible 'private' VA range, to
 * allow for guard pages below the stack. Consequently, the fixed offset address
 * translation macros won't work here.
 *
 * The kernel VA is calculated as an offset from the kernel VA of the hypervisor
 * stack base.
 *
 * Returns true on success and updates @addr to its corresponding kernel VA;
 * otherwise returns false.
 */
static bool kvm_nvhe_stack_kern_va(unsigned long *addr, unsigned long size)
{
	struct stack_info stack_hyp, stack_kern;

	stack_hyp = stackinfo_get_hyp();
	stack_kern = stackinfo_get_hyp_kern_va();
	if (stackinfo_on_stack(&stack_hyp, *addr, size))
		goto found;

	stack_hyp = stackinfo_get_overflow();
	stack_kern = stackinfo_get_overflow_kern_va();
	if (stackinfo_on_stack(&stack_hyp, *addr, size))
		goto found;

	return false;

found:
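	/*
	 * Preserve the offset of @addr within its stack; only the base
	 * changes between the HYP and kernel mappings.
	 */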
	*addr = *addr - stack_hyp.low + stack_kern.low;
	return true;
}

/*
 * Convert a KVM nVHE HYP frame record address to a kernel VA
 */
static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr)
{
	/* A frame record is an {FP, LR} pair, i.e. 16 bytes */
	return kvm_nvhe_stack_kern_va(addr, 16);
}

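/*
 * Advance the unwind state to the next frame record, translating the
 * HYP FP to a kernel VA first. Returns a negative error if the record
 * cannot be translated or fails the common unwinder's checks.
 */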
static int unwind_next(struct unwind_state *state)
{
	/*
	 * The FP is in the hypervisor VA space. Convert it to the kernel VA
	 * space so it can be unwound by the regular unwind functions.
	 */
	if (!kvm_nvhe_stack_kern_record_va(&state->fp))
		return -EINVAL;

	return unwind_next_frame_record(state);
}

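/*
 * Walk the whole trace, feeding each PC to @consume_entry. Stops when
 * the consumer asks to (returns false) or when the next frame record
 * cannot be unwound.
 */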
static void unwind(struct unwind_state *state,
		   stack_trace_consume_fn consume_entry, void *cookie)
{
	while (1) {
		int ret;

		if (!consume_entry(cookie, state->pc))
			break;
		ret = unwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry
 *
 * @arg    : the hypervisor offset, used for address translation
 * @where  : the program counter corresponding to the stack frame
 */
static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
{
	unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
	unsigned long hyp_offset = (unsigned long)arg;

	/* Mask tags and convert to kern addr */
	where = (where & va_mask) + hyp_offset;
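	/*
	 * 'where' is now KASLR-neutral; add the KASLR offset back so that
	 * %pB can symbolize it against the live kernel image.
	 */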
	kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));

	return true;
}

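/*
 * Markers bracketing the dumped entries so the nVHE trace stands out
 * in the host's kernel log.
 */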
static void kvm_nvhe_dump_backtrace_start(void)
{
	kvm_err("nVHE call trace:\n");
}

static void kvm_nvhe_dump_backtrace_end(void)
{
	kvm_err("---[ end nVHE call trace ]---\n");
}

/*
 * hyp_dump_backtrace - Dump the non-protected nVHE backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * The host can directly access HYP stack pages in non-protected
 * mode, so the unwinding is done directly from EL1. This removes
 * the need for shared buffers between host and hypervisor for
 * the stacktrace.
 */
static void hyp_dump_backtrace(unsigned long hyp_offset)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info;
	struct stack_info stacks[] = {
		stackinfo_get_overflow_kern_va(),
		stackinfo_get_hyp_kern_va(),
	};
	struct unwind_state state = {
		.stacks = stacks,
		.nr_stacks = ARRAY_SIZE(stacks),
	};

	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

	kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);

	kvm_nvhe_dump_backtrace_start();
	unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset);
	kvm_nvhe_dump_backtrace_end();
}

#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
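/*
 * Per-CPU buffer, shared with the host, that the hypervisor fills with
 * the unwound PCs (terminated by a null entry), typically on hyp panic.
 */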
DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
			 pkvm_stacktrace);

/*
 * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * Dumping of the pKVM HYP backtrace is done by reading the
 * stack addresses from the shared stacktrace buffer, since the
 * host cannot directly access hypervisor memory in protected
 * mode.
 */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	unsigned long *stacktrace
		= (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
	int i;

	kvm_nvhe_dump_backtrace_start();
	/* The saved stacktrace is terminated by a null entry */
	for (i = 0;
	     i < ARRAY_SIZE(kvm_nvhe_sym(pkvm_stacktrace)) && stacktrace[i];
	     i++)
		kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
	kvm_nvhe_dump_backtrace_end();
}
#else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
}
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */

/*
 * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 */
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
{
	if (is_protected_kvm_enabled())
		pkvm_dump_backtrace(hyp_offset);
	else
		hyp_dump_backtrace(hyp_offset);
}