xref: /linux/arch/arm64/kvm/stacktrace.c (revision 4359a011e259a4608afc7fb3635370c9d4ba5943)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM nVHE hypervisor stack tracing support.
 *
 * The unwinder implementation depends on the nVHE mode:
 *
 *   1) Non-protected nVHE mode - the host can directly access the
 *      HYP stack pages and unwind the HYP stack in EL1. This saves having
 *      to allocate shared buffers for the host to read the unwound
 *      stacktrace.
 *
 *   2) pKVM (protected nVHE) mode - the host cannot directly access
 *      the HYP memory. The stack is unwound in EL2 and dumped to a shared
 *      buffer where the host can read and print the stacktrace.
 *
 * Copyright (C) 2022 Google LLC
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/stacktrace/nvhe.h>

/*
 * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to kernel VAs
 *
 * The nVHE hypervisor stack is mapped in the flexible 'private' VA range, to
 * allow for guard pages below the stack. Consequently, the fixed offset address
 * translation macros won't work here.
 *
 * The kernel VA is calculated as an offset from the kernel VA of the hypervisor
 * stack base.
 *
 * Returns true on success and updates @addr to its corresponding kernel VA;
 * otherwise returns false.
 */
static bool kvm_nvhe_stack_kern_va(unsigned long *addr,
				   enum stack_type type)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info;
	unsigned long hyp_base, kern_base, hyp_offset;

	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

	switch (type) {
	case STACK_TYPE_HYP:
		kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
		hyp_base = (unsigned long)stacktrace_info->stack_base;
		break;
	case STACK_TYPE_OVERFLOW:
		kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
		hyp_base = (unsigned long)stacktrace_info->overflow_stack_base;
		break;
	default:
		return false;
	}

	hyp_offset = *addr - hyp_base;

	*addr = kern_base + hyp_offset;

	return true;
}

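/*
 * Check whether the [sp, sp + size) range lies on the hypervisor's per-CPU
 * overflow stack, whose base is shared with the host via kvm_stacktrace_info.
 * Fills in @info when it does.
 */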
static bool on_overflow_stack(unsigned long sp, unsigned long size,
			      struct stack_info *info)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info
				= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
	unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}

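/*
 * Check whether the [sp, sp + size) range lies on the hypervisor's page-sized
 * per-CPU stack, using the stack base shared via kvm_stacktrace_info.
 * Fills in @info when it does.
 */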
static bool on_hyp_stack(unsigned long sp, unsigned long size,
			 struct stack_info *info)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info
				= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
	unsigned long low = (unsigned long)stacktrace_info->stack_base;
	unsigned long high = low + PAGE_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
}

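/*
 * Check whether the address range is on one of the stacks the host is allowed
 * to unwind: the overflow stack first, then the regular HYP stack. @tsk is
 * unused here, since the host unwinds the hypervisor rather than a task.
 */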
static bool on_accessible_stack(const struct task_struct *tsk,
				unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	if (info)
		info->type = STACK_TYPE_UNKNOWN;

	return (on_overflow_stack(sp, size, info) ||
		on_hyp_stack(sp, size, info));
}

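/*
 * Advance @state to the previous frame record, translating the HYP frame
 * pointer to a kernel VA so it can be dereferenced from EL1.
 */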
static int unwind_next(struct unwind_state *state)
{
	struct stack_info info;

	return unwind_next_common(state, &info, on_accessible_stack,
				  kvm_nvhe_stack_kern_va);
}

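/*
 * Walk the frames starting at @state, passing each PC to @consume_entry until
 * the callback declines an entry or the next frame cannot be unwound.
 */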
static void unwind(struct unwind_state *state,
		   stack_trace_consume_fn consume_entry, void *cookie)
{
	while (1) {
		int ret;

		if (!consume_entry(cookie, state->pc))
			break;
		ret = unwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry
 *
 * @arg    : the hypervisor offset, used for address translation
 * @where  : the program counter corresponding to the stack frame
 *
 * Always returns true so that the unwinder carries on to the next entry.
 */
static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
{
	unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
	unsigned long hyp_offset = (unsigned long)arg;

	/* Mask tags and convert to kern addr */
	where = (where & va_mask) + hyp_offset;
	kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));

	return true;
}

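/*
 * The dumped trace is framed by the markers below; the entries in between are
 * printed by kvm_nvhe_dump_backtrace_entry(). Illustrative shape only (log
 * prefixes omitted, address and symbol made up):
 *
 *   nVHE call trace:
 *    [<ffff800008xxxxxx>] __kvm_nvhe_hyp_panic+0xb8/0xf8
 *   ---[ end nVHE call trace ]---
 */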
static void kvm_nvhe_dump_backtrace_start(void)
{
	kvm_err("nVHE call trace:\n");
}

static void kvm_nvhe_dump_backtrace_end(void)
{
	kvm_err("---[ end nVHE call trace ]---\n");
}

/*
 * hyp_dump_backtrace - Dump the non-protected nVHE backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * The host can directly access HYP stack pages in non-protected
 * mode, so the unwinding is done directly from EL1. This removes
 * the need for shared buffers between host and hypervisor for
 * the stacktrace.
 */
static void hyp_dump_backtrace(unsigned long hyp_offset)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info;
	struct unwind_state state;

	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

	kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);

	kvm_nvhe_dump_backtrace_start();
	unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset);
	kvm_nvhe_dump_backtrace_end();
}

#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
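/*
 * Host view of the per-CPU buffer that the hypervisor fills with its unwound
 * stacktrace in protected mode. Entries are hypervisor PCs, terminated by a
 * zero entry (see the loop in pkvm_dump_backtrace()).
 */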
DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
			 pkvm_stacktrace);

/*
 * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * Dumping of the pKVM HYP backtrace is done by reading the
 * stack addresses from the shared stacktrace buffer, since the
 * host cannot directly access hypervisor memory in protected
 * mode.
 */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	unsigned long *stacktrace
		= (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
	int i;

	kvm_nvhe_dump_backtrace_start();
	/* The saved stacktrace is terminated by a null entry */
	for (i = 0;
	     i < ARRAY_SIZE(kvm_nvhe_sym(pkvm_stacktrace)) && stacktrace[i];
	     i++)
		kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
	kvm_nvhe_dump_backtrace_end();
}
#else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
}
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */

/*
 * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 */
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
{
	if (is_protected_kvm_enabled())
		pkvm_dump_backtrace(hyp_offset);
	else
		hyp_dump_backtrace(hyp_offset);
}