xref: /linux/arch/arm64/kvm/handle_exit.c (revision e3966940559d52aa1800a008dcfeec218dd31f88)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012,2013 - ARM Ltd
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  *
6  * Derived from arch/arm/kvm/handle_exit.c:
7  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
8  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
9  */
10 
11 #include <linux/kvm.h>
12 #include <linux/kvm_host.h>
13 #include <linux/ubsan.h>
14 
15 #include <asm/esr.h>
16 #include <asm/exception.h>
17 #include <asm/kvm_asm.h>
18 #include <asm/kvm_emulate.h>
19 #include <asm/kvm_mmu.h>
20 #include <asm/kvm_nested.h>
21 #include <asm/debug-monitors.h>
22 #include <asm/stacktrace/nvhe.h>
23 #include <asm/traps.h>
24 
25 #include <kvm/arm_hypercalls.h>
26 
27 #define CREATE_TRACE_POINTS
28 #include "trace_handle_exit.h"
29 
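/*
 * Exit handlers follow the same return convention as handle_exit() below:
 * > 0 to resume the guest, 0 to exit to userspace (with run->exit_reason
 * set), < 0 on error.
 */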
30 typedef int (*exit_handle_fn)(struct kvm_vcpu *);
31 
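/*
 * Only inject an SError into the guest if it is not a RAS SError, or if it
 * is a RAS SError severe enough to be treated as fatal; benign (e.g.
 * corrected) RAS errors are not propagated.
 */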
32 static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
33 {
34 	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
35 		kvm_inject_serror(vcpu);
36 }
37 
38 static int handle_hvc(struct kvm_vcpu *vcpu)
39 {
40 	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
41 			    kvm_vcpu_hvc_get_imm(vcpu));
42 	vcpu->stat.hvc_exit_stat++;
43 
44 	/* Forward hvc instructions to the virtual EL2 if the guest has EL2. */
45 	if (vcpu_has_nv(vcpu)) {
46 		if (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_HCD)
47 			kvm_inject_undefined(vcpu);
48 		else
49 			kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
50 
51 		return 1;
52 	}
53 
54 	return kvm_smccc_call_handler(vcpu);
55 }
56 
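/*
 * Note that both the HVC and SMC conduits funnel SMCCC calls (PSCI and
 * friends) into kvm_smccc_call_handler(). Per DEN0028, the function ID is
 * passed in W0, arguments in the following GPRs, and results come back
 * from X0 onwards; a guest call looks roughly like:
 *
 *	// function ID in w0, arguments in x1..
 *	hvc	#0	// or "smc #0", depending on the conduit
 *	// result(s) from x0
 */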
57 static int handle_smc(struct kvm_vcpu *vcpu)
58 {
59 	/*
60 	 * Forward this trapped smc instruction to the virtual EL2 if
61 	 * the guest has asked for it.
62 	 */
63 	if (forward_smc_trap(vcpu))
64 		return 1;
65 
66 	/*
67 	 * "If an SMC instruction executed at Non-secure EL1 is
68 	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
69 	 * Trap exception, not a Secure Monitor Call exception [...]"
70 	 *
71 	 * We need to advance the PC after the trap, as it would
72 	 * otherwise return to the same address. Furthermore, pre-incrementing
73 	 * the PC before potentially exiting to userspace maintains the same
74 	 * abstraction for both SMCs and HVCs.
75 	 */
76 	kvm_incr_pc(vcpu);
77 
78 	/*
79 	 * SMCs with a nonzero immediate are reserved according to DEN0028E 2.9
80 	 * "SMC and HVC immediate value".
81 	 */
82 	if (kvm_vcpu_hvc_get_imm(vcpu)) {
83 		vcpu_set_reg(vcpu, 0, ~0UL);
84 		return 1;
85 	}
86 
87 	/*
88 	 * If imm is zero then it is likely an SMCCC call.
89 	 *
90 	 * Note that on ARMv8.3, even if EL3 is not implemented, SMC executed
91 	 * at Non-secure EL1 is trapped to EL2 if HCR_EL2.TSC==1, rather than
92 	 * being treated as UNDEFINED.
93 	 */
94 	return kvm_smccc_call_handler(vcpu);
95 }
96 
97 /*
98  * This handles the cases where the system does not support FP/ASIMD or when
99  * we are running nested virtualization and the guest hypervisor is trapping
100  * FP/ASIMD accesses by its own guest.
101  *
102  * All other handling of guest vs. host FP/ASIMD register state is handled in
103  * fixup_guest_exit().
104  */
105 static int kvm_handle_fpasimd(struct kvm_vcpu *vcpu)
106 {
107 	if (guest_hyp_fpsimd_traps_enabled(vcpu))
108 		return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
109 
110 	/* This is the case when the system doesn't support FP/ASIMD. */
111 	kvm_inject_undefined(vcpu);
112 	return 1;
113 }
114 
115 /**
116  * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
117  *		    instruction executed by a guest
118  *
119  * @vcpu:	the vcpu pointer
120  *
121  * WFE[T]: Yield the CPU and come back to this vcpu when the scheduler
122  * decides to.
123  * WFI: Simply call kvm_vcpu_halt(), which will halt execution of
124  * world-switches and schedule other host processes until there is an
125  * incoming IRQ or FIQ to the VM.
126  * WFIT: Same as WFI, with a timed wakeup implemented as a background timer
127  *
128  * WF{I,E}T can immediately return if the deadline has already expired.
129  */
130 static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
131 {
132 	u64 esr = kvm_vcpu_get_esr(vcpu);
133 	bool is_wfe = !!(esr & ESR_ELx_WFx_ISS_WFE);
134 
135 	if (guest_hyp_wfx_traps_enabled(vcpu))
136 		return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
137 
138 	if (is_wfe) {
139 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
140 		vcpu->stat.wfe_exit_stat++;
141 	} else {
142 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
143 		vcpu->stat.wfi_exit_stat++;
144 	}
145 
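	/*
	 * WFET/WFIT with a valid register field carry an absolute deadline,
	 * expressed in the guest's view of the counter. Adjust "now" by the
	 * relevant timer offset and skip the wait entirely if the deadline
	 * has already passed.
	 */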
146 	if (esr & ESR_ELx_WFx_ISS_WFxT) {
147 		if (esr & ESR_ELx_WFx_ISS_RV) {
148 			u64 val, now;
149 
150 			now = kvm_phys_timer_read();
151 			if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
152 				now -= timer_get_offset(vcpu_hvtimer(vcpu));
153 			else
154 				now -= timer_get_offset(vcpu_vtimer(vcpu));
155 
156 			val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
157 
158 			if (now >= val)
159 				goto out;
160 		} else {
161 			/* Treat WFxT as WFx if RN is invalid */
162 			esr &= ~ESR_ELx_WFx_ISS_WFxT;
163 		}
164 	}
165 
166 	if (esr & ESR_ELx_WFx_ISS_WFE) {
167 		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
168 	} else {
169 		if (esr & ESR_ELx_WFx_ISS_WFxT)
170 			vcpu_set_flag(vcpu, IN_WFIT);
171 
172 		kvm_vcpu_wfi(vcpu);
173 	}
174 out:
175 	kvm_incr_pc(vcpu);
176 
177 	return 1;
178 }
179 
180 /**
181  * kvm_handle_guest_debug - handle a debug exception
182  *
183  * @vcpu:	the vcpu pointer
184  *
185  * We route all debug exceptions through the same handler. If both the
186  * guest and host are using the same debug facilities it will be up to
187  * userspace to re-inject the correct exception for guest delivery.
188  *
189  * @return: 0 (while setting vcpu->run->exit_reason), or 1 if forwarded to vEL2
190  */
191 static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
192 {
193 	struct kvm_run *run = vcpu->run;
194 	u64 esr = kvm_vcpu_get_esr(vcpu);
195 
196 	if (!vcpu->guest_debug && forward_debug_exception(vcpu))
197 		return 1;
198 
199 	run->exit_reason = KVM_EXIT_DEBUG;
200 	run->debug.arch.hsr = lower_32_bits(esr);
201 	run->debug.arch.hsr_high = upper_32_bits(esr);
202 	run->flags = KVM_DEBUG_ARCH_HSR_HIGH_VALID;
203 
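	/*
	 * Watchpoint hits additionally report the faulting address to
	 * userspace. For software step, setting PSTATE.SS puts the guest's
	 * step state machine back into the active-not-pending state before
	 * the vcpu is eventually re-entered.
	 */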
204 	switch (ESR_ELx_EC(esr)) {
205 	case ESR_ELx_EC_WATCHPT_LOW:
206 		run->debug.arch.far = vcpu->arch.fault.far_el2;
207 		break;
208 	case ESR_ELx_EC_SOFTSTP_LOW:
209 		*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
210 		break;
211 	}
212 
213 	return 0;
214 }
215 
216 static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
217 {
218 	u64 esr = kvm_vcpu_get_esr(vcpu);
219 
220 	kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n",
221 		      esr, esr_get_class_string(esr));
222 
223 	kvm_inject_undefined(vcpu);
224 	return 1;
225 }
226 
227 /*
228  * Guest access to SVE registers should be routed to this handler only
229  * when the system doesn't support SVE.
230  */
231 static int handle_sve(struct kvm_vcpu *vcpu)
232 {
233 	if (guest_hyp_sve_traps_enabled(vcpu))
234 		return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
235 
236 	kvm_inject_undefined(vcpu);
237 	return 1;
238 }
239 
240 /*
241  * Two possibilities to handle a trapping ptrauth instruction:
242  *
243  * - Guest usage of a ptrauth instruction (which the guest EL1 did not
244  *   turn into a NOP). If we get here, it is because we didn't enable
245  *   ptrauth for the guest. This results in an UNDEF, as it isn't
246  *   supposed to use ptrauth without being told it could.
247  *
248  * - Running an L2 NV guest while L1 has left HCR_EL2.API==0, and for
249  *   which we reinject the exception into L1.
250  *
251  * Anything else is an emulation bug (hence the WARN_ON + UNDEF).
252  */
253 static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
254 {
255 	if (!vcpu_has_ptrauth(vcpu)) {
256 		kvm_inject_undefined(vcpu);
257 		return 1;
258 	}
259 
260 	if (is_nested_ctxt(vcpu)) {
261 		kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
262 		return 1;
263 	}
264 
265 	/* Really shouldn't be here! */
266 	WARN_ON_ONCE(1);
267 	kvm_inject_undefined(vcpu);
268 	return 1;
269 }
270 
271 static int kvm_handle_eret(struct kvm_vcpu *vcpu)
272 {
273 	if (esr_iss_is_eretax(kvm_vcpu_get_esr(vcpu)) &&
274 	    !vcpu_has_ptrauth(vcpu))
275 		return kvm_handle_ptrauth(vcpu);
276 
277 	/*
278 	 * If we got here, two possibilities:
279 	 *
280 	 * - the guest is in EL2, and we need to fully emulate ERET
281 	 *
282 	 * - the guest is in EL1, and we need to reinject the
283 	 *   exception into the L1 hypervisor.
284 	 *
285 	 * If KVM ever traps ERET for its own use, we'll have to
286 	 * revisit this.
287 	 */
288 	if (is_hyp_ctxt(vcpu))
289 		kvm_emulate_nested_eret(vcpu);
290 	else
291 		kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
292 
293 	return 1;
294 }
295 
296 static int handle_svc(struct kvm_vcpu *vcpu)
297 {
298 	/*
299 	 * So far, SVC traps only for NV via HFGITR_EL2. An SVC from a
300 	 * 32bit guest would be caught by vcpu_mode_is_bad_32bit(), so
301 	 * we should only have to deal with a 64 bit exception.
302 	 */
303 	kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
304 	return 1;
305 }
306 
307 static int kvm_handle_gcs(struct kvm_vcpu *vcpu)
308 {
309 	/* We don't expect GCS, so treat it with contempt */
310 	if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, GCS, IMP))
311 		WARN_ON_ONCE(1);
312 
313 	kvm_inject_undefined(vcpu);
314 	return 1;
315 }
316 
317 static int handle_other(struct kvm_vcpu *vcpu)
318 {
319 	bool allowed, fwd = is_nested_ctxt(vcpu);
320 	u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
321 	u64 esr = kvm_vcpu_get_esr(vcpu);
322 	u64 iss = ESR_ELx_ISS(esr);
323 	struct kvm *kvm = vcpu->kvm;
324 
325 	/*
326 	 * We only trap for two reasons:
327 	 *
328 	 * - the feature is disabled, and the only outcome is to
329 	 *   generate an UNDEF.
330 	 *
331 	 * - the feature is enabled, but a NV guest wants to trap the
332 	 *   feature used by its L2 guest. We forward the exception in
333 	 *   this case.
334 	 *
335 	 * What we don't expect is to end up here if the guest is
336 	 * expected to be able to directly use the feature, hence the
337 	 * WARN_ON below.
338 	 */
339 	switch (iss) {
340 	case ESR_ELx_ISS_OTHER_ST64BV:
341 		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V);
342 		fwd &= !(hcrx & HCRX_EL2_EnASR);
343 		break;
344 	case ESR_ELx_ISS_OTHER_ST64BV0:
345 		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA);
346 		fwd &= !(hcrx & HCRX_EL2_EnAS0);
347 		break;
348 	case ESR_ELx_ISS_OTHER_LDST64B:
349 		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64);
350 		fwd &= !(hcrx & HCRX_EL2_EnALS);
351 		break;
352 	case ESR_ELx_ISS_OTHER_TSBCSYNC:
353 		allowed = kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, TRBE_V1P1);
354 		fwd &= (__vcpu_sys_reg(vcpu, HFGITR2_EL2) & HFGITR2_EL2_TSBCSYNC);
355 		break;
356 	case ESR_ELx_ISS_OTHER_PSBCSYNC:
357 		allowed = kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P5);
358 		fwd &= (__vcpu_sys_reg(vcpu, HFGITR_EL2) & HFGITR_EL2_PSBCSYNC);
359 		break;
360 	default:
361 		/* Clearly, we're missing something. */
362 		WARN_ON_ONCE(1);
363 		allowed = false;
364 	}
365 
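	/*
	 * Summing up: !allowed always results in an UNDEF; allowed && fwd
	 * reinjects the trap into the virtual EL2; allowed && !fwd should be
	 * impossible, as a feature the guest can use directly should never
	 * have trapped here in the first place.
	 */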
366 	WARN_ON_ONCE(allowed && !fwd);
367 
368 	if (allowed && fwd)
369 		kvm_inject_nested_sync(vcpu, esr);
370 	else
371 		kvm_inject_undefined(vcpu);
372 
373 	return 1;
374 }
375 
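/*
 * Exit handlers, indexed by the exception class (ESR_ELx.EC) of the trapped
 * exception. Anything not listed explicitly ends up in
 * kvm_handle_unknown_ec().
 */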
376 static exit_handle_fn arm_exit_handlers[] = {
377 	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
378 	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
379 	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
380 	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
381 	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
382 	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
383 	[ESR_ELx_EC_CP10_ID]	= kvm_handle_cp10_id,
384 	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
385 	[ESR_ELx_EC_OTHER]	= handle_other,
386 	[ESR_ELx_EC_HVC32]	= handle_hvc,
387 	[ESR_ELx_EC_SMC32]	= handle_smc,
388 	[ESR_ELx_EC_HVC64]	= handle_hvc,
389 	[ESR_ELx_EC_SMC64]	= handle_smc,
390 	[ESR_ELx_EC_SVC64]	= handle_svc,
391 	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
392 	[ESR_ELx_EC_SVE]	= handle_sve,
393 	[ESR_ELx_EC_ERET]	= kvm_handle_eret,
394 	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
395 	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
396 	[ESR_ELx_EC_DABT_CUR]	= kvm_handle_vncr_abort,
397 	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
398 	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
399 	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
400 	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
401 	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
402 	[ESR_ELx_EC_FP_ASIMD]	= kvm_handle_fpasimd,
403 	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
404 	[ESR_ELx_EC_GCS]	= kvm_handle_gcs,
405 };
406 
407 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
408 {
409 	u64 esr = kvm_vcpu_get_esr(vcpu);
410 	u8 esr_ec = ESR_ELx_EC(esr);
411 
412 	return arm_exit_handlers[esr_ec];
413 }
414 
415 /*
416  * We may be single-stepping an emulated instruction. If the emulation
417  * has been completed in the kernel, we can return to userspace with a
418  * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
419  * emulation first.
420  */
421 static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
422 {
423 	int handled;
424 
425 	/*
426 	 * See ARM ARM B1.14.1: "Hyp traps on instructions
427 	 * that fail their condition code check"
428 	 */
429 	if (!kvm_condition_valid(vcpu)) {
430 		kvm_incr_pc(vcpu);
431 		handled = 1;
432 	} else {
433 		exit_handle_fn exit_handler;
434 
435 		exit_handler = kvm_get_exit_handler(vcpu);
436 		handled = exit_handler(vcpu);
437 	}
438 
439 	return handled;
440 }
441 
442 /*
443  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
444  * proper exit to userspace.
445  */
446 int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
447 {
448 	struct kvm_run *run = vcpu->run;
449 
450 	if (ARM_SERROR_PENDING(exception_index)) {
451 		/*
452 		 * The SError is handled by handle_exit_early(). If the guest
453 		 * survives it will re-execute the original instruction.
454 		 */
455 		return 1;
456 	}
457 
458 	exception_index = ARM_EXCEPTION_CODE(exception_index);
459 
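	/*
	 * IRQs are handled by the host once interrupts are re-enabled, and
	 * any guest SError was dealt with in handle_exit_early(); both cases
	 * simply resume the guest below.
	 */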
460 	switch (exception_index) {
461 	case ARM_EXCEPTION_IRQ:
462 		return 1;
463 	case ARM_EXCEPTION_EL1_SERROR:
464 		return 1;
465 	case ARM_EXCEPTION_TRAP:
466 		return handle_trap_exceptions(vcpu);
467 	case ARM_EXCEPTION_HYP_GONE:
468 		/*
469 		 * EL2 has been reset to the hyp-stub. This happens when a guest
470 		 * is pre-empted by kvm_reboot()'s shutdown call.
471 		 */
472 		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
473 		return 0;
474 	case ARM_EXCEPTION_IL:
475 		/*
476 		 * We attempted an illegal exception return.  Guest state must
477 		 * have been corrupted somehow.  Give up.
478 		 */
479 		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
480 		return -EINVAL;
481 	default:
482 		kvm_pr_unimpl("Unsupported exception type: %d",
483 			      exception_index);
484 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
485 		return 0;
486 	}
487 }
488 
489 /* For exit types that need handling before we can be preempted */
490 void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
491 {
492 	if (ARM_SERROR_PENDING(exception_index)) {
493 		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
494 			u64 disr = kvm_vcpu_get_disr(vcpu);
495 
496 			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
497 		} else {
498 			kvm_inject_serror(vcpu);
499 		}
500 
501 		return;
502 	}
503 
504 	exception_index = ARM_EXCEPTION_CODE(exception_index);
505 
506 	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
507 		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
508 }
509 
510 static void print_nvhe_hyp_panic(const char *name, u64 panic_addr)
511 {
512 	kvm_err("nVHE hyp %s at: [<%016llx>] %pB!\n", name, panic_addr,
513 		(void *)(panic_addr + kaslr_offset()));
514 }
515 
516 static void kvm_nvhe_report_cfi_failure(u64 panic_addr)
517 {
518 	print_nvhe_hyp_panic("CFI failure", panic_addr);
519 
520 	if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
521 		kvm_err(" (CONFIG_CFI_PERMISSIVE ignored for hyp failures)\n");
522 }
523 
524 void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
525 					      u64 elr_virt, u64 elr_phys,
526 					      u64 par, uintptr_t vcpu,
527 					      u64 far, u64 hpfar) {
528 	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
529 	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
530 	u64 mode = spsr & PSR_MODE_MASK;
531 	u64 panic_addr = elr_virt + hyp_offset;
532 
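	/*
	 * hyp_offset is chosen so that panic_addr == elr_in_kimg -
	 * kaslr_offset(), i.e. the un-randomized kernel image address of the
	 * faulting EL2 instruction; the KASLR offset is added back in when
	 * printing symbols and dumping the instruction below.
	 */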
533 	if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
534 		kvm_err("Invalid host exception to nVHE hyp!\n");
535 	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
536 		   esr_brk_comment(esr) == BUG_BRK_IMM) {
537 		const char *file = NULL;
538 		unsigned int line = 0;
539 
540 		/* All hyp bugs, including warnings, are treated as fatal. */
541 		if (!is_protected_kvm_enabled() ||
542 		    IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
543 			struct bug_entry *bug = find_bug(elr_in_kimg);
544 
545 			if (bug)
546 				bug_get_file_line(bug, &file, &line);
547 		}
548 
549 		if (file)
550 			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
551 		else
552 			print_nvhe_hyp_panic("BUG", panic_addr);
553 	} else if (IS_ENABLED(CONFIG_CFI) && esr_is_cfi_brk(esr)) {
554 		kvm_nvhe_report_cfi_failure(panic_addr);
555 	} else if (IS_ENABLED(CONFIG_UBSAN_KVM_EL2) &&
556 		   ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
557 		   esr_is_ubsan_brk(esr)) {
558 		print_nvhe_hyp_panic(report_ubsan_failure(esr & UBSAN_BRK_MASK),
559 				     panic_addr);
560 	} else {
561 		print_nvhe_hyp_panic("panic", panic_addr);
562 	}
563 
564 	/* Dump the nVHE hypervisor backtrace */
565 	kvm_nvhe_dump_backtrace(hyp_offset);
566 
567 	/* Dump the faulting instruction */
568 	dump_kernel_instr(panic_addr + kaslr_offset());
569 
570 	/*
571 	 * Hyp has panicked and we're going to handle that by panicking the
572 	 * kernel. The kernel offset will be revealed in the panic so we're
573 	 * also safe to reveal the hyp offset as a debugging aid for translating
574 	 * hyp VAs to vmlinux addresses.
575 	 */
576 	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);
577 
578 	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%016llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
579 	      spsr, elr_virt, esr, far, hpfar, par, vcpu);
580 }
581