xref: /linux/arch/arm64/kvm/hyp/nvhe/debug-sr.c (revision e2ee2e9b159094527ae7ad78058b1316f62fc5b7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2015 - ARM Ltd
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  */
6 
7 #include <hyp/debug-sr.h>
8 
9 #include <linux/compiler.h>
10 #include <linux/kvm_host.h>
11 
12 #include <asm/debug-monitors.h>
13 #include <asm/kvm_asm.h>
14 #include <asm/kvm_hyp.h>
15 #include <asm/kvm_mmu.h>
16 
/*
 * Save the host's SPE (Statistical Profiling Extension) control state and
 * stop profiling data generation before running the guest.
 *
 * @pmscr_el1: out-parameter receiving the host's PMSCR_EL1 value, or 0 when
 *	       profiling was not enabled so the restore path becomes a no-op.
 */
static void __debug_save_spe(u64 *pmscr_el1)
{
	u64 reg;

	/* Clear pmscr in case of early return */
	*pmscr_el1 = 0;

	/*
	 * At this point, we know that this CPU implements
	 * SPE and is available to the host.
	 * Check if the host is actually using it ?
	 */
	reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
	if (!(reg & BIT(PMBLIMITR_EL1_E_SHIFT)))
		return;

	/* Yes; save the control register and disable data generation */
	*pmscr_el1 = read_sysreg_el1(SYS_PMSCR);
	write_sysreg_el1(0, SYS_PMSCR);
	/* Synchronise the PMSCR write before draining the buffer */
	isb();

	/* Now drain all buffered data to memory */
	psb_csync();
}
41 
/*
 * Restore the host's SPE control register saved by __debug_save_spe().
 * A saved value of 0 means profiling was not active: nothing to do.
 *
 * @pmscr_el1: host PMSCR_EL1 value captured on guest entry.
 */
static void __debug_restore_spe(u64 pmscr_el1)
{
	if (!pmscr_el1)
		return;

	/* The host page table is installed, but not yet synchronised */
	isb();

	/* Re-enable data generation */
	write_sysreg_el1(pmscr_el1, SYS_PMSCR);
}
53 
54 static void __trace_do_switch(u64 *saved_trfcr, u64 new_trfcr)
55 {
56 	*saved_trfcr = read_sysreg_el1(SYS_TRFCR);
57 	write_sysreg_el1(new_trfcr, SYS_TRFCR);
58 }
59 
60 static bool __trace_needs_drain(void)
61 {
62 	if (is_protected_kvm_enabled() && host_data_test_flag(HAS_TRBE))
63 		return read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E;
64 
65 	return host_data_test_flag(TRBE_ENABLED);
66 }
67 
68 static bool __trace_needs_switch(void)
69 {
70 	return host_data_test_flag(TRBE_ENABLED) ||
71 	       host_data_test_flag(EL1_TRACING_CONFIGURED);
72 }
73 
/*
 * Install the guest's TRFCR_EL1 value (saving the host's), then drain the
 * trace buffer if it was in use.
 */
static void __trace_switch_to_guest(void)
{
	/* Unsupported with TRBE so disable */
	if (host_data_test_flag(TRBE_ENABLED))
		*host_data_ptr(trfcr_while_in_guest) = 0;

	__trace_do_switch(host_data_ptr(host_debug_state.trfcr_el1),
			  *host_data_ptr(trfcr_while_in_guest));

	if (__trace_needs_drain()) {
		/* Synchronise the TRFCR write before draining the buffer */
		isb();
		tsb_csync();
	}
}
88 
/*
 * Restore the host's TRFCR_EL1, saving the value that was in effect while
 * running the guest (mirror of __trace_switch_to_guest()).
 */
static void __trace_switch_to_host(void)
{
	__trace_do_switch(host_data_ptr(trfcr_while_in_guest),
			  *host_data_ptr(host_debug_state.trfcr_el1));
}
94 
/*
 * Quiesce host-owned profiling/tracing hardware before entering the guest:
 * SPE is stopped and drained first, then TRFCR is switched if needed.
 *
 * @vcpu is unused here; kept for symmetry with the other nVHE hooks.
 */
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
	/* Disable and flush SPE data generation */
	if (host_data_test_flag(HAS_SPE))
		__debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1));

	if (__trace_needs_switch())
		__trace_switch_to_guest();
}
104 
/* Thin nVHE wrapper around the shared debug-register guest-entry path. */
void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
{
	__debug_switch_to_guest_common(vcpu);
}
109 
/*
 * Undo __debug_save_host_buffers_nvhe() on guest exit: re-enable SPE with
 * the saved PMSCR_EL1 and switch TRFCR back to the host's value.
 *
 * @vcpu is unused here; kept for symmetry with the other nVHE hooks.
 */
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
	if (host_data_test_flag(HAS_SPE))
		__debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1));
	if (__trace_needs_switch())
		__trace_switch_to_host();
}
117 
/* Thin nVHE wrapper around the shared debug-register host-return path. */
void __debug_switch_to_host(struct kvm_vcpu *vcpu)
{
	__debug_switch_to_host_common(vcpu);
}
122