/* xref: /linux/arch/arm64/kvm/hyp/nvhe/debug-sr.c (revision 7aba10efef1d972fc82b00b84911f07f6afbdb78) */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2015 - ARM Ltd
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  */
6 
7 #include <hyp/debug-sr.h>
8 
9 #include <linux/compiler.h>
10 #include <linux/kvm_host.h>
11 
12 #include <asm/debug-monitors.h>
13 #include <asm/kvm_asm.h>
14 #include <asm/kvm_hyp.h>
15 #include <asm/kvm_mmu.h>
16 
/*
 * Save the host's SPE (Statistical Profiling Extension) state and stop
 * profiling before entering the guest, draining any buffered records to
 * memory so nothing is written to host buffers behind our back.
 *
 * Only called when the HAS_SPE host flag is set (see
 * __debug_save_host_buffers_nvhe()): SPE is implemented and owned by
 * the host on this CPU.
 */
static void __debug_save_spe(void)
{
	u64 *pmscr_el1, *pmblimitr_el1;

	/* Per-CPU host state where the saved register values live */
	pmscr_el1 = host_data_ptr(host_debug_state.pmscr_el1);
	pmblimitr_el1 = host_data_ptr(host_debug_state.pmblimitr_el1);

	/*
	 * At this point, we know that this CPU implements
	 * SPE and is available to the host.
	 * Check if the host is actually using it ?
	 */
	*pmblimitr_el1 = read_sysreg_s(SYS_PMBLIMITR_EL1);
	if (!(*pmblimitr_el1 & BIT(PMBLIMITR_EL1_E_SHIFT)))
		return;

	/* Yes; save the control register and disable data generation */
	*pmscr_el1 = read_sysreg_el1(SYS_PMSCR);
	write_sysreg_el1(0, SYS_PMSCR);
	/* Make the PMSCR write take effect before draining */
	isb();

	/* Now drain all buffered data to memory */
	psb_csync();
	dsb(nsh);

	/* And disable the profiling buffer */
	write_sysreg_s(0, SYS_PMBLIMITR_EL1);
	isb();
}
46 
/*
 * Restore the host's SPE state saved by __debug_save_spe() after the
 * guest has run: re-enable the profiling buffer first, then data
 * generation. A no-op if the host wasn't profiling on guest entry.
 */
static void __debug_restore_spe(void)
{
	u64 pmblimitr_el1 = *host_data_ptr(host_debug_state.pmblimitr_el1);

	/* Buffer wasn't enabled on entry: nothing was saved/disabled */
	if (!(pmblimitr_el1 & BIT(PMBLIMITR_EL1_E_SHIFT)))
		return;

	/* The host page table is installed, but not yet synchronised */
	isb();

	/* Re-enable the profiling buffer. */
	write_sysreg_s(pmblimitr_el1, SYS_PMBLIMITR_EL1);
	isb();

	/* Re-enable data generation */
	write_sysreg_el1(*host_data_ptr(host_debug_state.pmscr_el1), SYS_PMSCR);
}
64 
/*
 * Swap the EL1 trace filter control: save the current TRFCR_EL1 value
 * through @saved_trfcr, then install @new_trfcr. Used symmetrically in
 * both directions of the world switch.
 */
static void __trace_do_switch(u64 *saved_trfcr, u64 new_trfcr)
{
	*saved_trfcr = read_sysreg_el1(SYS_TRFCR);
	write_sysreg_el1(new_trfcr, SYS_TRFCR);
}
70 
/*
 * Drain any trace held in the Trace Buffer Unit (TRBE) to memory and
 * disable it, saving TRBLIMITR_EL1 into the host debug state so
 * __trace_switch_to_host() can re-enable the unit.
 *
 * In protected mode a drain is attempted whenever TRBE exists
 * (presumably because the host-provided TRBE_ENABLED flag can't be
 * trusted there — NOTE(review): confirm against pKVM flag handling);
 * otherwise only when the host actually has TRBE enabled.
 */
static void __trace_drain_and_disable(void)
{
	u64 *trblimitr_el1 = host_data_ptr(host_debug_state.trblimitr_el1);
	bool needs_drain = is_protected_kvm_enabled() ?
			   host_data_test_flag(HAS_TRBE) :
			   host_data_test_flag(TRBE_ENABLED);

	if (!needs_drain) {
		/* Record "nothing to restore" for the return path */
		*trblimitr_el1 = 0;
		return;
	}

	*trblimitr_el1 = read_sysreg_s(SYS_TRBLIMITR_EL1);
	if (*trblimitr_el1 & TRBLIMITR_EL1_E) {
		/*
		 * The host has enabled the Trace Buffer Unit so we have
		 * to beat the CPU with a stick until it stops accessing
		 * memory.
		 */

		/* First, ensure that our prior write to TRFCR has stuck. */
		isb();

		/* Now synchronise with the trace and drain the buffer. */
		tsb_csync();
		dsb(nsh);

		/*
		 * With no more trace being generated, we can disable the
		 * Trace Buffer Unit.
		 */
		write_sysreg_s(0, SYS_TRBLIMITR_EL1);
		if (cpus_have_final_cap(ARM64_WORKAROUND_2064142)) {
			/*
			 * Some CPUs are so good, we have to drain 'em
			 * twice.
			 */
			tsb_csync();
			dsb(nsh);
		}

		/*
		 * Ensure that the Trace Buffer Unit is disabled before
		 * we start mucking with the stage-2 and trap
		 * configuration.
		 */
		isb();
	}
}
120 
121 static bool __trace_needs_switch(void)
122 {
123 	return host_data_test_flag(TRBE_ENABLED) ||
124 	       host_data_test_flag(EL1_TRACING_CONFIGURED);
125 }
126 
/*
 * Program the guest-time TRFCR_EL1 value and quiesce the trace buffer
 * before entering the guest. When the host has TRBE enabled, tracing
 * while in the guest is unsupported, so the guest-time value is forced
 * to 0 (tracing off) before the switch.
 */
static void __trace_switch_to_guest(void)
{
	/* Unsupported with TRBE so disable */
	if (host_data_test_flag(TRBE_ENABLED))
		*host_data_ptr(trfcr_while_in_guest) = 0;

	/* Save the host's TRFCR_EL1 and install the guest value */
	__trace_do_switch(host_data_ptr(host_debug_state.trfcr_el1),
			  *host_data_ptr(trfcr_while_in_guest));
	/* With trace generation reconfigured, drain and disable TRBE */
	__trace_drain_and_disable();
}
137 
/*
 * Undo __trace_switch_to_guest(): re-enable the Trace Buffer Unit if
 * the host had it enabled on entry, then restore the host's TRFCR_EL1
 * (saving the guest-time value back into trfcr_while_in_guest).
 */
static void __trace_switch_to_host(void)
{
	/* Value saved by __trace_drain_and_disable(); 0 if TRBE was off */
	u64 trblimitr_el1 = *host_data_ptr(host_debug_state.trblimitr_el1);

	if (trblimitr_el1 & TRBLIMITR_EL1_E) {
		/* Re-enable the Trace Buffer Unit for the host. */
		write_sysreg_s(trblimitr_el1, SYS_TRBLIMITR_EL1);
		isb();
		if (cpus_have_final_cap(ARM64_WORKAROUND_2038923)) {
			/*
			 * Make sure the unit is re-enabled before we
			 * poke TRFCR.
			 */
			isb();
		}
	}

	__trace_do_switch(host_data_ptr(trfcr_while_in_guest),
			  *host_data_ptr(host_debug_state.trfcr_el1));
}
158 
159 static void __debug_save_brbe(void)
160 {
161 	u64 *brbcr_el1 = host_data_ptr(host_debug_state.brbcr_el1);
162 
163 	*brbcr_el1 = 0;
164 
165 	/* Check if the BRBE is enabled */
166 	if (!(read_sysreg_el1(SYS_BRBCR) & (BRBCR_ELx_E0BRE | BRBCR_ELx_ExBRE)))
167 		return;
168 
169 	/*
170 	 * Prohibit branch record generation while we are in guest.
171 	 * Since access to BRBCR_EL1 is trapped, the guest can't
172 	 * modify the filtering set by the host.
173 	 */
174 	*brbcr_el1 = read_sysreg_el1(SYS_BRBCR);
175 	write_sysreg_el1(0, SYS_BRBCR);
176 }
177 
178 static void __debug_restore_brbe(void)
179 {
180 	u64 brbcr_el1 = *host_data_ptr(host_debug_state.brbcr_el1);
181 
182 	if (!brbcr_el1)
183 		return;
184 
185 	/* Restore BRBE controls */
186 	write_sysreg_el1(brbcr_el1, SYS_BRBCR);
187 }
188 
/*
 * Save and disable the host's self-hosted debug/trace facilities (SPE,
 * BRBE, then TRBE/TRFCR) before running the guest, so none of them
 * write to host buffers while the guest owns the CPU.
 *
 * @vcpu: unused here; kept to match the hyp entry-point signature.
 */
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
	/* Disable and flush SPE data generation */
	if (host_data_test_flag(HAS_SPE))
		__debug_save_spe();

	/* Disable BRBE branch records */
	if (host_data_test_flag(HAS_BRBE))
		__debug_save_brbe();

	if (__trace_needs_switch())
		__trace_switch_to_guest();
}
202 
/*
 * Switch debug register state to the guest's. Thin nVHE wrapper around
 * the implementation shared with VHE (see hyp/debug-sr.h).
 */
void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
{
	__debug_switch_to_guest_common(vcpu);
}
207 
/*
 * Restore the host's debug/trace state saved by
 * __debug_save_host_buffers_nvhe(), in the same order: SPE, BRBE, then
 * the trace/TRBE configuration.
 *
 * @vcpu: unused here; kept to match the hyp entry-point signature.
 */
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
	if (host_data_test_flag(HAS_SPE))
		__debug_restore_spe();
	if (host_data_test_flag(HAS_BRBE))
		__debug_restore_brbe();
	if (__trace_needs_switch())
		__trace_switch_to_host();
}
217 
/*
 * Switch debug register state back to the host's. Thin nVHE wrapper
 * around the implementation shared with VHE (see hyp/debug-sr.h).
 */
void __debug_switch_to_host(struct kvm_vcpu *vcpu)
{
	__debug_switch_to_host_common(vcpu);
}
222