xref: /linux/arch/arm64/kvm/debug.c (revision e2683c8868d03382da7e1ce8453b543a043066d1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Debug and Guest Debug support
4  *
5  * Copyright (C) 2015 - Linaro Ltd
6  * Authors: Alex Bennée <alex.bennee@linaro.org>
7  * 	    Oliver Upton <oliver.upton@linux.dev>
8  */
9 
10 #include <linux/kvm_host.h>
11 #include <linux/hw_breakpoint.h>
12 
13 #include <asm/arm_pmuv3.h>
14 #include <asm/debug-monitors.h>
15 #include <asm/kvm_asm.h>
16 #include <asm/kvm_arm.h>
17 #include <asm/kvm_emulate.h>
18 
19 static int cpu_has_spe(u64 dfr0)
20 {
21 	return cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
22 	       !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P);
23 }
24 
/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu:	the vcpu pointer
 *
 * This ensures we will trap access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
	/*
	 * NOTE(review): preemption is disabled around the host_data_ptr()
	 * read and the write_sysreg() below — presumably so both happen on
	 * the same physical CPU; confirm against host_data_ptr()'s
	 * definition.
	 */
	preempt_disable();

	/*
	 * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
	 * to disable guest access to the profiling and trace buffers
	 */
	vcpu->arch.mdcr_el2 = FIELD_PREP(MDCR_EL2_HPMN,
					 *host_data_ptr(nr_event_counters));
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

	/* Is the VM being debugged by userspace? */
	if (vcpu->guest_debug)
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

	/*
	 * Trap debug registers if the guest doesn't have ownership of them.
	 */
	if (!kvm_guest_owns_debug_regs(vcpu))
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	/* Let nested virt add its own trap requirements on top */
	if (vcpu_has_nv(vcpu))
		kvm_nested_setup_mdcr_el2(vcpu);

	/* Write MDCR_EL2 directly if we're already at EL2 */
	if (has_vhe())
		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	preempt_enable();
}
75 
/*
 * Probe the host's debug-related CPU features from ID_AA64DFR0_EL1 and
 * cache the results in the host data area.
 */
void kvm_init_host_debug_data(void)
{
	u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
	unsigned int pmuver = cpuid_feature_extract_unsigned_field(dfr0,
								   ID_AA64DFR0_EL1_PMUVer_SHIFT);

	/* Record the number of implemented event counters (PMCR_EL0.N) */
	if (pmuv3_implemented(pmuver))
		*host_data_ptr(nr_event_counters) = FIELD_GET(ARMV8_PMU_PMCR_N,
							      read_sysreg(pmcr_el0));

	/* Number of implemented hardware breakpoints/watchpoints */
	*host_data_ptr(debug_brps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr0);
	*host_data_ptr(debug_wrps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr0);

	if (cpu_has_spe(dfr0))
		host_data_set_flag(HAS_SPE);

	/* The flags below are only set up for non-VHE hosts */
	if (has_vhe())
		return;

	/* Check if we have BRBE implemented and available at the host */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_BRBE_SHIFT))
		host_data_set_flag(HAS_BRBE);

	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT)) {
		/* Force disable trace in protected mode in case of no TRBE */
		if (is_protected_kvm_enabled())
			host_data_set_flag(EL1_TRACING_CONFIGURED);

		/* TRBE is usable only if implemented and not owned by EL3 */
		if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
		    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
			host_data_set_flag(HAS_TRBE);
	}
}
109 
110 void kvm_debug_init_vhe(void)
111 {
112 	/* Clear PMSCR_EL1.E{0,1}SPE which reset to UNKNOWN values. */
113 	if (host_data_test_flag(HAS_SPE))
114 		write_sysreg_el1(0, SYS_PMSCR);
115 }
116 
117 /*
118  * Configures the 'external' MDSCR_EL1 value for the guest, i.e. when the host
119  * has taken over MDSCR_EL1.
120  *
121  *  - Userspace is single-stepping the guest, and MDSCR_EL1.SS is forced to 1.
122  *
123  *  - Userspace is using the breakpoint/watchpoint registers to debug the
124  *    guest, and MDSCR_EL1.MDE is forced to 1.
125  *
126  *  - The guest has enabled the OS Lock, and KVM is forcing MDSCR_EL1.MDE to 0,
127  *    masking all debug exceptions affected by the OS Lock.
128  */
129 static void setup_external_mdscr(struct kvm_vcpu *vcpu)
130 {
131 	/*
132 	 * Use the guest's MDSCR_EL1 as a starting point, since there are
133 	 * several other features controlled by MDSCR_EL1 that are not relevant
134 	 * to the host.
135 	 *
136 	 * Clear the bits that KVM may use which also satisfies emulation of
137 	 * the OS Lock as MDSCR_EL1.MDE is cleared.
138 	 */
139 	u64 mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1) & ~(MDSCR_EL1_SS |
140 							   MDSCR_EL1_MDE |
141 							   MDSCR_EL1_KDE);
142 
143 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
144 		mdscr |= MDSCR_EL1_SS;
145 
146 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
147 		mdscr |= MDSCR_EL1_MDE | MDSCR_EL1_KDE;
148 
149 	vcpu->arch.external_mdscr_el1 = mdscr;
150 }
151 
/*
 * Set up the vcpu's debug state at load time: decide who owns the debug
 * registers, compute the external MDSCR_EL1 if the host owns them, and
 * derive MDCR_EL2 from the result.
 */
void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu)
{
	u64 mdscr;

	/* Must be called before kvm_vcpu_load_vhe() */
	KVM_BUG_ON(vcpu_get_flag(vcpu, SYSREGS_ON_CPU), vcpu->kvm);

	/* Preserve the host's MDCR_EL2; kvm_vcpu_put_debug() restores it */
	if (has_vhe())
		*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);

	/*
	 * Determine which of the possible debug states we're in:
	 *
	 *  - VCPU_DEBUG_HOST_OWNED: KVM has taken ownership of the guest's
	 *    breakpoint/watchpoint registers, or needs to use MDSCR_EL1 to do
	 *    software step or emulate the effects of the OS Lock being enabled.
	 *
	 *  - VCPU_DEBUG_GUEST_OWNED: The guest has debug exceptions enabled, and
	 *    the breakpoint/watchpoint registers need to be loaded eagerly.
	 *
	 *  - VCPU_DEBUG_FREE: Neither of the above apply, no breakpoint/watchpoint
	 *    context needs to be loaded on the CPU.
	 */
	if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
		vcpu->arch.debug_owner = VCPU_DEBUG_HOST_OWNED;
		setup_external_mdscr(vcpu);

		/*
		 * Steal the guest's single-step state machine if userspace wants
		 * single-step the guest. The guest's PSTATE.SS position is
		 * stashed in GUEST_SS_ACTIVE_PENDING and the host's position
		 * (saved by kvm_vcpu_put_debug()) is installed in its place.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			if (*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
				vcpu_clear_flag(vcpu, GUEST_SS_ACTIVE_PENDING);
			else
				vcpu_set_flag(vcpu, GUEST_SS_ACTIVE_PENDING);

			if (!vcpu_get_flag(vcpu, HOST_SS_ACTIVE_PENDING))
				*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
			else
				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
		}
	} else {
		/* Guest keeps the debug registers if it enabled debug itself */
		mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

		if (mdscr & (MDSCR_EL1_KDE | MDSCR_EL1_MDE))
			vcpu->arch.debug_owner = VCPU_DEBUG_GUEST_OWNED;
		else
			vcpu->arch.debug_owner = VCPU_DEBUG_FREE;
	}

	kvm_arm_setup_mdcr_el2(vcpu);
}
205 
206 void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu)
207 {
208 	if (has_vhe())
209 		write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
210 
211 	if (likely(!(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
212 		return;
213 
214 	/*
215 	 * Save the host's software step state and restore the guest's before
216 	 * potentially returning to userspace.
217 	 */
218 	if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
219 		vcpu_set_flag(vcpu, HOST_SS_ACTIVE_PENDING);
220 	else
221 		vcpu_clear_flag(vcpu, HOST_SS_ACTIVE_PENDING);
222 
223 	if (vcpu_get_flag(vcpu, GUEST_SS_ACTIVE_PENDING))
224 		*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
225 	else
226 		*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
227 }
228 
229 /*
230  * Updates ownership of the debug registers after a trapped guest access to a
231  * breakpoint/watchpoint register. Host ownership of the debug registers is of
232  * strictly higher priority, and it is the responsibility of the VMM to emulate
233  * guest debug exceptions in this configuration.
234  */
235 void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu)
236 {
237 	if (kvm_host_owns_debug_regs(vcpu))
238 		return;
239 
240 	vcpu->arch.debug_owner = VCPU_DEBUG_GUEST_OWNED;
241 	kvm_arm_setup_mdcr_el2(vcpu);
242 }
243 
244 void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
245 {
246 	if (val & OSLAR_EL1_OSLK)
247 		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK);
248 	else
249 		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, &=, ~OSLSR_EL1_OSLK);
250 
251 	preempt_disable();
252 	kvm_arch_vcpu_put(vcpu);
253 	kvm_arch_vcpu_load(vcpu, smp_processor_id());
254 	preempt_enable();
255 }
256 
257 static bool skip_trbe_access(bool skip_condition)
258 {
259 	return (WARN_ON_ONCE(preemptible()) || skip_condition ||
260 		is_protected_kvm_enabled() || !is_kvm_arm_initialised());
261 }
262 
263 void kvm_enable_trbe(void)
264 {
265 	if (!skip_trbe_access(has_vhe()))
266 		host_data_set_flag(TRBE_ENABLED);
267 }
268 EXPORT_SYMBOL_GPL(kvm_enable_trbe);
269 
270 void kvm_disable_trbe(void)
271 {
272 	if (!skip_trbe_access(has_vhe()))
273 		host_data_clear_flag(TRBE_ENABLED);
274 }
275 EXPORT_SYMBOL_GPL(kvm_disable_trbe);
276 
277 void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest)
278 {
279 	if (skip_trbe_access(false))
280 		return;
281 
282 	if (has_vhe()) {
283 		write_sysreg_s(trfcr_while_in_guest, SYS_TRFCR_EL12);
284 		return;
285 	}
286 
287 	*host_data_ptr(trfcr_while_in_guest) = trfcr_while_in_guest;
288 	if (read_sysreg_s(SYS_TRFCR_EL1) != trfcr_while_in_guest)
289 		host_data_set_flag(EL1_TRACING_CONFIGURED);
290 	else
291 		host_data_clear_flag(EL1_TRACING_CONFIGURED);
292 }
293 EXPORT_SYMBOL_GPL(kvm_tracing_set_el1_configuration);
294