// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Authors: Alex Bennée <alex.bennee@linaro.org>
 *	    Oliver Upton <oliver.upton@linux.dev>
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

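/*
 * SPE is usable only when it is implemented (ID_AA64DFR0_EL1.PMSVer != 0) and
 * the profiling buffer is programmable from the current exception level;
 * PMBIDR_EL1.P is set when a higher exception level owns the buffer.
 */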
static int cpu_has_spe(u64 dfr0)
{
	return cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
	       !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P);
}

/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu:	the vcpu pointer
 *
 * This ensures we will trap access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 *  - Self-hosted Trace buffer (MDCR_EL2_E2TB)
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	/*
	 * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
	 * to disable guest access to the profiling and trace buffers
	 */
	vcpu->arch.mdcr_el2 = FIELD_PREP(MDCR_EL2_HPMN,
					 *host_data_ptr(nr_event_counters));
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

	/* Is the VM being debugged by userspace? */
	if (vcpu->guest_debug)
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

	/*
	 * Trap debug registers if the guest doesn't have ownership of them.
	 */
	if (!kvm_guest_owns_debug_regs(vcpu))
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	if (vcpu_has_nv(vcpu))
		kvm_nested_setup_mdcr_el2(vcpu);

	/* Write MDCR_EL2 directly if we're already at EL2 */
	if (has_vhe())
		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	preempt_enable();
}

void kvm_init_host_debug_data(void)
{
	u64 dfr0 = read_sysreg(id_aa64dfr0_el1);

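	/*
	 * PMUVer is a signed ID field, so an IMP DEF PMU (0xf) extracts as a
	 * negative value and is also rejected by this check.
	 */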
	if (cpuid_feature_extract_signed_field(dfr0, ID_AA64DFR0_EL1_PMUVer_SHIFT) > 0)
		*host_data_ptr(nr_event_counters) = FIELD_GET(ARMV8_PMU_PMCR_N,
							      read_sysreg(pmcr_el0));

	*host_data_ptr(debug_brps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr0);
	*host_data_ptr(debug_wrps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr0);

	if (cpu_has_spe(dfr0))
		host_data_set_flag(HAS_SPE);

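	/*
	 * The flags below are only consumed by the nVHE hypervisor's debug
	 * save/restore; with VHE the host retains direct control of BRBE,
	 * TRBE and the trace filter.
	 */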
	if (has_vhe())
		return;

	/* Check if we have BRBE implemented and available on the host */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_BRBE_SHIFT))
		host_data_set_flag(HAS_BRBE);

	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT)) {
		/* Force trace to be disabled in protected mode, in case there is no TRBE */
		if (is_protected_kvm_enabled())
			host_data_set_flag(EL1_TRACING_CONFIGURED);

		if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
		    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
			host_data_set_flag(HAS_TRBE);
	}
}

void kvm_debug_init_vhe(void)
{
	/* Clear PMSCR_EL1.E{0,1}SPE which reset to UNKNOWN values. */
	if (host_data_test_flag(HAS_SPE))
		write_sysreg_el1(0, SYS_PMSCR);
}

/*
 * Configures the 'external' MDSCR_EL1 value for the guest, i.e. when the host
 * has taken over MDSCR_EL1. One or more of the following applies:
 *
 *  - Userspace is single-stepping the guest, and MDSCR_EL1.SS is forced to 1.
 *
 *  - Userspace is using the breakpoint/watchpoint registers to debug the
 *    guest, and MDSCR_EL1.MDE is forced to 1.
 *
 *  - The guest has enabled the OS Lock, and KVM is forcing MDSCR_EL1.MDE to 0,
 *    masking all debug exceptions affected by the OS Lock.
 */
static void setup_external_mdscr(struct kvm_vcpu *vcpu)
{
	/*
	 * Use the guest's MDSCR_EL1 as a starting point, since there are
	 * several other features controlled by MDSCR_EL1 that are not relevant
	 * to the host.
	 *
	 * Clear the bits that KVM may use, which also satisfies emulation of
	 * the OS Lock as MDSCR_EL1.MDE is cleared.
	 */
	u64 mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1) & ~(MDSCR_EL1_SS |
							   MDSCR_EL1_MDE |
							   MDSCR_EL1_KDE);

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		mdscr |= MDSCR_EL1_SS;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
		mdscr |= MDSCR_EL1_MDE | MDSCR_EL1_KDE;

	vcpu->arch.external_mdscr_el1 = mdscr;
}

void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu)
{
	u64 mdscr;

	/* Must be called before kvm_vcpu_load_vhe() */
	KVM_BUG_ON(vcpu_get_flag(vcpu, SYSREGS_ON_CPU), vcpu->kvm);

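	/*
	 * Save the host's MDCR_EL2 for kvm_vcpu_put_debug() to restore, as
	 * kvm_arm_setup_mdcr_el2() will rewrite the register below.
	 */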
	if (has_vhe())
		*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);

	/*
	 * Determine which of the possible debug states we're in:
	 *
	 *  - VCPU_DEBUG_HOST_OWNED: KVM has taken ownership of the guest's
	 *    breakpoint/watchpoint registers, or needs to use MDSCR_EL1 to do
	 *    software step or emulate the effects of the OS Lock being enabled.
	 *
	 *  - VCPU_DEBUG_GUEST_OWNED: The guest has debug exceptions enabled, and
	 *    the breakpoint/watchpoint registers need to be loaded eagerly.
	 *
	 *  - VCPU_DEBUG_FREE: Neither of the above applies; no breakpoint/watchpoint
	 *    context needs to be loaded on the CPU.
	 */
	if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
		vcpu->arch.debug_owner = VCPU_DEBUG_HOST_OWNED;
		setup_external_mdscr(vcpu);

		/*
		 * Steal the guest's single-step state machine if userspace
		 * wants to single-step the guest.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
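			/*
			 * Note the inverted sense of PSTATE.SS: when clear,
			 * the step state machine is active-pending and a step
			 * exception is due before another instruction retires.
			 */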
			if (*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
				vcpu_clear_flag(vcpu, GUEST_SS_ACTIVE_PENDING);
			else
				vcpu_set_flag(vcpu, GUEST_SS_ACTIVE_PENDING);

			if (!vcpu_get_flag(vcpu, HOST_SS_ACTIVE_PENDING))
				*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
			else
				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
		}
	} else {
		mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

		if (mdscr & (MDSCR_EL1_KDE | MDSCR_EL1_MDE))
			vcpu->arch.debug_owner = VCPU_DEBUG_GUEST_OWNED;
		else
			vcpu->arch.debug_owner = VCPU_DEBUG_FREE;
	}

	kvm_arm_setup_mdcr_el2(vcpu);
}

void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu)
{
	if (has_vhe())
		write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);

	if (likely(!(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
		return;

	/*
	 * Save the host's software step state and restore the guest's before
	 * potentially returning to userspace.
	 */
	if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
		vcpu_set_flag(vcpu, HOST_SS_ACTIVE_PENDING);
	else
		vcpu_clear_flag(vcpu, HOST_SS_ACTIVE_PENDING);

	if (vcpu_get_flag(vcpu, GUEST_SS_ACTIVE_PENDING))
		*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
	else
		*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
}

/*
 * Updates ownership of the debug registers after a trapped guest access to a
 * breakpoint/watchpoint register. Host ownership of the debug registers is of
 * strictly higher priority, and it is the responsibility of the VMM to emulate
 * guest debug exceptions in this configuration.
 */
void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu)
{
	if (kvm_host_owns_debug_regs(vcpu))
		return;

	vcpu->arch.debug_owner = VCPU_DEBUG_GUEST_OWNED;
	kvm_arm_setup_mdcr_el2(vcpu);
}

void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
{
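	/* OSLAR_EL1 is write-only; mirror the lock state into OSLSR_EL1.OSLK */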
	if (val & OSLAR_EL1_OSLK)
		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK);
	else
		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, &=, ~OSLSR_EL1_OSLK);

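	/*
	 * Cycle the vcpu through put/load so that the debug state is
	 * re-evaluated: toggling the OS Lock changes debug register ownership
	 * and the MDSCR_EL1/MDCR_EL2 values set up at load time.
	 */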
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
}

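/*
 * TRBE/trace state must not be touched through these helpers when protected
 * KVM is enabled or before KVM is fully initialised; callers must also not be
 * preemptible, since the flags live in per-CPU host data.
 */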
static bool skip_trbe_access(bool skip_condition)
{
	return (WARN_ON_ONCE(preemptible()) || skip_condition ||
		is_protected_kvm_enabled() || !is_kvm_arm_initialised());
}

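/*
 * TRBE_ENABLED is only set for nVHE (note the has_vhe() skip condition): it
 * lets the hypervisor know that the host has TRBE enabled, so tracing can be
 * drained and disabled across guest entry/exit.
 */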
void kvm_enable_trbe(void)
{
	if (!skip_trbe_access(has_vhe()))
		host_data_set_flag(TRBE_ENABLED);
}
EXPORT_SYMBOL_GPL(kvm_enable_trbe);

void kvm_disable_trbe(void)
{
	if (!skip_trbe_access(has_vhe()))
		host_data_clear_flag(TRBE_ENABLED);
}
EXPORT_SYMBOL_GPL(kvm_disable_trbe);

void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest)
{
	if (skip_trbe_access(false))
		return;

	if (has_vhe()) {
		write_sysreg_s(trfcr_while_in_guest, SYS_TRFCR_EL12);
		return;
	}

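	/*
	 * For nVHE, stash the value to apply while in the guest, and record
	 * whether the hypervisor actually needs to swap TRFCR_EL1 as part of
	 * the world switch.
	 */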
	*host_data_ptr(trfcr_while_in_guest) = trfcr_while_in_guest;
	if (read_sysreg_s(SYS_TRFCR_EL1) != trfcr_while_in_guest)
		host_data_set_flag(EL1_TRACING_CONFIGURED);
	else
		host_data_clear_flag(EL1_TRACING_CONFIGURED);
}
EXPORT_SYMBOL_GPL(kvm_tracing_set_el1_configuration);