// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Authors: Alex Bennée <alex.bennee@linaro.org>
 *	    Oliver Upton <oliver.upton@linux.dev>
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu:	the vcpu pointer
 *
 * This ensures we will trap access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 *  - Self-hosted Trace buffer (MDCR_EL2_E2TB)
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	/*
	 * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
	 * to disable guest access to the profiling and trace buffers.
	 */
	vcpu->arch.mdcr_el2 = FIELD_PREP(MDCR_EL2_HPMN,
					 *host_data_ptr(nr_event_counters));
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

	/* Is the VM being debugged by userspace? */
	if (vcpu->guest_debug)
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

	/*
	 * Trap debug registers if the guest doesn't have ownership of them.
	 */
	if (!kvm_guest_owns_debug_regs(vcpu))
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	if (vcpu_has_nv(vcpu))
		kvm_nested_setup_mdcr_el2(vcpu);

	/* Write MDCR_EL2 directly if we're already at EL2 */
	if (has_vhe())
		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	preempt_enable();
}

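/*
 * Snapshot the host's debug feature set once at init: the number of PMU
 * event counters and breakpoint/watchpoint registers, plus (nVHE only)
 * whether SPE, BRBE and TRBE are implemented and usable by the host.
 */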
void kvm_init_host_debug_data(void)
{
	u64 dfr0 = read_sysreg(id_aa64dfr0_el1);

	if (cpuid_feature_extract_signed_field(dfr0, ID_AA64DFR0_EL1_PMUVer_SHIFT) > 0)
		*host_data_ptr(nr_event_counters) = FIELD_GET(ARMV8_PMU_PMCR_N,
							      read_sysreg(pmcr_el0));

	*host_data_ptr(debug_brps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr0);
	*host_data_ptr(debug_wrps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr0);

	if (has_vhe())
		return;

	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
	    !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P))
		host_data_set_flag(HAS_SPE);

	/* Check if we have BRBE implemented and available on the host */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_BRBE_SHIFT))
		host_data_set_flag(HAS_BRBE);

	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT)) {
		/*
		 * Force trace to be disabled around guests in protected mode,
		 * even when there is no TRBE.
		 */
		if (is_protected_kvm_enabled())
			host_data_set_flag(EL1_TRACING_CONFIGURED);

		if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
		    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
			host_data_set_flag(HAS_TRBE);
	}
}

/*
 * Configures the 'external' MDSCR_EL1 value for the guest, i.e. when the host
 * has taken over MDSCR_EL1.
 *
 *  - Userspace is single-stepping the guest, and MDSCR_EL1.SS is forced to 1.
 *
 *  - Userspace is using the breakpoint/watchpoint registers to debug the
 *    guest, and MDSCR_EL1.MDE is forced to 1.
 *
 *  - The guest has enabled the OS Lock, and KVM is forcing MDSCR_EL1.MDE to 0,
 *    masking all debug exceptions affected by the OS Lock.
 */
static void setup_external_mdscr(struct kvm_vcpu *vcpu)
{
	/*
	 * Use the guest's MDSCR_EL1 as a starting point, since there are
	 * several other features controlled by MDSCR_EL1 that are not relevant
	 * to the host.
	 *
	 * Clear the bits that KVM may use, which also satisfies emulation of
	 * the OS Lock as MDSCR_EL1.MDE is cleared.
	 */
	u64 mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1) & ~(MDSCR_EL1_SS |
							   MDSCR_EL1_MDE |
							   MDSCR_EL1_KDE);

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		mdscr |= MDSCR_EL1_SS;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
		mdscr |= MDSCR_EL1_MDE | MDSCR_EL1_KDE;

	vcpu->arch.external_mdscr_el1 = mdscr;
}

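/*
 * For illustration only (not part of this file): vcpu->guest_debug is set
 * by userspace through the KVM_SET_GUEST_DEBUG ioctl ahead of the next
 * vcpu load. A VMM single-stepping the guest would do something like:
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);	// vcpu_fd: the vcpu's fd
 */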
void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu)
{
	u64 mdscr;

	/* Must be called before kvm_vcpu_load_vhe() */
	KVM_BUG_ON(vcpu_get_flag(vcpu, SYSREGS_ON_CPU), vcpu->kvm);

	/*
	 * Determine which of the possible debug states we're in:
	 *
	 *  - VCPU_DEBUG_HOST_OWNED: KVM has taken ownership of the guest's
	 *    breakpoint/watchpoint registers, or needs to use MDSCR_EL1 to do
	 *    software step or emulate the effects of the OS Lock being enabled.
	 *
	 *  - VCPU_DEBUG_GUEST_OWNED: The guest has debug exceptions enabled, and
	 *    the breakpoint/watchpoint registers need to be loaded eagerly.
	 *
	 *  - VCPU_DEBUG_FREE: Neither of the above applies, and no breakpoint/
	 *    watchpoint context needs to be loaded on the CPU.
	 */
	if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
		vcpu->arch.debug_owner = VCPU_DEBUG_HOST_OWNED;
		setup_external_mdscr(vcpu);

		/*
		 * Steal the guest's single-step state machine if userspace
		 * wants to single-step the guest.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
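			/*
			 * PSTATE.SS set means the guest's step is
			 * active-not-pending; clear means the step exception
			 * is about to be taken (active-pending).
			 */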
			if (*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
				vcpu_clear_flag(vcpu, GUEST_SS_ACTIVE_PENDING);
			else
				vcpu_set_flag(vcpu, GUEST_SS_ACTIVE_PENDING);

			if (!vcpu_get_flag(vcpu, HOST_SS_ACTIVE_PENDING))
				*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
			else
				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
		}
	} else {
		mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

		if (mdscr & (MDSCR_EL1_KDE | MDSCR_EL1_MDE))
			vcpu->arch.debug_owner = VCPU_DEBUG_GUEST_OWNED;
		else
			vcpu->arch.debug_owner = VCPU_DEBUG_FREE;
	}

	kvm_arm_setup_mdcr_el2(vcpu);
}

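/*
 * Mirror image of the single-step handling in kvm_vcpu_load_debug(): the
 * host's PSTATE.SS state is stashed in HOST_SS_ACTIVE_PENDING so the pair
 * of state machines can be swapped again on the next vcpu_load().
 */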
void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu)
{
	if (likely(!(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
		return;

	/*
	 * Save the host's software step state and restore the guest's before
	 * potentially returning to userspace.
	 */
	if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
		vcpu_set_flag(vcpu, HOST_SS_ACTIVE_PENDING);
	else
		vcpu_clear_flag(vcpu, HOST_SS_ACTIVE_PENDING);

	if (vcpu_get_flag(vcpu, GUEST_SS_ACTIVE_PENDING))
		*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
	else
		*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
}

/*
 * Updates ownership of the debug registers after a trapped guest access to a
 * breakpoint/watchpoint register. Host ownership of the debug registers is of
 * strictly higher priority, and it is the responsibility of the VMM to emulate
 * guest debug exceptions in this configuration.
 */
void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu)
{
	if (kvm_host_owns_debug_regs(vcpu))
		return;

	vcpu->arch.debug_owner = VCPU_DEBUG_GUEST_OWNED;
	kvm_arm_setup_mdcr_el2(vcpu);
}

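/*
 * Emulate a guest write to OSLAR_EL1 by updating the emulated OS Lock
 * Status register (OSLSR_EL1.OSLK) to match.
 */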
void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
{
	if (val & OSLAR_EL1_OSLK)
		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK);
	else
		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, &=, ~OSLSR_EL1_OSLK);

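	/*
	 * Toggling the OS Lock can change debug ownership, so do a put/load
	 * cycle to recompute the debug state and MDCR_EL2 traps.
	 */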
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
}

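/*
 * TRBE/TRFCR state can only be poked from a non-preemptible context, and
 * must be left alone for protected VMs or before KVM is initialised.
 */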
static bool skip_trbe_access(bool skip_condition)
{
	return (WARN_ON_ONCE(preemptible()) || skip_condition ||
		is_protected_kvm_enabled() || !is_kvm_arm_initialised());
}

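/*
 * Track whether the TRBE driver currently has the trace buffer enabled.
 * The flag is only needed on nVHE hosts, where the hypervisor has to
 * drain and disable the buffer around guest entry; with VHE the host
 * stays in control of the buffer and these calls are no-ops.
 */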
void kvm_enable_trbe(void)
{
	if (!skip_trbe_access(has_vhe()))
		host_data_set_flag(TRBE_ENABLED);
}
EXPORT_SYMBOL_GPL(kvm_enable_trbe);

void kvm_disable_trbe(void)
{
	if (!skip_trbe_access(has_vhe()))
		host_data_clear_flag(TRBE_ENABLED);
}
EXPORT_SYMBOL_GPL(kvm_disable_trbe);

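/*
 * Program the TRFCR_EL1 value to apply while running a guest. With VHE it
 * can be written straight to the hardware (via TRFCR_EL12); otherwise it
 * is stashed for the hypervisor to swap in on guest entry, with
 * EL1_TRACING_CONFIGURED tracking whether a swap is needed at all.
 */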
void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest)
{
	if (skip_trbe_access(false))
		return;

	if (has_vhe()) {
		write_sysreg_s(trfcr_while_in_guest, SYS_TRFCR_EL12);
		return;
	}

	*host_data_ptr(trfcr_while_in_guest) = trfcr_while_in_guest;
	if (read_sysreg_s(SYS_TRFCR_EL1) != trfcr_while_in_guest)
		host_data_set_flag(EL1_TRACING_CONFIGURED);
	else
		host_data_clear_flag(EL1_TRACING_CONFIGURED);
}
EXPORT_SYMBOL_GPL(kvm_tracing_set_el1_configuration);
273