// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Authors: Alex Bennée <alex.bennee@linaro.org>
 *          Oliver Upton <oliver.upton@linux.dev>
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu: the vcpu pointer
 *
 * This ensures we will trap access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	/*
	 * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
	 * to disable guest access to the profiling and trace buffers
	 */
	vcpu->arch.mdcr_el2 = FIELD_PREP(MDCR_EL2_HPMN,
					 *host_data_ptr(nr_event_counters));
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

	/* Is the VM being debugged by userspace? */
	if (vcpu->guest_debug)
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

	/*
	 * Trap debug registers if the guest doesn't have ownership of them.
	 */
	if (!kvm_guest_owns_debug_regs(vcpu))
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	/* Write MDCR_EL2 directly if we're already at EL2 */
	if (has_vhe())
		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	preempt_enable();
}
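
/**
 * kvm_init_host_debug_data - probe the host's debug facilities at init time
 *
 * Records the number of PMU event counters and implemented
 * breakpoints/watchpoints in the per-CPU host data. On nVHE hosts, also
 * notes whether SPE, BRBE and TRBE are implemented and usable, as the
 * hypervisor may need to account for them around guest entry/exit.
 */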
void kvm_init_host_debug_data(void)
{
	u64 dfr0 = read_sysreg(id_aa64dfr0_el1);

	if (cpuid_feature_extract_signed_field(dfr0, ID_AA64DFR0_EL1_PMUVer_SHIFT) > 0)
		*host_data_ptr(nr_event_counters) = FIELD_GET(ARMV8_PMU_PMCR_N,
							      read_sysreg(pmcr_el0));

	*host_data_ptr(debug_brps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr0);
	*host_data_ptr(debug_wrps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr0);
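
	/* The flags below are only consumed by the nVHE hypervisor */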
	if (has_vhe())
		return;

	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
	    !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P))
		host_data_set_flag(HAS_SPE);

	/* Check if we have BRBE implemented and available at the host */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_BRBE_SHIFT))
		host_data_set_flag(HAS_BRBE);

	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT)) {
		/* Force disable trace in protected mode in case of no TRBE */
		if (is_protected_kvm_enabled())
			host_data_set_flag(EL1_TRACING_CONFIGURED);

		if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
		    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
			host_data_set_flag(HAS_TRBE);
	}
}
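
/**
 * kvm_debug_init_vhe - debug initialisation for VHE hosts
 *
 * PMSCR_EL1.E{0,1}SPE reset to UNKNOWN values; if SPE is implemented,
 * start out with statistical profiling disabled at EL1 and EL0.
 */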
void kvm_debug_init_vhe(void)
{
	/* Clear PMSCR_EL1.E{0,1}SPE which reset to UNKNOWN values. */
	if (SYS_FIELD_GET(ID_AA64DFR0_EL1, PMSVer, read_sysreg(id_aa64dfr0_el1)))
		write_sysreg_el1(0, SYS_PMSCR);
}

/*
 * Configures the 'external' MDSCR_EL1 value for the guest, i.e. when the host
 * has taken over MDSCR_EL1.
 *
 *  - Userspace is single-stepping the guest, and MDSCR_EL1.SS is forced to 1.
 *
 *  - Userspace is using the breakpoint/watchpoint registers to debug the
 *    guest, and MDSCR_EL1.MDE is forced to 1.
 *
 *  - The guest has enabled the OS Lock, and KVM is forcing MDSCR_EL1.MDE to 0,
 *    masking all debug exceptions affected by the OS Lock.
 */
static void setup_external_mdscr(struct kvm_vcpu *vcpu)
{
	/*
	 * Use the guest's MDSCR_EL1 as a starting point, since there are
	 * several other features controlled by MDSCR_EL1 that are not relevant
	 * to the host.
	 *
	 * Clear the bits that KVM may use which also satisfies emulation of
	 * the OS Lock as MDSCR_EL1.MDE is cleared.
	 */
	u64 mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1) & ~(MDSCR_EL1_SS |
							   MDSCR_EL1_MDE |
							   MDSCR_EL1_KDE);

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		mdscr |= MDSCR_EL1_SS;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
		mdscr |= MDSCR_EL1_MDE | MDSCR_EL1_KDE;

	vcpu->arch.external_mdscr_el1 = mdscr;
}
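
/**
 * kvm_vcpu_load_debug - load the debug state on vcpu_load
 * @vcpu: the vcpu pointer
 *
 * Works out who owns the vCPU's debug state (the host, the guest, or
 * nobody), computes the external MDSCR_EL1 value when the host owns it,
 * and derives the MDCR_EL2 configuration for the run loop.
 */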
void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu)
{
	u64 mdscr;

	/* Must be called before kvm_vcpu_load_vhe() */
	KVM_BUG_ON(vcpu_get_flag(vcpu, SYSREGS_ON_CPU), vcpu->kvm);

	if (has_vhe())
		*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);

	/*
	 * Determine which of the possible debug states we're in:
	 *
	 *  - VCPU_DEBUG_HOST_OWNED: KVM has taken ownership of the guest's
	 *    breakpoint/watchpoint registers, or needs to use MDSCR_EL1 to do
	 *    software step or emulate the effects of the OS Lock being enabled.
	 *
	 *  - VCPU_DEBUG_GUEST_OWNED: The guest has debug exceptions enabled, and
	 *    the breakpoint/watchpoint registers need to be loaded eagerly.
	 *
	 *  - VCPU_DEBUG_FREE: Neither of the above apply, no breakpoint/watchpoint
	 *    context needs to be loaded on the CPU.
	 */
	if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
		vcpu->arch.debug_owner = VCPU_DEBUG_HOST_OWNED;
		setup_external_mdscr(vcpu);

		/*
		 * Steal the guest's single-step state machine if userspace wants
		 * to single-step the guest.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			if (*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
				vcpu_clear_flag(vcpu, GUEST_SS_ACTIVE_PENDING);
			else
				vcpu_set_flag(vcpu, GUEST_SS_ACTIVE_PENDING);

			if (!vcpu_get_flag(vcpu, HOST_SS_ACTIVE_PENDING))
				*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
			else
				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
		}
	} else {
		mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

		if (mdscr & (MDSCR_EL1_KDE | MDSCR_EL1_MDE))
			vcpu->arch.debug_owner = VCPU_DEBUG_GUEST_OWNED;
		else
			vcpu->arch.debug_owner = VCPU_DEBUG_FREE;
	}

	kvm_arm_setup_mdcr_el2(vcpu);
}
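
/**
 * kvm_vcpu_put_debug - put the debug state on vcpu_put
 * @vcpu: the vcpu pointer
 *
 * Restores the host's MDCR_EL2 value on VHE and, when userspace is
 * single-stepping the guest, saves the host's software step state and
 * restores the guest's ahead of a potential return to userspace.
 */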
void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu)
{
	if (has_vhe())
		write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);

	if (likely(!(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
		return;

	/*
	 * Save the host's software step state and restore the guest's before
	 * potentially returning to userspace.
	 */
	if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
		vcpu_set_flag(vcpu, HOST_SS_ACTIVE_PENDING);
	else
		vcpu_clear_flag(vcpu, HOST_SS_ACTIVE_PENDING);

	if (vcpu_get_flag(vcpu, GUEST_SS_ACTIVE_PENDING))
		*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
	else
		*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
}

/*
 * Updates ownership of the debug registers after a trapped guest access to a
 * breakpoint/watchpoint register. Host ownership of the debug registers is of
 * strictly higher priority, and it is the responsibility of the VMM to emulate
 * guest debug exceptions in this configuration.
 */
void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu)
{
	if (kvm_host_owns_debug_regs(vcpu))
		return;

	vcpu->arch.debug_owner = VCPU_DEBUG_GUEST_OWNED;
	kvm_arm_setup_mdcr_el2(vcpu);
}
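
/**
 * kvm_debug_handle_oslar - emulate a guest write to OSLAR_EL1
 * @vcpu: the vcpu pointer
 * @val: the value written by the guest
 *
 * Propagates the OS Lock bit into the emulated OSLSR_EL1 and reloads the
 * vCPU so the new debug configuration takes effect.
 */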
void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
{
	if (val & OSLAR_EL1_OSLK)
		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK);
	else
		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, &=, ~OSLSR_EL1_OSLK);
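
	/* Force a reload so the debug owner and MDCR_EL2 are recomputed */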
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
}
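
/**
 * kvm_enable_trbe - flag that the host is using TRBE
 *
 * Lets the host's trace buffer driver tell KVM that TRBE is in use, so the
 * nVHE hypervisor can account for it when entering a guest. A no-op on VHE
 * and protected-mode hosts; must be called with preemption disabled.
 */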
void kvm_enable_trbe(void)
{
	if (has_vhe() || is_protected_kvm_enabled() ||
	    WARN_ON_ONCE(preemptible()))
		return;

	host_data_set_flag(TRBE_ENABLED);
}
EXPORT_SYMBOL_GPL(kvm_enable_trbe);
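
/**
 * kvm_disable_trbe - flag that the host has stopped using TRBE
 *
 * Pairs with kvm_enable_trbe(); the same constraints apply.
 */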
void kvm_disable_trbe(void)
{
	if (has_vhe() || is_protected_kvm_enabled() ||
	    WARN_ON_ONCE(preemptible()))
		return;

	host_data_clear_flag(TRBE_ENABLED);
}
EXPORT_SYMBOL_GPL(kvm_disable_trbe);
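
/**
 * kvm_tracing_set_el1_configuration - set the TRFCR value to use in guests
 * @trfcr_while_in_guest: the TRFCR_EL1 value to apply while a guest runs
 *
 * On VHE hosts the value can be written to TRFCR_EL12 straight away. On
 * nVHE hosts it is stashed for the hypervisor, and EL1_TRACING_CONFIGURED
 * records whether TRFCR_EL1 actually needs swapping on guest entry/exit.
 */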
void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest)
{
	if (is_protected_kvm_enabled() || WARN_ON_ONCE(preemptible()))
		return;

	if (has_vhe()) {
		write_sysreg_s(trfcr_while_in_guest, SYS_TRFCR_EL12);
		return;
	}

	*host_data_ptr(trfcr_while_in_guest) = trfcr_while_in_guest;
	if (read_sysreg_s(SYS_TRFCR_EL1) != trfcr_while_in_guest)
		host_data_set_flag(EL1_TRACING_CONFIGURED);
	else
		host_data_clear_flag(EL1_TRACING_CONFIGURED);
}
EXPORT_SYMBOL_GPL(kvm_tracing_set_el1_configuration);