// SPDX-License-Identifier: GPL-2.0
/*
 * KVM guest debug register tests
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */
#include <stdio.h>
#include <string.h>
#include "kvm_util.h"
#include "processor.h"
#include "apic.h"

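/*
 * Both flags live in bit 13 of their respective registers: DR7.GD (general
 * detect) arms a #DB on any MOV to/from a debug register, and the CPU
 * reports that condition by setting DR6.BD.
 */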
#define DR6_BD		(1 << 13)
#define DR7_GD		(1 << 13)

#define IRQ_VECTOR 0xAA

/* For testing data access debug BP */
uint32_t guest_value;

extern unsigned char sw_bp, hw_bp, write_data, ss_start, bd_start;

static void guest_code(void)
{
	/* Create a pending interrupt on current vCPU */
	x2apic_enable();
	x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT |
			 APIC_DM_FIXED | IRQ_VECTOR);
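	/*
	 * The self-IPI stays pending: the guest runs with interrupts disabled
	 * until the "sti" in the single-step block below, and even then
	 * KVM_GUESTDBG_BLOCKIRQ is expected to keep it from being delivered.
	 */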

	/*
	 * Software BP tests.
	 *
	 * NOTE: the sw_bp label needs to come before the instruction here,
	 * because int3 is an exception rather than a normal trap for
	 * KVM_SET_GUEST_DEBUG (we capture it using the vcpu exception
	 * bitmap), so the reported PC is the int3 itself.
	 */
	asm volatile("sw_bp: int3");

	/* Hardware instruction BP test */
	asm volatile("hw_bp: nop");

	/* Hardware data BP test */
	asm volatile("mov $1234,%%rax;\n\t"
		     "mov %%rax,%0;\n\t write_data:"
		     : "=m" (guest_value) : : "rax");
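	/*
	 * Data breakpoints are trap-style: the #DB is reported after the
	 * store retires, so the expected PC is the write_data label that
	 * follows the mov rather than the mov itself.
	 */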

	/*
	 * Single-step test, covers 2 basic instructions and 2 emulated ones.
	 *
	 * Enable interrupts during the single stepping to verify that the
	 * pending interrupt we raised is not handled due to
	 * KVM_GUESTDBG_BLOCKIRQ.
	 *
	 * Write MSR_IA32_TSC_DEADLINE to verify that KVM's fastpath handler
	 * exits to userspace due to single-step being enabled.
	 */
	asm volatile("ss_start: "
		     "sti\n\t"
		     "xor %%eax,%%eax\n\t"
		     "cpuid\n\t"
		     "movl $" __stringify(MSR_IA32_TSC_DEADLINE) ", %%ecx\n\t"
		     "wrmsr\n\t"
		     "cli\n\t"
		     : : : "eax", "ebx", "ecx", "edx");
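	/*
	 * CPUID always VM-exits and is emulated by KVM, and the TSC_DEADLINE
	 * WRMSR is normally completed in KVM's fastpath without returning to
	 * userspace; both must still produce single-step debug exits here.
	 */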

	/* DR6.BD test */
	asm volatile("bd_start: mov %%dr0, %%rax" : : : "rax");
	GUEST_DONE();
}

#define  CAST_TO_RIP(v)  ((unsigned long long)&(v))

static void vcpu_skip_insn(struct kvm_vcpu *vcpu, int insn_len)
{
	struct kvm_regs regs;

	vcpu_regs_get(vcpu, &regs);
	regs.rip += insn_len;
	vcpu_regs_set(vcpu, &regs);
}

int main(void)
{
	struct kvm_guest_debug debug;
	unsigned long long target_dr6, target_rip;
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;
	struct ucall uc;
	uint64_t cmd;
	int i;
	/* Instruction lengths starting at ss_start */
	int ss_size[6] = {
		1,		/* sti */
		2,		/* xor */
		2,		/* cpuid */
		5,		/* mov */
		2,		/* wrmsr */
		1,		/* cli */
	};
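	/*
	 * The lengths come from the encodings used in guest_code: one-byte
	 * sti/cli, two-byte "xor %eax,%eax", cpuid and wrmsr (both
	 * 0F-prefixed), and the five-byte "movl $imm32, %ecx".
	 */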

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_SET_GUEST_DEBUG));

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	run = vcpu->run;

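	/*
	 * With KVM_GUESTDBG_USE_SW_BP set, the guest's #BP is not injected
	 * back into the guest but reported to userspace as KVM_EXIT_DEBUG;
	 * the test then steps over the one-byte int3 by hand.
	 */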
	/* Test software BPs - int3 */
	memset(&debug, 0, sizeof(debug));
	debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
	vcpu_guest_debug_set(vcpu, &debug);
	vcpu_run(vcpu);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
		    run->debug.arch.exception == BP_VECTOR &&
		    run->debug.arch.pc == CAST_TO_RIP(sw_bp),
		    "INT3: exit %d exception %d rip 0x%llx (should be 0x%llx)",
		    run->exit_reason, run->debug.arch.exception,
		    run->debug.arch.pc, CAST_TO_RIP(sw_bp));
	vcpu_skip_insn(vcpu, 1);

	/* Test instruction HW BP over DR[0-3] */
	for (i = 0; i < 4; i++) {
		memset(&debug, 0, sizeof(debug));
		debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
		debug.arch.debugreg[i] = CAST_TO_RIP(hw_bp);
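		/*
		 * DR7: bit 10 is reserved and reads as 1, and bit (2*i + 1)
		 * is the global-enable bit Gi for DRi; leaving the R/W and
		 * LEN fields at 0 selects an instruction-execution breakpoint.
		 */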
		debug.arch.debugreg[7] = 0x400 | (1UL << (2*i+1));
		vcpu_guest_debug_set(vcpu, &debug);
		vcpu_run(vcpu);
		target_dr6 = 0xffff0ff0 | (1UL << i);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
			    run->debug.arch.exception == DB_VECTOR &&
			    run->debug.arch.pc == CAST_TO_RIP(hw_bp) &&
			    run->debug.arch.dr6 == target_dr6,
			    "INS_HW_BP (DR%d): exit %d exception %d rip 0x%llx "
			    "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
			    i, run->exit_reason, run->debug.arch.exception,
			    run->debug.arch.pc, CAST_TO_RIP(hw_bp),
			    run->debug.arch.dr6, target_dr6);
	}
	/* Skip "nop" */
	vcpu_skip_insn(vcpu, 1);

	/* Test data access HW BP over DR[0-3] */
	for (i = 0; i < 4; i++) {
		memset(&debug, 0, sizeof(debug));
		debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
		debug.arch.debugreg[i] = CAST_TO_RIP(guest_value);
		debug.arch.debugreg[7] = 0x00000400 | (1UL << (2*i+1)) |
		    (0x000d0000UL << (4*i));
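		/*
		 * 0x000d0000 sets DR0's R/W field to 0b01 (break on data
		 * writes) and its LEN field to 0b11 (4-byte range); the
		 * 4*i shift moves those nibbles to the fields for DRi.
		 */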
		vcpu_guest_debug_set(vcpu, &debug);
		vcpu_run(vcpu);
		target_dr6 = 0xffff0ff0 | (1UL << i);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
			    run->debug.arch.exception == DB_VECTOR &&
			    run->debug.arch.pc == CAST_TO_RIP(write_data) &&
			    run->debug.arch.dr6 == target_dr6,
			    "DATA_HW_BP (DR%d): exit %d exception %d rip 0x%llx "
			    "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
			    i, run->exit_reason, run->debug.arch.exception,
			    run->debug.arch.pc, CAST_TO_RIP(write_data),
			    run->debug.arch.dr6, target_dr6);
		/* Roll back the 7-byte "mov" so the write repeats for the next DR */
		vcpu_skip_insn(vcpu, -7);
	}
	/* Skip the 7-byte "mov" */
	vcpu_skip_insn(vcpu, 7);


	/* Test single step */
	target_rip = CAST_TO_RIP(ss_start);
	target_dr6 = 0xffff4ff0ULL;
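	/* 0xffff4ff0 is DR6's read-as-1 pattern plus BS (bit 14, single step) */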
	for (i = 0; i < (sizeof(ss_size) / sizeof(ss_size[0])); i++) {
		target_rip += ss_size[i];
		memset(&debug, 0, sizeof(debug));
		debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP |
				KVM_GUESTDBG_BLOCKIRQ;
		debug.arch.debugreg[7] = 0x00000400;
		vcpu_guest_debug_set(vcpu, &debug);
		vcpu_run(vcpu);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
			    run->debug.arch.exception == DB_VECTOR &&
			    run->debug.arch.pc == target_rip &&
			    run->debug.arch.dr6 == target_dr6,
			    "SINGLE_STEP[%d]: exit %d exception %d rip 0x%llx "
			    "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
			    i, run->exit_reason, run->debug.arch.exception,
			    run->debug.arch.pc, target_rip, run->debug.arch.dr6,
			    target_dr6);
	}

	/* Finally test global disable */
	memset(&debug, 0, sizeof(debug));
	debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
	debug.arch.debugreg[7] = 0x400 | DR7_GD;
	vcpu_guest_debug_set(vcpu, &debug);
	vcpu_run(vcpu);
	target_dr6 = 0xffff0ff0 | DR6_BD;
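	/*
	 * With DR7.GD set, the guest's "mov %dr0, %rax" at bd_start raises
	 * #DB before the access completes, with DR6.BD reported, so the
	 * expected PC is bd_start itself.
	 */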
	TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
		    run->debug.arch.exception == DB_VECTOR &&
		    run->debug.arch.pc == CAST_TO_RIP(bd_start) &&
		    run->debug.arch.dr6 == target_dr6,
		    "DR7.GD: exit %d exception %d rip 0x%llx "
		    "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
		    run->exit_reason, run->debug.arch.exception,
		    run->debug.arch.pc, CAST_TO_RIP(bd_start), run->debug.arch.dr6,
		    target_dr6);

	/* Disable all debug controls, run to the end */
	memset(&debug, 0, sizeof(debug));
	vcpu_guest_debug_set(vcpu, &debug);

	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	cmd = get_ucall(vcpu, &uc);
	TEST_ASSERT(cmd == UCALL_DONE, "UCALL_DONE");

	kvm_vm_free(vm);

	return 0;
}