/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_DEBUGREG_H
#define _ASM_X86_DEBUGREG_H

#include <linux/bug.h>
#include <linux/percpu.h>
#include <uapi/asm/debugreg.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>

/*
 * Define the bits that are always set to 1 in DR7; only bit 10 is
 * architecturally reserved to '1'.
 *
 * This is also the init/reset value for DR7.
 */
#define DR7_FIXED_1	0x00000400
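
/*
 * Illustrative sketch (not part of the original header): with the
 * constants from <uapi/asm/debugreg.h>, a DR7 value that globally
 * enables a one-byte write breakpoint in slot 0 could be composed as
 *
 *	unsigned long dr7 = DR7_FIXED_1 |
 *			    (DR_GLOBAL_ENABLE << (0 * DR_ENABLE_SIZE)) |
 *			    ((DR_RW_WRITE | DR_LEN_1) <<
 *			     (DR_CONTROL_SHIFT + 0 * DR_CONTROL_SIZE));
 *
 * The real encode/decode logic lives in arch/x86/kernel/hw_breakpoint.c.
 */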

DECLARE_PER_CPU(unsigned long, cpu_dr7);

#ifndef CONFIG_PARAVIRT_XXL
/*
 * These special macros can be used to get or set a debug register.
 */
#define get_debugreg(var, register)				\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)				\
	native_set_debugreg(register, value)
#endif
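
/*
 * Usage sketch (illustrative, not part of the original header): the
 * accessors read like plain assignments, e.g. sampling and then
 * re-initializing DR6 the way the #DB handler does:
 *
 *	unsigned long dr6;
 *
 *	get_debugreg(dr6, 6);
 *	set_debugreg(DR6_RESERVED, 6);
 *
 * With CONFIG_PARAVIRT_XXL the same macro names are provided by
 * <asm/paravirt.h> and route through pv_ops instead.
 */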

static __always_inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val;

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val));
		break;
	case 7:
		/*
		 * Use "asm volatile" for DR7 reads to forbid re-ordering them
		 * with other code.
		 *
		 * This is needed because a DR7 access can cause a #VC exception
		 * when running under SEV-ES. Taking a #VC exception is not a
		 * safe thing to do just anywhere in the entry code and
		 * re-ordering might place the access into an unsafe location.
		 *
		 * This happened in the NMI handler, where the DR7 read was
		 * re-ordered to happen before the call to sev_es_ist_enter(),
		 * causing stack recursion.
		 */
		asm volatile("mov %%db7, %0" : "=r" (val));
		break;
	default:
		BUG();
	}
	return val;
}

static __always_inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0"	::"r" (value));
		break;
	case 1:
		asm("mov %0, %%db1"	::"r" (value));
		break;
	case 2:
		asm("mov %0, %%db2"	::"r" (value));
		break;
	case 3:
		asm("mov %0, %%db3"	::"r" (value));
		break;
	case 6:
		asm("mov %0, %%db6"	::"r" (value));
		break;
	case 7:
		/*
		 * Use "asm volatile" for DR7 writes to forbid re-ordering them
		 * with other code.
		 *
		 * While it didn't happen with a DR7 write (see the DR7 read
		 * comment above which explains where it happened), add the
		 * "asm volatile" here too to avoid similar problems in the
		 * future.
		 */
		asm volatile("mov %0, %%db7"	::"r" (value));
		break;
	default:
		BUG();
	}
}

static inline void hw_breakpoint_disable(void)
{
	/* Reset the control register for HW Breakpoint */
	set_debugreg(DR7_FIXED_1, 7);

	/* Zero-out the individual HW breakpoint address registers */
	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
}
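
/*
 * Usage sketch (illustrative, not part of the original header):
 * callers that must quiesce the breakpoint hardware entirely, such
 * as the kexec path, simply do
 *
 *	hw_breakpoint_disable();
 *
 * DR7 is reset first so the breakpoints are already disarmed by the
 * time the address registers are zeroed.
 */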

static __always_inline bool hw_breakpoint_active(void)
{
	return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
}

extern void hw_breakpoint_restore(void);

static __always_inline unsigned long local_db_save(void)
{
	unsigned long dr7;

	if (static_cpu_has(X86_FEATURE_HYPERVISOR) && !hw_breakpoint_active())
		return 0;

	get_debugreg(dr7, 7);

	/* Architecturally set bit */
	dr7 &= ~DR7_FIXED_1;
	if (dr7)
		set_debugreg(DR7_FIXED_1, 7);

	/*
	 * Ensure the compiler doesn't lower the above statements into
	 * the critical section; disabling breakpoints late would not
	 * be good.
	 */
	barrier();

	return dr7;
}

static __always_inline void local_db_restore(unsigned long dr7)
{
	/*
	 * Ensure the compiler doesn't raise this statement into
	 * the critical section; enabling breakpoints early would
	 * not be good.
	 */
	barrier();
	if (dr7)
		set_debugreg(dr7, 7);
}
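
/*
 * Usage sketch (illustrative, not part of the original header): the
 * pair above brackets a region that must not take #DB, as the NMI and
 * #DB entry paths do:
 *
 *	unsigned long dr7;
 *
 *	dr7 = local_db_save();
 *	... fragile code, no hardware breakpoint can fire here ...
 *	local_db_restore(dr7);
 *
 * local_db_save() returns 0 when no breakpoints were armed, so
 * local_db_restore() only touches DR7 when there is something to
 * re-enable.
 */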

#ifdef CONFIG_CPU_SUP_AMD
extern void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr);
extern unsigned long amd_get_dr_addr_mask(unsigned int dr);
#else
static inline void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr) { }
static inline unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	return 0;
}
#endif
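
/*
 * Illustrative sketch (not part of the original header): AMD's debug
 * address-mask MSRs turn an exact-match breakpoint into a range match
 * by excluding the masked address bits from the comparison.  E.g.
 *
 *	amd_set_dr_addr_mask(0xfff, 0);
 *
 * would make the DR0 breakpoint ignore the low 12 address bits and
 * thus match anywhere in a 4K-aligned region.  On non-AMD builds the
 * stubs above compile away to nothing.
 */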

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
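
/*
 * Usage sketch (illustrative, not part of the original header):
 * DEBUGCTL is normally updated read-modify-write, e.g. setting
 * DEBUGCTLMSR_BTF so that single-stepping traps on branches instead
 * of on every instruction, as the ptrace block-step code does:
 *
 *	unsigned long debugctl = get_debugctlmsr();
 *
 *	debugctl |= DEBUGCTLMSR_BTF;
 *	update_debugctlmsr(debugctl);
 */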

#endif /* _ASM_X86_DEBUGREG_H */