/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_DEBUGREG_H
#define _ASM_X86_DEBUGREG_H

#include <linux/bug.h>
#include <linux/percpu.h>
#include <uapi/asm/debugreg.h>
#include <asm/cpufeature.h>

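/* Per-CPU software copy of DR7, kept up to date by the HW-breakpoint code. */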
DECLARE_PER_CPU(unsigned long, cpu_dr7);

#ifndef CONFIG_PARAVIRT_XXL
/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)				\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)				\
	native_set_debugreg(register, value)
#endif
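
/*
 * Usage sketch (illustrative, not part of this header): the #DB handler
 * reads DR6 to see which breakpoint fired, then puts DR6 back into its
 * quiescent state (DR6_RESERVED comes from the uapi header included above):
 *
 *	unsigned long dr6;
 *
 *	get_debugreg(dr6, 6);
 *	set_debugreg(DR6_RESERVED, 6);
 */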

static __always_inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val));
		break;
	case 7:
		/*
		 * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
		 * with other code.
		 *
		 * This is needed because a DR7 access can cause a #VC exception
		 * when running under SEV-ES. Taking a #VC exception is not a
		 * safe thing to do just anywhere in the entry code and
		 * re-ordering might place the access into an unsafe location.
		 *
		 * This happened in the NMI handler, where the DR7 read was
		 * re-ordered to happen before the call to sev_es_ist_enter(),
		 * causing stack recursion.
		 */
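		/*
		 * (__FORCE_ORDER is a dummy memory input operand defined in
		 * <asm/special_insns.h>; it forces the compiler to keep the
		 * tagged volatile asm statements in program order.)
		 */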
		asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
		break;
	default:
		BUG();
	}
	return val;
}

static __always_inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0"	::"r" (value));
		break;
	case 1:
		asm("mov %0, %%db1"	::"r" (value));
		break;
	case 2:
		asm("mov %0, %%db2"	::"r" (value));
		break;
	case 3:
		asm("mov %0, %%db3"	::"r" (value));
		break;
	case 6:
		asm("mov %0, %%db6"	::"r" (value));
		break;
	case 7:
		/*
		 * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
		 * with other code.
		 *
		 * While it didn't happen with a DR7 write (see the DR7 read
		 * comment above, which explains where it happened), add
		 * __FORCE_ORDER here too to avoid similar problems in the
		 * future.
		 */
		asm volatile("mov %0, %%db7"	::"r" (value), __FORCE_ORDER);
		break;
	default:
		BUG();
	}
}

static inline void hw_breakpoint_disable(void)
{
	/* Zero the control register for HW Breakpoint */
	set_debugreg(0UL, 7);

	/* Zero-out the individual HW breakpoint address registers */
	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
}

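/*
 * Test the cached per-CPU copy of DR7 for any global-enable bits; this
 * avoids touching the real (and, under SEV-ES, trapping) register.
 */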
static __always_inline bool hw_breakpoint_active(void)
{
	return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
}

extern void hw_breakpoint_restore(void);

static __always_inline unsigned long local_db_save(void)
{
	unsigned long dr7;

	if (static_cpu_has(X86_FEATURE_HYPERVISOR) && !hw_breakpoint_active())
		return 0;

	get_debugreg(dr7, 7);
	dr7 &= ~0x400; /* architecturally set bit; DR7 bit 10 always reads as 1 */
	if (dr7)
		set_debugreg(0, 7);
	/*
	 * Ensure the compiler doesn't lower the above statements into
	 * the critical section; disabling breakpoints late would not
	 * be good.
	 */
	barrier();

	return dr7;
}

static __always_inline void local_db_restore(unsigned long dr7)
{
	/*
	 * Ensure the compiler doesn't raise this statement into
	 * the critical section; enabling breakpoints early would
	 * not be good.
	 */
	barrier();
	if (dr7)
		set_debugreg(dr7, 7);
}
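
/*
 * Usage sketch (illustrative): the pair brackets entry-code critical
 * sections that must not take a #DB:
 *
 *	unsigned long dr7 = local_db_save();
 *
 *	... fragile entry code ...
 *
 *	local_db_restore(dr7);
 */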
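/*
 * AMD DR address masks: extra MSRs that mask off low address bits when
 * matching a breakpoint, letting one breakpoint cover a naturally aligned
 * range of addresses.
 */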
#ifdef CONFIG_CPU_SUP_AMD
extern void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr);
extern unsigned long amd_get_dr_addr_mask(unsigned int dr);
#else
static inline void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr) { }
static inline unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	return 0;
}
#endif

#endif /* _ASM_X86_DEBUGREG_H */