/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) Peter Wemm <peter@netplex.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef __i386__
#include <i386/pcpu.h>
#else /* !__i386__ */

#ifndef _MACHINE_PCPU_H_
#define	_MACHINE_PCPU_H_

#include <machine/_pmap.h>
#include <machine/segments.h>
#include <machine/tss.h>

#define	PC_PTI_STACK_SZ	16

struct monitorbuf {
	int idle_state;		/* Used by cpu_idle_mwait. */
	int stop_state;		/* Used by cpustop_handler. */
	char padding[128 - (2 * sizeof(int))];
};
_Static_assert(sizeof(struct monitorbuf) == 128, "2x cache line");
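
/*
 * Padding to 128 bytes (two cache lines) gives each CPU's monitored
 * words a range of their own; presumably this also keeps the
 * adjacent-line prefetcher from sharing a neighbour's line and causing
 * spurious MWAIT wakeups.
 */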

/*
 * The SMP parts are set up in pmap.c and locore.s for the BSP, and
 * mp_machdep.c sets up the data for the APs to "see" when they awake.
 * The reason for doing it via a struct is so that an array of pointers
 * to each CPU's data can be set up for things like "check curproc on
 * all other processors", as sketched below.
 */
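
/*
 * Illustrative sketch (consumer code, not part of this header): walking
 * every CPU's data via that array, e.g. to look at curthread on the
 * other CPUs; assumes <sys/pcpu.h> for pcpu_find() and <sys/smp.h> for
 * CPU_FOREACH():
 *
 *	struct pcpu *pc;
 *	int cpu;
 *
 *	CPU_FOREACH(cpu) {
 *		pc = pcpu_find(cpu);
 *		(void)pc->pc_curthread;
 *	}
 */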
#define	PCPU_MD_FIELDS							\
	struct monitorbuf pc_monitorbuf __aligned(128);	/* cache line */\
	struct	pcpu *pc_prvspace;	/* Self-reference */		\
	struct	pmap *pc_curpmap;					\
	struct	amd64tss *pc_tssp;	/* TSS segment active on CPU */	\
	void	*pc_pad0;						\
	uint64_t pc_kcr3;						\
	uint64_t pc_ucr3;						\
	uint64_t pc_saved_ucr3;						\
	register_t pc_rsp0;						\
	register_t pc_scratch_rsp;	/* User %rsp in syscall */	\
	register_t pc_scratch_rax;					\
	u_int	pc_apic_id;						\
	u_int	pc_acpi_id;		/* ACPI CPU id */		\
	/* Pointer to the CPU %fs descriptor */				\
	struct user_segment_descriptor	*pc_fs32p;			\
	/* Pointer to the CPU %gs descriptor */				\
	struct user_segment_descriptor	*pc_gs32p;			\
	/* Pointer to the CPU LDT descriptor */				\
	struct system_segment_descriptor *pc_ldt;			\
	/* Pointer to the CPU TSS descriptor */				\
	struct system_segment_descriptor *pc_tss;			\
	u_int	pc_cmci_mask;		/* MCx banks for CMCI */	\
	uint64_t pc_dbreg[16];		/* ddb debugging regs */	\
	uint64_t pc_pti_stack[PC_PTI_STACK_SZ];				\
	register_t pc_pti_rsp0;						\
	int	pc_dbreg_cmd;		/* ddb debugging reg cmd */	\
	u_int	pc_vcpu_id;		/* Xen vCPU ID */		\
	uint32_t pc_pcid_next;						\
	uint32_t pc_pcid_gen;						\
	uint32_t pc_unused;						\
	uint32_t pc_ibpb_set;						\
	void	*pc_mds_buf;						\
	void	*pc_mds_buf64;						\
	uint32_t pc_pad[4];						\
	uint8_t	pc_mds_tmp[64];						\
	u_int	pc_ipi_bitmap;						\
	struct amd64tss pc_common_tss;					\
	struct user_segment_descriptor pc_gdt[NGDT];			\
	void	*pc_smp_tlb_pmap;					\
	uint64_t pc_smp_tlb_addr1;					\
	uint64_t pc_smp_tlb_addr2;					\
	uint32_t pc_smp_tlb_gen;					\
	u_int	pc_smp_tlb_op;						\
	uint64_t pc_ucr3_load_mask;					\
	u_int	pc_small_core;						\
	u_int	pc_pcid_invlpg_workaround;				\
	struct pmap_pcid pc_kpmap_store;				\
	char	__pad[2900]		/* pad to UMA_PCPU_ALLOC_SIZE */

#define	PC_DBREG_CMD_NONE	0
#define	PC_DBREG_CMD_LOAD	1

#ifdef _KERNEL

#define	MONITOR_STOPSTATE_RUNNING	0
#define	MONITOR_STOPSTATE_STOPPED	1

/*
 * Evaluates to the type of the per-cpu variable name.
 */
#define	__pcpu_type(name)						\
	__typeof(((struct pcpu *)0)->name)
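
/*
 * For example, __pcpu_type(pc_curpmap) evaluates to "struct pmap *" and
 * __pcpu_type(pc_apic_id) to "u_int" (see PCPU_MD_FIELDS above).
 */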

#ifdef __SEG_GS
#define	get_pcpu() __extension__ ({					\
	static struct pcpu __seg_gs *__pc = 0;				\
									\
	__pc->pc_prvspace;						\
})
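
/*
 * __seg_gs is the compiler's %gs-relative named address space, so a
 * member access through a null __seg_gs pointer compiles to a load from
 * %gs:offsetof(struct pcpu, member); pc_prvspace is the self-reference
 * above, making this equivalent to the inline-asm get_pcpu() in the
 * !__SEG_GS branch below.
 */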

/*
 * Evaluates to the address of the per-cpu variable name.
 */
#define	__PCPU_PTR(name) __extension__ ({				\
	struct pcpu *__pc = get_pcpu();					\
									\
	&__pc->name;							\
})

/*
 * Evaluates to the value of the per-cpu variable name.
 */
#define	__PCPU_GET(name) __extension__ ({				\
	static struct pcpu __seg_gs *__pc = 0;				\
									\
	__pc->name;							\
})

/*
 * Adds the value to the per-cpu counter name.  The implementation
 * must be atomic with respect to interrupts.
 */
#define	__PCPU_ADD(name, val) do {					\
	static struct pcpu __seg_gs *__pc = 0;				\
	__pcpu_type(name) __val;					\
									\
	__val = (val);							\
	if (sizeof(__val) == 1 || sizeof(__val) == 2 ||			\
	    sizeof(__val) == 4 || sizeof(__val) == 8) {			\
		__pc->name += __val;					\
	} else								\
		*__PCPU_PTR(name) += __val;				\
} while (0)

/*
 * Sets the value of the per-cpu variable name to value val.
 */
#define	__PCPU_SET(name, val) do {					\
	static struct pcpu __seg_gs *__pc = 0;				\
	__pcpu_type(name) __val;					\
									\
	__val = (val);							\
	if (sizeof(__val) == 1 || sizeof(__val) == 2 ||			\
	    sizeof(__val) == 4 || sizeof(__val) == 8) {			\
		__pc->name = __val;					\
	} else								\
		*__PCPU_PTR(name) = __val;				\
} while (0)
#else /* !__SEG_GS */
/*
 * Evaluates to the byte offset of the per-cpu variable name.
 */
#define	__pcpu_offset(name)						\
	__offsetof(struct pcpu, name)

/*
 * Evaluates to the address of the per-cpu variable name.
 */
#define	__PCPU_PTR(name)						\
	(&get_pcpu()->name)

/*
 * Evaluates to the value of the per-cpu variable name.
 */
#define	__PCPU_GET(name) __extension__ ({				\
	__pcpu_type(name) __res;					\
	struct __s {							\
		u_char	__b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	};								\
									\
	if (sizeof(__res) == 1 || sizeof(__res) == 2 ||			\
	    sizeof(__res) == 4 || sizeof(__res) == 8) {			\
		__asm __volatile("mov %%gs:%c1,%0"			\
		    : "=r" (*(struct __s *)(void *)&__res)		\
		    : "i" (__pcpu_offset(name)));			\
	} else {							\
		__res = *__PCPU_PTR(name);				\
	}								\
	__res;								\
})
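
/*
 * The struct __s cast above sizes the register operand to exactly the
 * width of the variable (capped at 8 bytes), letting any object of
 * size 1, 2, 4 or 8 -- including pointers and small structs -- pass
 * through a general register of matching width in a single mov.
 */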

/*
 * Adds the value to the per-cpu counter name.  The implementation
 * must be atomic with respect to interrupts.
 */
#define	__PCPU_ADD(name, val) do {					\
	__pcpu_type(name) __val;					\
	struct __s {							\
		u_char	__b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	};								\
									\
	__val = (val);							\
	if (sizeof(__val) == 1 || sizeof(__val) == 2 ||			\
	    sizeof(__val) == 4 || sizeof(__val) == 8) {			\
		__asm __volatile("add %1,%%gs:%c0"			\
		    :							\
		    : "i" (__pcpu_offset(name)),			\
		      "r" (*(struct __s *)(void *)&__val)		\
		    : "cc", "memory");					\
	} else								\
		*__PCPU_PTR(name) += __val;				\
} while (0)
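
/*
 * The single read-modify-write "add" on %gs-relative memory above
 * cannot be torn by an interrupt on the local CPU, which is how
 * __PCPU_ADD meets its atomicity requirement without a lock prefix.
 */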

/*
 * Sets the value of the per-cpu variable name to value val.
 */
#define	__PCPU_SET(name, val) do {					\
	__pcpu_type(name) __val;					\
	struct __s {							\
		u_char	__b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	};								\
									\
	__val = (val);							\
	if (sizeof(__val) == 1 || sizeof(__val) == 2 ||			\
	    sizeof(__val) == 4 || sizeof(__val) == 8) {			\
		__asm __volatile("mov %1,%%gs:%c0"			\
		    :							\
		    : "i" (__pcpu_offset(name)),			\
		      "r" (*(struct __s *)(void *)&__val)		\
		    : "memory");					\
	} else {							\
		*__PCPU_PTR(name) = __val;				\
	}								\
} while (0)

#define	get_pcpu() __extension__ ({					\
	struct pcpu *__pc;						\
									\
	__asm __volatile("movq %%gs:%c1,%0"				\
	    : "=r" (__pc)						\
	    : "i" (__pcpu_offset(pc_prvspace)));			\
	__pc;								\
})
#endif /* !__SEG_GS */

#define	PCPU_GET(member)	__PCPU_GET(pc_ ## member)
#define	PCPU_ADD(member, val)	__PCPU_ADD(pc_ ## member, val)
#define	PCPU_PTR(member)	__PCPU_PTR(pc_ ## member)
#define	PCPU_SET(member, val)	__PCPU_SET(pc_ ## member, val)

#define	IS_BSP()	(PCPU_GET(cpuid) == 0)
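
/*
 * Illustrative usage sketch (kernel code; the pc_ prefix is supplied by
 * the wrappers above):
 *
 *	u_int apic = PCPU_GET(apic_id);		// this CPU's pc_apic_id
 *	PCPU_SET(dbreg_cmd, PC_DBREG_CMD_LOAD);	// this CPU's pc_dbreg_cmd
 */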

#define zpcpu_offset_cpu(cpu)	((uintptr_t)&__pcpu[0] + UMA_PCPU_ALLOC_SIZE * cpu)
#define zpcpu_base_to_offset(base) (void *)((uintptr_t)(base) - (uintptr_t)&__pcpu[0])
#define zpcpu_offset_to_base(base) (void *)((uintptr_t)(base) + (uintptr_t)&__pcpu[0])
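
/*
 * Per-CPU regions are spaced UMA_PCPU_ALLOC_SIZE apart starting at
 * __pcpu[0], so a pointer into CPU 0's region ("base") is stored as its
 * offset from __pcpu[0].  The zpcpu_*() accessors below can then reach
 * the local CPU's copy directly as %gs:(base), and zpcpu_get_cpu() in
 * <sys/pcpu.h> reconstructs any other CPU's copy from the same offset.
 */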

#define zpcpu_sub_protected(base, n) do {				\
	ZPCPU_ASSERT_PROTECTED();					\
	zpcpu_sub(base, n);						\
} while (0)

#define zpcpu_set_protected(base, n) do {				\
	__typeof(*base) __n = (n);					\
	ZPCPU_ASSERT_PROTECTED();					\
	switch (sizeof(*base)) {					\
	case 4:								\
		__asm __volatile("movl\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	case 8:								\
		__asm __volatile("movq\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	default:							\
		*zpcpu_get(base) = __n;					\
	}								\
} while (0)

#define zpcpu_add(base, n) do {						\
	__typeof(*base) __n = (n);					\
	CTASSERT(sizeof(*base) == 4 || sizeof(*base) == 8);		\
	switch (sizeof(*base)) {					\
	case 4:								\
		__asm __volatile("addl\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	case 8:								\
		__asm __volatile("addq\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	}								\
} while (0)

#define zpcpu_add_protected(base, n) do {				\
	ZPCPU_ASSERT_PROTECTED();					\
	zpcpu_add(base, n);						\
} while (0)

#define zpcpu_sub(base, n) do {						\
	__typeof(*base) __n = (n);					\
	CTASSERT(sizeof(*base) == 4 || sizeof(*base) == 8);		\
	switch (sizeof(*base)) {					\
	case 4:								\
		__asm __volatile("subl\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	case 8:								\
		__asm __volatile("subq\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	}								\
} while (0)
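
/*
 * Illustrative usage sketch: "c" is a per-CPU counter obtained in
 * offset form (e.g. from uma_zalloc_pcpu(9)); bumping this CPU's copy
 * is a single instruction and, as with __PCPU_ADD, needs no lock
 * prefix:
 *
 *	zpcpu_add(c, 1);	// addq $1,%gs:(c)
 */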

#endif /* _KERNEL */

#endif /* !_MACHINE_PCPU_H_ */

#endif /* __i386__ */