/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) Peter Wemm <peter@netplex.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PCPU_H_
#define	_MACHINE_PCPU_H_

#ifndef _SYS_CDEFS_H_
#error "sys/cdefs.h is a prerequisite for this file"
#endif

#include <machine/segments.h>
#include <machine/tss.h>

#define	PC_PTI_STACK_SZ	16

struct monitorbuf {
	int idle_state;		/* Used by cpu_idle_mwait. */
	int stop_state;		/* Used by cpustop_handler. */
	char padding[128 - (2 * sizeof(int))];
};
_Static_assert(sizeof(struct monitorbuf) == 128, "2x cache line");
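
/*
 * Illustrative sketch (not part of this header; STATE_MWAIT and the
 * MWAIT_* constants are assumptions modeled on x86/cpu_machdep.c):
 * cpu_idle_mwait() arms MONITOR on the local idle_state word, then
 * sleeps with MWAIT until the word is written or an interrupt arrives:
 *
 *	int *state = &PCPU_PTR(monitorbuf)->idle_state;
 *
 *	atomic_store_int(state, STATE_MWAIT);
 *	cpu_monitor(state, 0, 0);
 *	if (atomic_load_int(state) == STATE_MWAIT)
 *		cpu_mwait(MWAIT_INTRBREAK, MWAIT_C1);
 */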

/*
 * The SMP parts are set up in pmap.c and locore.s for the BSP, and
 * mp_machdep.c sets up the data for the APs to "see" when they wake up.
 * The reason for doing it via a struct is so that an array of pointers
 * to each CPU's data can be set up for things like "check curproc on
 * all other processors"; an illustrative sketch follows the field list
 * below.
 */
#define	PCPU_MD_FIELDS							\
	struct monitorbuf pc_monitorbuf __aligned(128);	/* cache line */\
	struct	pcpu *pc_prvspace;	/* Self-reference */		\
	struct	pmap *pc_curpmap;					\
	struct	amd64tss *pc_tssp;	/* TSS segment active on CPU */	\
	void	*pc_pad0;						\
	uint64_t pc_kcr3;						\
	uint64_t pc_ucr3;						\
	uint64_t pc_saved_ucr3;						\
	register_t pc_rsp0;						\
	register_t pc_scratch_rsp;	/* User %rsp in syscall */	\
	register_t pc_scratch_rax;					\
	u_int	pc_apic_id;						\
	u_int	pc_acpi_id;		/* ACPI CPU id */		\
	/* Pointer to the CPU %fs descriptor */				\
	struct user_segment_descriptor	*pc_fs32p;			\
	/* Pointer to the CPU %gs descriptor */				\
	struct user_segment_descriptor	*pc_gs32p;			\
	/* Pointer to the CPU LDT descriptor */				\
	struct system_segment_descriptor *pc_ldt;			\
	/* Pointer to the CPU TSS descriptor */				\
	struct system_segment_descriptor *pc_tss;			\
	uint64_t	pc_pm_save_cnt;					\
	u_int	pc_cmci_mask;		/* MCx banks for CMCI */	\
	uint64_t pc_dbreg[16];		/* ddb debugging regs */	\
	uint64_t pc_pti_stack[PC_PTI_STACK_SZ];				\
	register_t pc_pti_rsp0;						\
	int pc_dbreg_cmd;		/* ddb debugging reg cmd */	\
	u_int	pc_vcpu_id;		/* Xen vCPU ID */		\
	uint32_t pc_pcid_next;						\
	uint32_t pc_pcid_gen;						\
	uint32_t pc_unused;						\
	uint32_t pc_ibpb_set;						\
	void	*pc_mds_buf;						\
	void	*pc_mds_buf64;						\
	uint32_t pc_pad[2];						\
	uint8_t	pc_mds_tmp[64];						\
	u_int	pc_ipi_bitmap;						\
	struct amd64tss pc_common_tss;					\
	struct user_segment_descriptor pc_gdt[NGDT];			\
	void	*pc_smp_tlb_pmap;					\
	uint64_t pc_smp_tlb_addr1;					\
	uint64_t pc_smp_tlb_addr2;					\
	uint32_t pc_smp_tlb_gen;					\
	u_int	pc_smp_tlb_op;						\
	uint64_t pc_ucr3_load_mask;					\
	char	__pad[2916]		/* pad to UMA_PCPU_ALLOC_SIZE */
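
/*
 * Illustrative sketch of the "array of pointers" usage mentioned
 * above (cpuid_to_pcpu[] and pcpu_find() come from sys/pcpu.h,
 * CPU_FOREACH() from sys/smp.h; inspect_thread() is hypothetical):
 *
 *	struct pcpu *pc;
 *	int i;
 *
 *	CPU_FOREACH(i) {
 *		pc = cpuid_to_pcpu[i];	// equivalently pcpu_find(i)
 *		inspect_thread(pc->pc_curthread);
 *	}
 */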

#define	PC_DBREG_CMD_NONE	0
#define	PC_DBREG_CMD_LOAD	1

#ifdef _KERNEL

#define MONITOR_STOPSTATE_RUNNING	0
#define MONITOR_STOPSTATE_STOPPED	1

#if defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF)

/*
 * Evaluates to the byte offset of the per-cpu variable name.
 */
#define	__pcpu_offset(name)						\
	__offsetof(struct pcpu, name)

/*
 * Evaluates to the type of the per-cpu variable name.
 */
#define	__pcpu_type(name)						\
	__typeof(((struct pcpu *)0)->name)
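
/*
 * For example, __pcpu_offset(pc_curpmap) evaluates to
 * __offsetof(struct pcpu, pc_curpmap), and __pcpu_type(pc_curpmap)
 * evaluates to struct pmap *.
 */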

/*
 * Evaluates to the address of the per-cpu variable name.
 */
#define	__PCPU_PTR(name) __extension__ ({				\
	__pcpu_type(name) *__p;						\
									\
	__asm __volatile("movq %%gs:%1,%0; addq %2,%0"			\
	    : "=r" (__p)						\
	    : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace))),	\
	      "i" (__pcpu_offset(name)));				\
									\
	__p;								\
})
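
/*
 * Usage sketch (illustrative): the result is an ordinary C pointer
 * into this CPU's pcpu area, so it is only stable while migration is
 * prevented, e.g. inside a critical section:
 *
 *	critical_enter();
 *	u_int *mask = PCPU_PTR(cmci_mask);	// __PCPU_PTR(pc_cmci_mask)
 *	...
 *	critical_exit();
 */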

/*
 * Evaluates to the value of the per-cpu variable name.
 */
#define	__PCPU_GET(name) __extension__ ({				\
	__pcpu_type(name) __res;					\
	struct __s {							\
		u_char	__b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	} __s;								\
									\
	if (sizeof(__res) == 1 || sizeof(__res) == 2 ||			\
	    sizeof(__res) == 4 || sizeof(__res) == 8) {			\
		__asm __volatile("mov %%gs:%1,%0"			\
		    : "=r" (__s)					\
		    : "m" (*(struct __s *)(__pcpu_offset(name))));	\
		*(struct __s *)(void *)&__res = __s;			\
	} else {							\
		__res = *__PCPU_PTR(name);				\
	}								\
	__res;								\
})
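
/*
 * For example (illustrative), PCPU_GET(apic_id) becomes
 * __PCPU_GET(pc_apic_id) and, since u_int is one of the directly
 * supported sizes, compiles down to a single %gs-relative load:
 *
 *	movl	%gs:__pcpu_offset(pc_apic_id),%eax
 */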

/*
 * Adds the value to the per-cpu counter name.  The implementation
 * must be atomic with respect to interrupts.
 */
#define	__PCPU_ADD(name, val) do {					\
	__pcpu_type(name) __val;					\
	struct __s {							\
		u_char	__b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	} __s;								\
									\
	__val = (val);							\
	if (sizeof(__val) == 1 || sizeof(__val) == 2 ||			\
	    sizeof(__val) == 4 || sizeof(__val) == 8) {			\
		__s = *(struct __s *)(void *)&__val;			\
		__asm __volatile("add %1,%%gs:%0"			\
		    : "=m" (*(struct __s *)(__pcpu_offset(name)))	\
		    : "r" (__s));					\
	} else								\
		*__PCPU_PTR(name) += __val;				\
} while (0)

/*
 * Increments the value of the per-cpu counter name.  The implementation
 * must be atomic with respect to interrupts.
 */
#define	__PCPU_INC(name) do {						\
	CTASSERT(sizeof(__pcpu_type(name)) == 1 ||			\
	    sizeof(__pcpu_type(name)) == 2 ||				\
	    sizeof(__pcpu_type(name)) == 4 ||				\
	    sizeof(__pcpu_type(name)) == 8);				\
	if (sizeof(__pcpu_type(name)) == 1) {				\
		__asm __volatile("incb %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 2) {			\
		__asm __volatile("incw %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 4) {			\
		__asm __volatile("incl %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 8) {			\
		__asm __volatile("incq %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	}								\
} while (0)
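
/*
 * A note on the "atomic with respect to interrupts" requirement: each
 * single-instruction form above (e.g. "incq %gs:off") is a
 * %gs-relative read-modify-write that an interrupt on the same CPU
 * cannot split, and no lock prefix is needed because only the owning
 * CPU updates these fields through %gs.
 */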

/*
 * Sets the value of the per-cpu variable name to value val.
 */
#define	__PCPU_SET(name, val) do {					\
	__pcpu_type(name) __val;					\
	struct __s {							\
		u_char	__b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	} __s;								\
									\
	__val = (val);							\
	if (sizeof(__val) == 1 || sizeof(__val) == 2 ||			\
	    sizeof(__val) == 4 || sizeof(__val) == 8) {			\
		__s = *(struct __s *)(void *)&__val;			\
		__asm __volatile("mov %1,%%gs:%0"			\
		    : "=m" (*(struct __s *)(__pcpu_offset(name)))	\
		    : "r" (__s));					\
	} else {							\
		*__PCPU_PTR(name) = __val;				\
	}								\
} while (0)
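
/*
 * For example (illustrative), activating a new pmap on this CPU is a
 * single %gs-relative store:
 *
 *	PCPU_SET(curpmap, pmap);
 */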

#define	get_pcpu() __extension__ ({					\
	struct pcpu *__pc;						\
									\
	__asm __volatile("movq %%gs:%1,%0"				\
	    : "=r" (__pc)						\
	    : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace))));	\
	__pc;								\
})
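
/*
 * Usage sketch (illustrative): the returned pointer comes from the
 * pc_prvspace self-reference and names the current CPU's pcpu area,
 * so pin the thread first:
 *
 *	struct pcpu *pc;
 *
 *	critical_enter();
 *	pc = get_pcpu();
 *	...
 *	critical_exit();
 */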

#define	PCPU_GET(member)	__PCPU_GET(pc_ ## member)
#define	PCPU_ADD(member, val)	__PCPU_ADD(pc_ ## member, val)
#define	PCPU_INC(member)	__PCPU_INC(pc_ ## member)
#define	PCPU_PTR(member)	__PCPU_PTR(pc_ ## member)
#define	PCPU_SET(member, val)	__PCPU_SET(pc_ ## member, val)

#define	IS_BSP()	(PCPU_GET(cpuid) == 0)
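
/*
 * Putting the wrappers together (illustrative); by convention the BSP
 * has cpuid 0:
 *
 *	if (IS_BSP())
 *		printf("BSP APIC id: %u\n", PCPU_GET(apic_id));
 */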

#define zpcpu_offset_cpu(cpu)	((uintptr_t)&__pcpu[0] + UMA_PCPU_ALLOC_SIZE * (cpu))
#define zpcpu_base_to_offset(base) (void *)((uintptr_t)(base) - (uintptr_t)&__pcpu[0])
#define zpcpu_offset_to_base(base) (void *)((uintptr_t)(base) + (uintptr_t)&__pcpu[0])
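
/*
 * The arithmetic here (a sketch, assuming UMA_PCPU_ALLOC_SIZE from
 * vm/uma.h): on CPU n, %gs points at
 * &__pcpu[0] + n * UMA_PCPU_ALLOC_SIZE, and items from UMA per-CPU
 * zones are laid out with the same per-CPU stride.  Converting a CPU 0
 * base pointer with zpcpu_base_to_offset() therefore yields an offset
 * whose %gs-relative address is the local CPU's copy, which is what
 * the zpcpu_*() accessors below exploit.
 */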

#define zpcpu_sub_protected(base, n) do {				\
	ZPCPU_ASSERT_PROTECTED();					\
	zpcpu_sub(base, n);						\
} while (0)

#define zpcpu_set_protected(base, n) do {				\
	__typeof(*base) __n = (n);					\
	ZPCPU_ASSERT_PROTECTED();					\
	switch (sizeof(*base)) {					\
	case 4:								\
		__asm __volatile("movl\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	case 8:								\
		__asm __volatile("movq\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	default:							\
		*zpcpu_get(base) = __n;					\
	}								\
} while (0)

#define zpcpu_add(base, n) do {						\
	__typeof(*base) __n = (n);					\
	CTASSERT(sizeof(*base) == 4 || sizeof(*base) == 8);		\
	switch (sizeof(*base)) {					\
	case 4:								\
		__asm __volatile("addl\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	case 8:								\
		__asm __volatile("addq\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	}								\
} while (0)

#define zpcpu_add_protected(base, n) do {				\
	ZPCPU_ASSERT_PROTECTED();					\
	zpcpu_add(base, n);						\
} while (0)

#define zpcpu_sub(base, n) do {						\
	__typeof(*base) __n = (n);					\
	CTASSERT(sizeof(*base) == 4 || sizeof(*base) == 8);		\
	switch (sizeof(*base)) {					\
	case 4:								\
		__asm __volatile("subl\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	case 8:								\
		__asm __volatile("subq\t%1,%%gs:(%0)"			\
		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
		break;							\
	}								\
} while (0)
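
/*
 * Usage sketch (illustrative; "c" is assumed to come from a UMA
 * per-CPU zone allocation already converted with
 * zpcpu_base_to_offset() by the allocating code):
 *
 *	uint64_t *c;
 *
 *	zpcpu_add(c, 1);	// bump this CPU's copy
 *	zpcpu_sub(c, 1);	// and take it back
 */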

#else /* !__GNUCLIKE_ASM || !__GNUCLIKE___TYPEOF */

#error "this file needs to be ported to your compiler"

#endif /* __GNUCLIKE_ASM && __GNUCLIKE___TYPEOF */

#endif /* _KERNEL */

#endif /* !_MACHINE_PCPU_H_ */