/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) Peter Wemm <peter@netplex.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PCPU_H_
#define _MACHINE_PCPU_H_

#ifndef _SYS_CDEFS_H_
#error "sys/cdefs.h is a prerequisite for this file"
#endif

#include <machine/segments.h>
#include <machine/tss.h>

#define PC_PTI_STACK_SZ         16

struct monitorbuf {
        int idle_state;         /* Used by cpu_idle_mwait. */
        int stop_state;         /* Used by cpustop_handler. */
        char padding[128 - (2 * sizeof(int))];
};
_Static_assert(sizeof(struct monitorbuf) == 128, "2x cache line");

/*
 * The SMP parts are set up in pmap.c and locore.s for the BSP, and
 * mp_machdep.c sets up the data for the APs to "see" when they awake.
 * The reason for doing it via a struct is so that an array of pointers
 * to each CPU's data can be set up for things like "check curproc on all
 * other processors".
 */
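
/*
 * Illustration only, not part of this header: with that array of
 * per-CPU pointers in place, MI code can inspect another CPU's private
 * data.  A minimal sketch, assuming the cpuid_to_pcpu[] array and the
 * CPU_FOREACH() iterator provided by <sys/pcpu.h> and <sys/smp.h>:
 *
 *      u_int cpu;
 *
 *      CPU_FOREACH(cpu) {
 *              struct pcpu *pc = cpuid_to_pcpu[cpu];
 *              printf("cpu%u: curthread %p\n", cpu, pc->pc_curthread);
 *      }
 *
 * PCPU_MD_FIELDS below supplies the amd64-specific members that
 * <sys/pcpu.h> splices into struct pcpu.
 */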
#define PCPU_MD_FIELDS                                                  \
        struct monitorbuf pc_monitorbuf __aligned(128); /* cache line */\
        struct pcpu *pc_prvspace;       /* Self-reference */            \
        struct pmap *pc_curpmap;                                        \
        struct amd64tss *pc_tssp;       /* TSS segment active on CPU */ \
        void *pc_pad0;                                                  \
        uint64_t pc_kcr3;                                               \
        uint64_t pc_ucr3;                                               \
        uint64_t pc_saved_ucr3;                                         \
        register_t pc_rsp0;                                             \
        register_t pc_scratch_rsp;      /* User %rsp in syscall */      \
        register_t pc_scratch_rax;                                      \
        u_int   pc_apic_id;                                             \
        u_int   pc_acpi_id;             /* ACPI CPU id */               \
        /* Pointer to the CPU %fs descriptor */                         \
        struct user_segment_descriptor *pc_fs32p;                       \
        /* Pointer to the CPU %gs descriptor */                         \
        struct user_segment_descriptor *pc_gs32p;                       \
        /* Pointer to the CPU LDT descriptor */                         \
        struct system_segment_descriptor *pc_ldt;                       \
        /* Pointer to the CPU TSS descriptor */                         \
        struct system_segment_descriptor *pc_tss;                       \
        uint64_t pc_pm_save_cnt;                                        \
        u_int   pc_cmci_mask;           /* MCx banks for CMCI */        \
        uint64_t pc_dbreg[16];          /* ddb debugging regs */        \
        uint64_t pc_pti_stack[PC_PTI_STACK_SZ];                         \
        register_t pc_pti_rsp0;                                         \
        int     pc_dbreg_cmd;           /* ddb debugging reg cmd */     \
        u_int   pc_vcpu_id;             /* Xen vCPU ID */               \
        uint32_t pc_pcid_next;                                          \
        uint32_t pc_pcid_gen;                                           \
        uint32_t pc_unused;                                             \
        uint32_t pc_ibpb_set;                                           \
        void    *pc_mds_buf;                                            \
        void    *pc_mds_buf64;                                          \
        uint32_t pc_pad[2];                                             \
        uint8_t pc_mds_tmp[64];                                         \
        u_int   pc_ipi_bitmap;                                          \
        struct amd64tss pc_common_tss;                                  \
        struct user_segment_descriptor pc_gdt[NGDT];                    \
        void    *pc_smp_tlb_pmap;                                       \
        uint64_t pc_smp_tlb_addr1;                                      \
        uint64_t pc_smp_tlb_addr2;                                      \
        uint32_t pc_smp_tlb_gen;                                        \
        u_int   pc_smp_tlb_op;                                          \
        char    __pad[2924]             /* pad to UMA_PCPU_ALLOC_SIZE */

#define PC_DBREG_CMD_NONE       0
#define PC_DBREG_CMD_LOAD       1

#ifdef _KERNEL

#define MONITOR_STOPSTATE_RUNNING       0
#define MONITOR_STOPSTATE_STOPPED       1

#if defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF)

/*
 * Evaluates to the byte offset of the per-cpu variable name.
 */
#define __pcpu_offset(name)                                             \
        __offsetof(struct pcpu, name)

/*
 * Evaluates to the type of the per-cpu variable name.
 */
#define __pcpu_type(name)                                               \
        __typeof(((struct pcpu *)0)->name)

/*
 * Evaluates to the address of the per-cpu variable name.
 */
#define __PCPU_PTR(name) __extension__ ({                               \
        __pcpu_type(name) *__p;                                         \
                                                                        \
        __asm __volatile("movq %%gs:%1,%0; addq %2,%0"                  \
            : "=r" (__p)                                                \
            : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace))),       \
              "i" (__pcpu_offset(name)));                               \
                                                                        \
        __p;                                                            \
})

/*
 * Evaluates to the value of the per-cpu variable name.
 */
#define __PCPU_GET(name) __extension__ ({                               \
        __pcpu_type(name) __res;                                        \
        struct __s {                                                    \
                u_char  __b[MIN(sizeof(__pcpu_type(name)), 8)];         \
        } __s;                                                          \
                                                                        \
        if (sizeof(__res) == 1 || sizeof(__res) == 2 ||                 \
            sizeof(__res) == 4 || sizeof(__res) == 8) {                 \
                __asm __volatile("mov %%gs:%1,%0"                       \
                    : "=r" (__s)                                        \
                    : "m" (*(struct __s *)(__pcpu_offset(name))));      \
                *(struct __s *)(void *)&__res = __s;                    \
        } else {                                                        \
                __res = *__PCPU_PTR(name);                              \
        }                                                               \
        __res;                                                          \
})
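
/*
 * Illustration only, not part of this header: for a field of 1, 2, 4
 * or 8 bytes, __PCPU_GET() compiles down to a single %gs-relative
 * load; e.g. __PCPU_GET(pc_apic_id) is roughly
 *
 *      movl    %gs:offsetof(struct pcpu, pc_apic_id), %eax
 *
 * Larger fields fall back to __PCPU_PTR(), which first loads the pcpu
 * self-pointer from %gs:pc_prvspace and then adds the field offset.
 */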

/*
 * Adds the value to the per-cpu counter name. The implementation
 * must be atomic with respect to interrupts.
 */
#define __PCPU_ADD(name, val) do {                                      \
        __pcpu_type(name) __val;                                        \
        struct __s {                                                    \
                u_char  __b[MIN(sizeof(__pcpu_type(name)), 8)];         \
        } __s;                                                          \
                                                                        \
        __val = (val);                                                  \
        if (sizeof(__val) == 1 || sizeof(__val) == 2 ||                 \
            sizeof(__val) == 4 || sizeof(__val) == 8) {                 \
                __s = *(struct __s *)(void *)&__val;                    \
                __asm __volatile("add %1,%%gs:%0"                       \
                    : "=m" (*(struct __s *)(__pcpu_offset(name)))       \
                    : "r" (__s));                                       \
        } else                                                          \
                *__PCPU_PTR(name) += __val;                             \
} while (0)

/*
 * Increments the value of the per-cpu counter name. The implementation
 * must be atomic with respect to interrupts.
 */
#define __PCPU_INC(name) do {                                           \
        CTASSERT(sizeof(__pcpu_type(name)) == 1 ||                      \
            sizeof(__pcpu_type(name)) == 2 ||                           \
            sizeof(__pcpu_type(name)) == 4 ||                           \
            sizeof(__pcpu_type(name)) == 8);                            \
        if (sizeof(__pcpu_type(name)) == 1) {                           \
                __asm __volatile("incb %%gs:%0"                         \
                    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
                    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
        } else if (sizeof(__pcpu_type(name)) == 2) {                    \
                __asm __volatile("incw %%gs:%0"                         \
                    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
                    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
        } else if (sizeof(__pcpu_type(name)) == 4) {                    \
                __asm __volatile("incl %%gs:%0"                         \
                    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
                    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
        } else if (sizeof(__pcpu_type(name)) == 8) {                    \
                __asm __volatile("incq %%gs:%0"                         \
                    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
                    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
        }                                                               \
} while (0)

/*
 * Sets the value of the per-cpu variable name to value val.
 */
#define __PCPU_SET(name, val) {                                         \
        __pcpu_type(name) __val;                                        \
        struct __s {                                                    \
                u_char  __b[MIN(sizeof(__pcpu_type(name)), 8)];         \
        } __s;                                                          \
                                                                        \
        __val = (val);                                                  \
        if (sizeof(__val) == 1 || sizeof(__val) == 2 ||                 \
            sizeof(__val) == 4 || sizeof(__val) == 8) {                 \
                __s = *(struct __s *)(void *)&__val;                    \
                __asm __volatile("mov %1,%%gs:%0"                       \
                    : "=m" (*(struct __s *)(__pcpu_offset(name)))       \
                    : "r" (__s));                                       \
        } else {                                                        \
                *__PCPU_PTR(name) = __val;                              \
        }                                                               \
}

#define get_pcpu() __extension__ ({                                     \
        struct pcpu *__pc;                                              \
                                                                        \
        __asm __volatile("movq %%gs:%1,%0"                              \
            : "=r" (__pc)                                               \
            : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace))));      \
        __pc;                                                           \
})

#define PCPU_GET(member)        __PCPU_GET(pc_ ## member)
#define PCPU_ADD(member, val)   __PCPU_ADD(pc_ ## member, val)
#define PCPU_INC(member)        __PCPU_INC(pc_ ## member)
#define PCPU_PTR(member)        __PCPU_PTR(pc_ ## member)
#define PCPU_SET(member, val)   __PCPU_SET(pc_ ## member, val)

#define IS_BSP()        (PCPU_GET(cpuid) == 0)
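
/*
 * Illustration only, not part of this header: typical use of the
 * PCPU_*() accessors above.  For fields of 1, 2, 4 or 8 bytes each
 * access is a single %gs-relative instruction and is therefore atomic
 * with respect to interrupts on the local CPU:
 *
 *      u_int apic;
 *
 *      apic = PCPU_GET(apic_id);
 *      PCPU_SET(dbreg_cmd, PC_DBREG_CMD_LOAD);
 *      PCPU_ADD(pm_save_cnt, 1);
 */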
__volatile("movq\t%1,%%gs:(%0)" \ 267 : : "r" (base), "ri" (__n) : "memory", "cc"); \ 268 break; \ 269 default: \ 270 *zpcpu_get(base) = __n; \ 271 } \ 272 } while (0); 273 274 #define zpcpu_add(base, n) do { \ 275 __typeof(*base) __n = (n); \ 276 CTASSERT(sizeof(*base) == 4 || sizeof(*base) == 8); \ 277 switch (sizeof(*base)) { \ 278 case 4: \ 279 __asm __volatile("addl\t%1,%%gs:(%0)" \ 280 : : "r" (base), "ri" (__n) : "memory", "cc"); \ 281 break; \ 282 case 8: \ 283 __asm __volatile("addq\t%1,%%gs:(%0)" \ 284 : : "r" (base), "ri" (__n) : "memory", "cc"); \ 285 break; \ 286 } \ 287 } while (0) 288 289 #define zpcpu_add_protected(base, n) do { \ 290 ZPCPU_ASSERT_PROTECTED(); \ 291 zpcpu_add(base, n); \ 292 } while (0) 293 294 #define zpcpu_sub(base, n) do { \ 295 __typeof(*base) __n = (n); \ 296 CTASSERT(sizeof(*base) == 4 || sizeof(*base) == 8); \ 297 switch (sizeof(*base)) { \ 298 case 4: \ 299 __asm __volatile("subl\t%1,%%gs:(%0)" \ 300 : : "r" (base), "ri" (__n) : "memory", "cc"); \ 301 break; \ 302 case 8: \ 303 __asm __volatile("subq\t%1,%%gs:(%0)" \ 304 : : "r" (base), "ri" (__n) : "memory", "cc"); \ 305 break; \ 306 } \ 307 } while (0); 308 309 #else /* !__GNUCLIKE_ASM || !__GNUCLIKE___TYPEOF */ 310 311 #error "this file needs to be ported to your compiler" 312 313 #endif /* __GNUCLIKE_ASM && __GNUCLIKE___TYPEOF */ 314 315 #endif /* _KERNEL */ 316 317 #endif /* !_MACHINE_PCPU_H_ */ 318