1 /*- 2 * Copyright (c) Peter Wemm <peter@netplex.com.au> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 #ifndef _MACHINE_PCPU_H_ 30 #define _MACHINE_PCPU_H_ 31 32 #ifndef _SYS_CDEFS_H_ 33 #error "sys/cdefs.h is a prerequisite for this file" 34 #endif 35 36 /* 37 * The SMP parts are setup in pmap.c and locore.s for the BSP, and 38 * mp_machdep.c sets up the data for the AP's to "see" when they awake. 
 * The reason for doing it via a struct is so that an array of pointers
 * to each CPU's data can be set up for things like "check curproc on all
 * other processors"
 */

/*
 * Machine-dependent fields appended to the machine-independent part of
 * struct pcpu.  pc_monitorbuf is first and aligned to its own size so
 * it sits in a cache line by itself (presumably the monitor/mwait
 * wake-up target -- confirm against the idle code).  pc_prvspace is a
 * self-pointer; __PCPU_PTR below loads it from %gs to recover the
 * linear address of this CPU's pcpu.
 */
#define	PCPU_MD_FIELDS							\
	char	pc_monitorbuf[128] __aligned(128); /* cache line */	\
	struct	pcpu *pc_prvspace;	/* Self-reference */		\
	struct	pmap *pc_curpmap;					\
	struct	amd64tss *pc_tssp;					\
	register_t pc_rsp0;						\
	register_t pc_scratch_rsp;	/* User %rsp in syscall */	\
	u_int	pc_apic_id;						\
	u_int   pc_acpi_id;		/* ACPI CPU id */		\
	struct	user_segment_descriptor *pc_gs32p

#ifdef	_KERNEL

#ifdef lint

/*
 * Under lint there is no inline asm; pretend the current CPU's pcpu is
 * reachable through an ordinary global pointer so the accessors below
 * type-check the same way as the asm versions.
 */
extern struct pcpu *pcpup;

#define	PCPU_GET(member)	(pcpup->pc_ ## member)
#define	PCPU_ADD(member, val)	(pcpup->pc_ ## member += (val))
#define	PCPU_INC(member)	PCPU_ADD(member, 1)
#define	PCPU_PTR(member)	(&pcpup->pc_ ## member)
#define	PCPU_SET(member, val)	(pcpup->pc_ ## member = (val))

#elif defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF)

/*
 * Evaluates to the byte offset of the per-cpu variable name.
 */
#define	__pcpu_offset(name)						\
	__offsetof(struct pcpu, name)

/*
 * Evaluates to the type of the per-cpu variable name.
 */
#define	__pcpu_type(name)						\
	__typeof(((struct pcpu *)0)->name)

/*
 * Evaluates to the address of the per-cpu variable name.
 *
 * The pcpu base is fetched from %gs:pc_prvspace (the self-reference
 * declared in PCPU_MD_FIELDS) and the field offset is then added as an
 * immediate, yielding a plain linear-address pointer usable outside
 * %gs-relative addressing.
 */
#define	__PCPU_PTR(name) __extension__ ({				\
	__pcpu_type(name) *__p;						\
									\
	__asm __volatile("movq %%gs:%1,%0; addq %2,%0"			\
	    : "=r" (__p)						\
	    : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace))),	\
	      "i" (__pcpu_offset(name)));				\
									\
	__p;								\
})

/*
 * Evaluates to the value of the per-cpu variable name.
 */
#define	__PCPU_GET(name) __extension__ ({				\
	__pcpu_type(name) __res;					\
	/*								\
	 * struct __s is a bag of bytes the same size as the field	\
	 * (capped at 8) so a field of any 1/2/4/8-byte type can be	\
	 * moved with a single %gs-relative mov; the result is then	\
	 * type-punned back into __res.  Larger/odd sizes fall back	\
	 * to an ordinary load through __PCPU_PTR.			\
	 */								\
	struct __s {							\
		u_char	__b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	} __s;								\
									\
	if (sizeof(__res) == 1 || sizeof(__res) == 2 ||			\
	    sizeof(__res) == 4 || sizeof(__res) == 8) {			\
		__asm __volatile("mov %%gs:%1,%0"			\
		    : "=r" (__s)					\
		    : "m" (*(struct __s *)(__pcpu_offset(name))));	\
		*(struct __s *)(void *)&__res = __s;			\
	} else {							\
		__res = *__PCPU_PTR(name);				\
	}								\
	__res;								\
})

/*
 * Adds the value to the per-cpu counter name.  The implementation
 * must be atomic with respect to interrupts.
 */
#define	__PCPU_ADD(name, val) do {					\
	__pcpu_type(name) __val;					\
	struct __s {							\
		u_char	__b[MIN(sizeof(__pcpu_type(name)), 8)];		\
	} __s;								\
									\
	__val = (val);							\
	/*								\
	 * For 1/2/4/8-byte fields the update is one read-modify-write	\
	 * "add" to %gs-relative memory, so an interrupt cannot land	\
	 * in the middle (no LOCK prefix -- this is per-CPU data, not	\
	 * SMP-shared).  The fallback for other sizes is NOT		\
	 * interrupt-atomic.						\
	 */								\
	if (sizeof(__val) == 1 || sizeof(__val) == 2 ||			\
	    sizeof(__val) == 4 || sizeof(__val) == 8) {			\
		__s = *(struct __s *)(void *)&__val;			\
		__asm __volatile("add %1,%%gs:%0"			\
		    : "=m" (*(struct __s *)(__pcpu_offset(name)))	\
		    : "r" (__s));					\
	} else								\
		*__PCPU_PTR(name) += __val;				\
} while (0)

/*
 * Increments the value of the per-cpu counter name.  The implementation
 * must be atomic with respect to interrupts.
 */
#define	__PCPU_INC(name) do {						\
	/*								\
	 * Only sizes expressible as a single inc instruction are	\
	 * supported; reject anything else at compile time rather	\
	 * than fall back to a non-atomic sequence.			\
	 */								\
	CTASSERT(sizeof(__pcpu_type(name)) == 1 ||			\
	    sizeof(__pcpu_type(name)) == 2 ||				\
	    sizeof(__pcpu_type(name)) == 4 ||				\
	    sizeof(__pcpu_type(name)) == 8);				\
	/*								\
	 * One width-suffixed inc on %gs-relative memory: a single	\
	 * instruction, hence atomic with respect to interrupts on	\
	 * this CPU.  The dead "m" input tells the compiler the old	\
	 * contents are consumed as well as overwritten.		\
	 */								\
	if (sizeof(__pcpu_type(name)) == 1) {				\
		__asm __volatile("incb %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 2) {			\
		__asm __volatile("incw %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 4) {			\
		__asm __volatile("incl %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	} else if (sizeof(__pcpu_type(name)) == 8) {			\
		__asm __volatile("incq %%gs:%0"				\
		    : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\
		    : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\
	}								\
} while (0)

/*
 * Sets the value of the per-cpu variable name to value val.
166 */ 167 #define __PCPU_SET(name, val) { \ 168 __pcpu_type(name) __val; \ 169 struct __s { \ 170 u_char __b[MIN(sizeof(__pcpu_type(name)), 8)]; \ 171 } __s; \ 172 \ 173 __val = (val); \ 174 if (sizeof(__val) == 1 || sizeof(__val) == 2 || \ 175 sizeof(__val) == 4 || sizeof(__val) == 8) { \ 176 __s = *(struct __s *)(void *)&__val; \ 177 __asm __volatile("mov %1,%%gs:%0" \ 178 : "=m" (*(struct __s *)(__pcpu_offset(name))) \ 179 : "r" (__s)); \ 180 } else { \ 181 *__PCPU_PTR(name) = __val; \ 182 } \ 183 } 184 185 #define PCPU_GET(member) __PCPU_GET(pc_ ## member) 186 #define PCPU_ADD(member, val) __PCPU_ADD(pc_ ## member, val) 187 #define PCPU_INC(member) __PCPU_INC(pc_ ## member) 188 #define PCPU_PTR(member) __PCPU_PTR(pc_ ## member) 189 #define PCPU_SET(member, val) __PCPU_SET(pc_ ## member, val) 190 191 static __inline struct thread * 192 __curthread(void) 193 { 194 struct thread *td; 195 196 __asm __volatile("movq %%gs:0,%0" : "=r" (td)); 197 return (td); 198 } 199 #define curthread (__curthread()) 200 201 #else /* !lint || defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF) */ 202 203 #error "this file needs to be ported to your compiler" 204 205 #endif /* lint, etc. */ 206 207 #endif /* _KERNEL */ 208 209 #endif /* !_MACHINE_PCPU_H_ */ 210