/*-
 * Copyright (c) Peter Wemm <peter@netplex.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PCPU_H_
#define	_MACHINE_PCPU_H_

#ifdef _KERNEL

#ifndef __GNUC__
#error gcc is required to use this file
#endif

#include <machine/segments.h>
#include <machine/tss.h>

/*
 * The SMP parts are set up in pmap.c and locore.s for the BSP, and
 * mp_machdep.c sets up the data for the APs to "see" when they awake.
 * The reason for doing it via a struct is so that an array of pointers
 * to each CPU's data can be set up for things like "check curproc on
 * all other processors".
 */
#define	PCPU_MD_FIELDS							\
	struct pcpu *pc_prvspace;	/* Self-reference */		\
	struct i386tss pc_common_tss;					\
	struct segment_descriptor pc_common_tssd;			\
	struct segment_descriptor *pc_tss_gdt;				\
	int	pc_currentldt

/*
 * Evaluates to the byte offset of the per-cpu variable name.
 */
#define	__pcpu_offset(name)						\
	__offsetof(struct pcpu, name)

/*
 * Evaluates to the type of the per-cpu variable name.
 */
#define	__pcpu_type(name)						\
	__typeof(((struct pcpu *)0)->name)

/*
 * Evaluates to the address of the per-cpu variable name.
 */
#define	__PCPU_PTR(name) ({						\
	__pcpu_type(name) *__p;						\
									\
	__asm __volatile("movl %%fs:%1,%0; addl %2,%0"			\
	    : "=r" (__p)						\
	    : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace))),	\
	      "i" (__pcpu_offset(name)));				\
									\
	__p;								\
})
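
/*
 * Note on the access mechanism: the kernel arranges for %fs to address
 * this CPU's per-cpu region, and pc_prvspace holds a self-pointer to it.
 * __PCPU_PTR() loads that self-pointer with a %fs-relative movl to
 * recover the linear address of this CPU's struct pcpu, then adds the
 * field offset, yielding an ordinary pointer that may be passed around.
 * The __PCPU_GET()/__PCPU_SET() accessors below avoid the extra step for
 * 1-, 2- and 4-byte fields by issuing a single %fs-relative movb, movw
 * or movl, and fall back to dereferencing __PCPU_PTR() for larger fields.
 */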

/*
 * Evaluates to the value of the per-cpu variable name.
 */
#define	__PCPU_GET(name) ({						\
	__pcpu_type(name) __result;					\
									\
	if (sizeof(__result) == 1) {					\
		u_char __b;						\
		__asm __volatile("movb %%fs:%1,%0"			\
		    : "=r" (__b)					\
		    : "m" (*(u_char *)(__pcpu_offset(name))));		\
		__result = *(__pcpu_type(name) *)&__b;			\
	} else if (sizeof(__result) == 2) {				\
		u_short __w;						\
		__asm __volatile("movw %%fs:%1,%0"			\
		    : "=r" (__w)					\
		    : "m" (*(u_short *)(__pcpu_offset(name))));		\
		__result = *(__pcpu_type(name) *)&__w;			\
	} else if (sizeof(__result) == 4) {				\
		u_int __i;						\
		__asm __volatile("movl %%fs:%1,%0"			\
		    : "=r" (__i)					\
		    : "m" (*(u_int *)(__pcpu_offset(name))));		\
		__result = *(__pcpu_type(name) *)&__i;			\
	} else {							\
		__result = *__PCPU_PTR(name);				\
	}								\
									\
	__result;							\
})

/*
 * Sets the value of the per-cpu variable name to value val.
 */
#define	__PCPU_SET(name, val) ({					\
	__pcpu_type(name) __val = (val);				\
									\
	if (sizeof(__val) == 1) {					\
		u_char __b;						\
		__b = *(u_char *)&__val;				\
		__asm __volatile("movb %1,%%fs:%0"			\
		    : "=m" (*(u_char *)(__pcpu_offset(name)))		\
		    : "r" (__b));					\
	} else if (sizeof(__val) == 2) {				\
		u_short __w;						\
		__w = *(u_short *)&__val;				\
		__asm __volatile("movw %1,%%fs:%0"			\
		    : "=m" (*(u_short *)(__pcpu_offset(name)))		\
		    : "r" (__w));					\
	} else if (sizeof(__val) == 4) {				\
		u_int __i;						\
		__i = *(u_int *)&__val;					\
		__asm __volatile("movl %1,%%fs:%0"			\
		    : "=m" (*(u_int *)(__pcpu_offset(name)))		\
		    : "r" (__i));					\
	} else {							\
		*__PCPU_PTR(name) = __val;				\
	}								\
})

#define	PCPU_GET(member)	__PCPU_GET(pc_ ## member)
#define	PCPU_PTR(member)	__PCPU_PTR(pc_ ## member)
#define	PCPU_SET(member, val)	__PCPU_SET(pc_ ## member, val)

#endif	/* _KERNEL */

#endif	/* ! _MACHINE_PCPU_H_ */
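
/*
 * Illustrative usage sketch (hypothetical caller, MD fields only): the
 * PCPU_* accessors take the field names declared in PCPU_MD_FIELDS above
 * without their "pc_" prefix, e.g.
 *
 *	int ldt;
 *	struct i386tss *tssp;
 *
 *	ldt = PCPU_GET(currentldt);	4-byte field: one %fs-relative movl
 *	PCPU_SET(currentldt, ldt);
 *	tssp = PCPU_PTR(common_tss);	larger field: take a pointer instead
 */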