/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2019 Joyent, Inc.
 */

#ifndef	_SYS_ASM_LINKAGE_H
#define	_SYS_ASM_LINKAGE_H

#include <sys/stack.h>
#include <sys/trap.h>

#ifdef	__cplusplus
extern "C" {
#endif

#ifdef	_ASM	/* The remainder of this file is only for assembly files */

/*
 * make annoying differences in assembler syntax go away
 */

/*
 * D16 and A16 are used to insert instruction prefixes; the
 * macros help the assembler code be slightly more portable.
 */
#if !defined(__GNUC_AS__)
/*
 * /usr/ccs/bin/as prefixes are parsed as separate instructions
 */
#define	D16	data16;
#define	A16	addr16;

/*
 * (There are some weird constructs in constant expressions)
 */
#define	_CONST(const)		[const]
#define	_BITNOT(const)		-1!_CONST(const)
#define	_MUL(a, b)		_CONST(a \* b)

#else
/*
 * Why not use the 'data16' and 'addr16' prefixes .. well, the
 * assembler doesn't quite believe in real mode, and thus argues with
 * us about what we're trying to do.
 */
#define	D16	.byte	0x66;
#define	A16	.byte	0x67;

#define	_CONST(const)		(const)
#define	_BITNOT(const)		~_CONST(const)
#define	_MUL(a, b)		_CONST(a * b)

#endif

/*
 * C pointers are different sizes between i386 and amd64.
 * These constants can be used to compute offsets into pointer arrays.
 */
#if defined(__amd64)
#define	CLONGSHIFT	3
#define	CLONGSIZE	8
#define	CLONGMASK	7
#elif defined(__i386)
#define	CLONGSHIFT	2
#define	CLONGSIZE	4
#define	CLONGMASK	3
#endif
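
/*
 * Illustrative sketch only (not part of the original header): on amd64,
 * CLONGSIZE and CLONGSHIFT are typically used to scale an index into an
 * array of pointers or longs. Assuming a hypothetical array whose base
 * is in %rdi and whose element index is in %rsi, the element could be
 * loaded either with a scaled-index addressing mode or with an explicit
 * shift:
 *
 *	movq	(%rdi, %rsi, CLONGSIZE), %rax
 *
 *	shlq	$CLONGSHIFT, %rsi
 *	movq	(%rdi, %rsi), %rax
 */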

/*
 * Since we know we're either ILP32 or LP64 ..
 */
#define	CPTRSHIFT	CLONGSHIFT
#define	CPTRSIZE	CLONGSIZE
#define	CPTRMASK	CLONGMASK

#if CPTRSIZE != (1 << CPTRSHIFT) || CLONGSIZE != (1 << CLONGSHIFT)
#error	"inconsistent shift constants"
#endif

#if CPTRMASK != (CPTRSIZE - 1) || CLONGMASK != (CLONGSIZE - 1)
#error	"inconsistent mask constants"
#endif

#define	ASM_ENTRY_ALIGN	16

/*
 * SSE register alignment and save areas
 */

#define	XMM_SIZE	16
#define	XMM_ALIGN	16

#if defined(__amd64)

#define	SAVE_XMM_PROLOG(sreg, nreg) \
	subq	$_CONST(_MUL(XMM_SIZE, nreg)), %rsp; \
	movq	%rsp, sreg

#define	RSTOR_XMM_EPILOG(sreg, nreg) \
	addq	$_CONST(_MUL(XMM_SIZE, nreg)), %rsp

#elif defined(__i386)

#define	SAVE_XMM_PROLOG(sreg, nreg) \
	subl	$_CONST(_MUL(XMM_SIZE, nreg) + XMM_ALIGN), %esp; \
	movl	%esp, sreg; \
	addl	$XMM_ALIGN, sreg; \
	andl	$_BITNOT(XMM_ALIGN-1), sreg

#define	RSTOR_XMM_EPILOG(sreg, nreg) \
	addl	$_CONST(_MUL(XMM_SIZE, nreg) + XMM_ALIGN), %esp;

#endif	/* __i386 */

/*
 * Profiling causes definitions of MCOUNT and RTMCOUNT particular to the
 * type of profiling in use (GPROF or PROF).
 */
#ifdef GPROF

#define	MCOUNT(x) \
	pushl	%ebp; \
	movl	%esp, %ebp; \
	call	_mcount; \
	popl	%ebp

#endif /* GPROF */

#ifdef PROF

#define	MCOUNT(x) \
/* CSTYLED */ \
	.lcomm	.L_/**/x/**/1, 4, 4; \
	pushl	%ebp; \
	movl	%esp, %ebp; \
/* CSTYLED */ \
	movl	$.L_/**/x/**/1, %edx; \
	call	_mcount; \
	popl	%ebp

#endif /* PROF */

/*
 * if we are not profiling, MCOUNT should be defined to nothing
 */
#if !defined(PROF) && !defined(GPROF)
#define	MCOUNT(x)
#endif /* !defined(PROF) && !defined(GPROF) */

#define	RTMCOUNT(x)	MCOUNT(x)

/*
 * Macro to define weak symbol aliases. These are similar to the ANSI-C
 *	#pragma weak _name = name
 * except that a compiler can determine the type; the assembler must be
 * told. Hence, the second parameter must be the type of the symbol
 * (e.g.: function, ...).
 */
#define	ANSI_PRAGMA_WEAK(sym, stype) \
/* CSTYLED */ \
	.weak	_/**/sym; \
/* CSTYLED */ \
	.type	_/**/sym, @stype; \
/* CSTYLED */ \
_/**/sym = sym

/*
 * Like ANSI_PRAGMA_WEAK(), but for unrelated names, as in:
 *	#pragma weak sym1 = sym2
 */
#define	ANSI_PRAGMA_WEAK2(sym1, sym2, stype) \
	.weak	sym1; \
	.type	sym1, @stype; \
sym1 = sym2

/*
 * ENTRY provides the standard procedure entry code and an easy way to
 * insert the calls to mcount for profiling. ENTRY_NP is identical, but
 * never calls mcount.
 */
#define	ENTRY(x) \
	.text; \
	.align	ASM_ENTRY_ALIGN; \
	.globl	x; \
	.type	x, @function; \
x:	MCOUNT(x)

#define	ENTRY_NP(x) \
	.text; \
	.align	ASM_ENTRY_ALIGN; \
	.globl	x; \
	.type	x, @function; \
x:

#define	RTENTRY(x) \
	.text; \
	.align	ASM_ENTRY_ALIGN; \
	.globl	x; \
	.type	x, @function; \
x:	RTMCOUNT(x)
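
/*
 * Illustrative sketch only (not part of the original header): an assembly
 * source file that includes this header typically brackets each routine
 * with ENTRY (or ENTRY_NP) and the matching SET_SIZE defined below.
 * "my_func" is a hypothetical amd64 leaf routine used purely as an
 * example:
 *
 *	ENTRY_NP(my_func)
 *		movq	%rdi, %rax
 *		ret
 *	SET_SIZE(my_func)
 */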

/*
 * ENTRY2 is identical to ENTRY but provides two labels for the entry point.
 */
#define	ENTRY2(x, y) \
	.text; \
	.align	ASM_ENTRY_ALIGN; \
	.globl	x, y; \
	.type	x, @function; \
	.type	y, @function; \
/* CSTYLED */ \
x:	; \
y:	MCOUNT(x)

#define	ENTRY_NP2(x, y) \
	.text; \
	.align	ASM_ENTRY_ALIGN; \
	.globl	x, y; \
	.type	x, @function; \
	.type	y, @function; \
/* CSTYLED */ \
x:	; \
y:


/*
 * ALTENTRY provides for additional entry points.
 */
#define	ALTENTRY(x) \
	.globl	x; \
	.type	x, @function; \
x:

/*
 * DGDEF, DGDEF2 and DGDEF3 provide global data declarations.
 *
 * DGDEF provides a word-aligned word of storage.
 *
 * DGDEF2 allocates "sz" bytes of storage with **NO** alignment. This
 * implies this macro is best used for byte arrays.
 *
 * DGDEF3 allocates "sz" bytes of storage with "algn" alignment.
 */
#define	DGDEF2(name, sz) \
	.data; \
	.globl	name; \
	.type	name, @object; \
	.size	name, sz; \
name:

#define	DGDEF3(name, sz, algn) \
	.data; \
	.align	algn; \
	.globl	name; \
	.type	name, @object; \
	.size	name, sz; \
name:

#define	DGDEF(name)	DGDEF3(name, 4, 4)

/*
 * SET_SIZE trails a function and sets the size for the ELF symbol table.
 */
#define	SET_SIZE(x) \
	.size	x, [.-x]

/*
 * NWORD provides native word value.
 */
#if defined(__amd64)

/*CSTYLED*/
#define	NWORD	quad

#elif defined(__i386)

#define	NWORD	long

#endif	/* __i386 */

/*
 * These macros should be used when making indirect calls in the kernel.
 * They will perform a jump or call through the named register in a way
 * that knows about retpolines and handles whether such mitigations are
 * enabled or not.
 *
 * INDIRECT_JMP_REG will jump to the address in the named register.
 * INDIRECT_CALL_REG will instead do a call. These macros cannot be used
 * to dereference a register. For example, if you need to do something
 * that looks like the following:
 *
 *	call *24(%rdi)
 *	jmp *(%r15)
 *
 * You must instead first do a movq into the corresponding register. You
 * need to be careful to make sure that the register it is loaded into is
 * safe to use. Often that register may be saved or used elsewhere, so it
 * may not be safe to clobber the value. Usually, loading into %rax would
 * be safe. These would turn into something like:
 *
 *	movq 24(%rdi), %rdi; INDIRECT_CALL_REG(rdi)
 *	movq (%r15), %r15; INDIRECT_JMP_REG(r15)
 *
 * If you are trying to call a global function, then use the following
 * pattern (substituting the register in question):
 *
 *	leaq my_favorite_function(%rip), %rax
 *	INDIRECT_CALL_REG(rax)
 *
 * If you instead have a function pointer (say gethrtimef for example),
 * then you need to do:
 *
 *	movq my_favorite_function_pointer(%rip), %rax
 *	INDIRECT_CALL_REG(rax)
 */

/* CSTYLED */
#define	INDIRECT_JMP_REG(reg)	jmp	__x86_indirect_thunk_/**/reg;

/* CSTYLED */
#define	INDIRECT_CALL_REG(reg)	call	__x86_indirect_thunk_/**/reg;

#endif	/* _ASM */

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_ASM_LINKAGE_H */
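
/*
 * Illustrative sketch only (not part of this header): combining the data
 * and entry macros with the retpoline-aware indirect call macros above,
 * a hypothetical amd64 routine that calls through a global function
 * pointer might look like:
 *
 *	DGDEF3(my_hook, CLONGSIZE, CLONGSIZE)
 *		.quad	0
 *
 *	ENTRY_NP(my_call_hook)
 *		movq	my_hook(%rip), %rax
 *		INDIRECT_CALL_REG(rax)
 *		ret
 *	SET_SIZE(my_call_hook)
 */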