/* SPDX-License-Identifier: GPL-2.0 */
/*
 * syscall_wrapper.h - x86 specific wrappers to syscall definitions
 */

#ifndef _ASM_X86_SYSCALL_WRAPPER_H
#define _ASM_X86_SYSCALL_WRAPPER_H

#include <asm/ptrace.h>

extern long __x64_sys_ni_syscall(const struct pt_regs *regs);
extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);

/*
 * Instead of the generic __SYSCALL_DEFINEx() definition, the x86 version takes
 * struct pt_regs *regs as the only argument of the syscall stub(s) named as:
 * __x64_sys_*()         - 64-bit native syscall
 * __ia32_sys_*()        - 32-bit native syscall or common compat syscall
 * __ia32_compat_sys_*() - 32-bit compat syscall
 * __x64_compat_sys_*()  - 64-bit X32 compat syscall
 *
 * The registers are decoded according to the ABI:
 * 64-bit: RDI, RSI, RDX, R10, R8, R9
 * 32-bit: EBX, ECX, EDX, ESI, EDI, EBP
 *
 * The stub then passes the decoded arguments to the __se_sys_*() wrapper to
 * perform sign-extension (omitted for zero-argument syscalls).  Finally the
 * arguments are passed to the __do_sys_*() function which is the actual
 * syscall.  These wrappers are marked as inline so the compiler can optimize
 * the functions where appropriate.
 *
 * Example assembly (slightly re-ordered for better readability):
 *
 * <__x64_sys_recv>:               <-- syscall with 4 parameters
 *      callq   <__fentry__>
 *
 *      mov     0x70(%rdi),%rdi    <-- decode regs->di
 *      mov     0x68(%rdi),%rsi    <-- decode regs->si
 *      mov     0x60(%rdi),%rdx    <-- decode regs->dx
 *      mov     0x38(%rdi),%rcx    <-- decode regs->r10
 *
 *      xor     %r9d,%r9d          <-- clear %r9
 *      xor     %r8d,%r8d          <-- clear %r8
 *
 *      callq   __sys_recvfrom     <-- do the actual work in __sys_recvfrom()
 *                                     which takes 6 arguments
 *
 *      cltq                       <-- extend return value to 64-bit
 *      retq                       <-- return
 *
 * This approach avoids leaking random user-provided register content down
 * the call chain.
 */
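
/*
 * For illustration only (not generated by this header): a hypothetical
 * two-argument syscall defined elsewhere as
 *
 *      SYSCALL_DEFINE2(example, int, fd, unsigned int, flags)
 *
 * expands on a CONFIG_X86_64 kernel into roughly the following chain
 * (SYSCALL_METADATA, ALLOW_ERROR_INJECTION, the __SC_TEST/__PROTECT checks
 * and the optional __ia32_sys_example() stub are omitted, and the pieces
 * re-ordered for readability):
 *
 *      long __x64_sys_example(const struct pt_regs *regs)
 *      {
 *              return __se_sys_example(regs->di, regs->si);
 *      }
 *
 *      static long __se_sys_example(long fd, long flags)
 *      {
 *              return __do_sys_example((int)fd, (unsigned int)flags);
 *      }
 *
 *      static inline long __do_sys_example(int fd, unsigned int flags)
 *      {
 *              ... actual syscall body ...
 *      }
 *
 * "example", "fd" and "flags" are made-up names used only for this sketch.
 */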

/* Mapping of registers to parameters for syscalls on x86-64 and x32 */
#define SC_X86_64_REGS_TO_ARGS(x, ...)                                  \
	__MAP(x,__SC_ARGS                                               \
		,,regs->di,,regs->si,,regs->dx                          \
		,,regs->r10,,regs->r8,,regs->r9)


/* SYSCALL_PT_ARGS is adapted from s390x */
#define SYSCALL_PT_ARG6(m, t1, t2, t3, t4, t5, t6)                      \
	SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))
#define SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5)                          \
	SYSCALL_PT_ARG4(m, t1, t2, t3, t4), m(t5, (regs->di))
#define SYSCALL_PT_ARG4(m, t1, t2, t3, t4)                              \
	SYSCALL_PT_ARG3(m, t1, t2, t3), m(t4, (regs->si))
#define SYSCALL_PT_ARG3(m, t1, t2, t3)                                  \
	SYSCALL_PT_ARG2(m, t1, t2), m(t3, (regs->dx))
#define SYSCALL_PT_ARG2(m, t1, t2)                                      \
	SYSCALL_PT_ARG1(m, t1), m(t2, (regs->cx))
#define SYSCALL_PT_ARG1(m, t1) m(t1, (regs->bx))
#define SYSCALL_PT_ARGS(x, ...) SYSCALL_PT_ARG##x(__VA_ARGS__)

#define __SC_COMPAT_CAST(t, a)                                          \
	(__typeof(__builtin_choose_expr(__TYPE_IS_L(t), 0, 0U)))       \
	(unsigned int)a

/* Mapping of registers to parameters for syscalls on i386 */
#define SC_IA32_REGS_TO_ARGS(x, ...)                                    \
	SYSCALL_PT_ARGS(x, __SC_COMPAT_CAST,                            \
			__MAP(x, __SC_TYPE, __VA_ARGS__))
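
/*
 * For illustration only: for a hypothetical syscall taking
 * (unsigned int, fd, long, offset), SC_IA32_REGS_TO_ARGS(2, ...) above
 * evaluates to roughly
 *
 *      (unsigned int)(unsigned int)(regs->bx), (int)(unsigned int)(regs->cx)
 *
 * i.e. each argument is truncated to the low 32 bits of the register and
 * zero-extended into the wrapper's 64-bit parameter, except that 'long'
 * arguments go through 'int' and are therefore sign-extended.
 */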

#define __SYS_STUB0(abi, name)                                          \
	long __##abi##_##name(const struct pt_regs *regs);              \
	ALLOW_ERROR_INJECTION(__##abi##_##name, ERRNO);                 \
	long __##abi##_##name(const struct pt_regs *regs)               \
		__alias(__do_##name);

#define __SYS_STUBx(abi, name, ...)                                     \
	long __##abi##_##name(const struct pt_regs *regs);              \
	ALLOW_ERROR_INJECTION(__##abi##_##name, ERRNO);                 \
	long __##abi##_##name(const struct pt_regs *regs)               \
	{                                                               \
		return __se_##name(__VA_ARGS__);                        \
	}

#define __COND_SYSCALL(abi, name)                                       \
	__weak long __##abi##_##name(const struct pt_regs *__unused);   \
	__weak long __##abi##_##name(const struct pt_regs *__unused)    \
	{                                                               \
		return sys_ni_syscall();                                \
	}

#ifdef CONFIG_X86_64
#define __X64_SYS_STUB0(name)                                           \
	__SYS_STUB0(x64, sys_##name)

#define __X64_SYS_STUBx(x, name, ...)                                   \
	__SYS_STUBx(x64, sys##name,                                     \
		    SC_X86_64_REGS_TO_ARGS(x, __VA_ARGS__))

#define __X64_COND_SYSCALL(name)                                        \
	__COND_SYSCALL(x64, sys_##name)

#else /* CONFIG_X86_64 */
#define __X64_SYS_STUB0(name)
#define __X64_SYS_STUBx(x, name, ...)
#define __X64_COND_SYSCALL(name)
#endif /* CONFIG_X86_64 */

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
#define __IA32_SYS_STUB0(name)                                          \
	__SYS_STUB0(ia32, sys_##name)

#define __IA32_SYS_STUBx(x, name, ...)                                  \
	__SYS_STUBx(ia32, sys##name,                                    \
		    SC_IA32_REGS_TO_ARGS(x, __VA_ARGS__))

#define __IA32_COND_SYSCALL(name)                                       \
	__COND_SYSCALL(ia32, sys_##name)

#else /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
#define __IA32_SYS_STUB0(name)
#define __IA32_SYS_STUBx(x, name, ...)
#define __IA32_COND_SYSCALL(name)
#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */

#ifdef CONFIG_IA32_EMULATION
/*
 * For IA32 emulation, we need to handle "compat" syscalls *and* create
 * additional wrappers (aptly named __ia32_sys_xyzzy) which decode the
 * ia32 regs in the proper order for shared or "common" syscalls. As some
 * syscalls may not be implemented, we need to expand COND_SYSCALL in
 * kernel/sys_ni.c to cover this case as well.
 */
#define __IA32_COMPAT_SYS_STUB0(name)                                   \
	__SYS_STUB0(ia32, compat_sys_##name)

#define __IA32_COMPAT_SYS_STUBx(x, name, ...)                           \
	__SYS_STUBx(ia32, compat_sys##name,                             \
		    SC_IA32_REGS_TO_ARGS(x, __VA_ARGS__))

#define __IA32_COMPAT_COND_SYSCALL(name)                                \
	__COND_SYSCALL(ia32, compat_sys_##name)

#else /* CONFIG_IA32_EMULATION */
#define __IA32_COMPAT_SYS_STUB0(name)
#define __IA32_COMPAT_SYS_STUBx(x, name, ...)
#define __IA32_COMPAT_COND_SYSCALL(name)
#endif /* CONFIG_IA32_EMULATION */


#ifdef CONFIG_X86_X32_ABI
/*
 * For the x32 ABI, we need to create a stub for compat_sys_*() which is aware
 * of the x86-64-style parameter ordering of x32 syscalls. The syscalls common
 * with x86_64 obviously do not need such care.
 */
#define __X32_COMPAT_SYS_STUB0(name)                                    \
	__SYS_STUB0(x64, compat_sys_##name)

#define __X32_COMPAT_SYS_STUBx(x, name, ...)                            \
	__SYS_STUBx(x64, compat_sys##name,                              \
		    SC_X86_64_REGS_TO_ARGS(x, __VA_ARGS__))

#define __X32_COMPAT_COND_SYSCALL(name)                                 \
	__COND_SYSCALL(x64, compat_sys_##name)

#else /* CONFIG_X86_X32_ABI */
#define __X32_COMPAT_SYS_STUB0(name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...)
#define __X32_COMPAT_COND_SYSCALL(name)
#endif /* CONFIG_X86_X32_ABI */


#ifdef CONFIG_COMPAT
/*
 * Compat means IA32_EMULATION and/or X86_X32. As they use a different
 * mapping of registers to parameters, we need to generate stubs for each
 * of them.
 */
#define COMPAT_SYSCALL_DEFINE0(name)                                    \
	static long                                                     \
	__do_compat_sys_##name(const struct pt_regs *__unused);         \
	__IA32_COMPAT_SYS_STUB0(name)                                   \
	__X32_COMPAT_SYS_STUB0(name)                                    \
	static long                                                     \
	__do_compat_sys_##name(const struct pt_regs *__unused)

#define COMPAT_SYSCALL_DEFINEx(x, name, ...)                            \
	static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
	static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
	__IA32_COMPAT_SYS_STUBx(x, name, __VA_ARGS__)                   \
	__X32_COMPAT_SYS_STUBx(x, name, __VA_ARGS__)                    \
	static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
	{                                                               \
		return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
	}                                                               \
	static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))

/*
 * As some compat syscalls may not be implemented, we need to expand
 * COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well.
 */
#define COND_SYSCALL_COMPAT(name)                                       \
	__IA32_COMPAT_COND_SYSCALL(name)                                \
	__X32_COMPAT_COND_SYSCALL(name)

#endif /* CONFIG_COMPAT */

#define __SYSCALL_DEFINEx(x, name, ...)                                 \
	static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));    \
	static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
	__X64_SYS_STUBx(x, name, __VA_ARGS__)                           \
	__IA32_SYS_STUBx(x, name, __VA_ARGS__)                          \
	static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))     \
	{                                                               \
		long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__));\
		__MAP(x,__SC_TEST,__VA_ARGS__);                         \
		__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__));       \
		return ret;                                             \
	}                                                               \
	static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))

/*
 * As the generic SYSCALL_DEFINE0() macro does not decode any parameters for
 * obvious reasons, and passing struct pt_regs *regs to it in %rdi does not
 * hurt, we only need to re-define it here to keep the naming congruent to
 * SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() macro
 * to work correctly.
 */
#define SYSCALL_DEFINE0(sname)                                          \
	SYSCALL_METADATA(_##sname, 0);                                  \
	static long __do_sys_##sname(const struct pt_regs *__unused);  \
	__X64_SYS_STUB0(sname)                                          \
	__IA32_SYS_STUB0(sname)                                         \
	static long __do_sys_##sname(const struct pt_regs *__unused)

#define COND_SYSCALL(name)                                              \
	__X64_COND_SYSCALL(name)                                        \
	__IA32_COND_SYSCALL(name)


/*
 * For VSYSCALLS, we need to declare these three syscalls with the new
 * pt_regs-based calling convention for in-kernel use.
 */
long __x64_sys_getcpu(const struct pt_regs *regs);
long __x64_sys_gettimeofday(const struct pt_regs *regs);
long __x64_sys_time(const struct pt_regs *regs);

#endif /* _ASM_X86_SYSCALL_WRAPPER_H */