/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * x86_64 specific definitions for NOLIBC
 * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
 */

#ifndef _NOLIBC_ARCH_X86_64_H
#define _NOLIBC_ARCH_X86_64_H

#include "compiler.h"
#include "crt.h"

/* Syscalls for x86_64:
 *   - registers are 64-bit
 *   - syscall number is passed in rax
 *   - arguments are in rdi, rsi, rdx, r10, r8, r9 respectively
 *   - the system call is performed by calling the syscall instruction
 *   - syscall return comes in rax
 *   - rcx and r11 are clobbered, others are preserved.
 *   - the arguments are cast to long and assigned into the target registers
 *     which are then simply passed as registers to the asm code, so that we
 *     don't have to experience issues with register constraints.
 *   - the syscall number is always specified last in order to allow forcing
 *     some registers before it (gcc refuses a %-register at the last position).
 *   - see also x86-64 ABI section A.2 AMD64 Linux Kernel Conventions, A.2.1
 *     Calling Conventions.
 *   - a usage sketch (illustration only) follows the my_syscall macros below.
 *
 * Link x86-64 ABI: https://gitlab.com/x86-psABIs/x86-64-ABI/-/wikis/home
 *
 */

#define my_syscall0(num)                                                      \
({                                                                            \
	long _ret;                                                            \
	register long _num  __asm__ ("rax") = (num);                          \
									      \
	__asm__ volatile (                                                    \
		"syscall\n"                                                   \
		: "=a"(_ret)                                                  \
		: "0"(_num)                                                   \
		: "rcx", "r11", "memory", "cc"                                \
	);                                                                    \
	_ret;                                                                 \
})

#define my_syscall1(num, arg1)                                                \
({                                                                            \
	long _ret;                                                            \
	register long _num  __asm__ ("rax") = (num);                          \
	register long _arg1 __asm__ ("rdi") = (long)(arg1);                   \
									      \
	__asm__ volatile (                                                    \
		"syscall\n"                                                   \
		: "=a"(_ret)                                                  \
		: "r"(_arg1),                                                 \
		  "0"(_num)                                                   \
		: "rcx", "r11", "memory", "cc"                                \
	);                                                                    \
	_ret;                                                                 \
})

#define my_syscall2(num, arg1, arg2)                                          \
({                                                                            \
	long _ret;                                                            \
	register long _num  __asm__ ("rax") = (num);                          \
	register long _arg1 __asm__ ("rdi") = (long)(arg1);                   \
	register long _arg2 __asm__ ("rsi") = (long)(arg2);                   \
									      \
	__asm__ volatile (                                                    \
		"syscall\n"                                                   \
		: "=a"(_ret)                                                  \
		: "r"(_arg1), "r"(_arg2),                                     \
		  "0"(_num)                                                   \
		: "rcx", "r11", "memory", "cc"                                \
	);                                                                    \
	_ret;                                                                 \
})

#define my_syscall3(num, arg1, arg2, arg3)                                    \
({                                                                            \
	long _ret;                                                            \
	register long _num  __asm__ ("rax") = (num);                          \
	register long _arg1 __asm__ ("rdi") = (long)(arg1);                   \
	register long _arg2 __asm__ ("rsi") = (long)(arg2);                   \
	register long _arg3 __asm__ ("rdx") = (long)(arg3);                   \
									      \
	__asm__ volatile (                                                    \
		"syscall\n"                                                   \
		: "=a"(_ret)                                                  \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3),                         \
		  "0"(_num)                                                   \
		: "rcx", "r11", "memory", "cc"                                \
	);                                                                    \
	_ret;                                                                 \
})

#define my_syscall4(num, arg1, arg2, arg3, arg4)                              \
({                                                                            \
	long _ret;                                                            \
	register long _num  __asm__ ("rax") = (num);                          \
	register long _arg1 __asm__ ("rdi") = (long)(arg1);                   \
	register long _arg2 __asm__ ("rsi") = (long)(arg2);                   \
	register long _arg3 __asm__ ("rdx") = (long)(arg3);                   \
	register long _arg4 __asm__ ("r10") = (long)(arg4);                   \
									      \
	__asm__ volatile (                                                    \
		"syscall\n"                                                   \
		: "=a"(_ret)                                                  \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4),             \
		  "0"(_num)                                                   \
		: "rcx", "r11", "memory", "cc"                                \
	);                                                                    \
	_ret;                                                                 \
})

#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5)                        \
({                                                                            \
	long _ret;                                                            \
	register long _num  __asm__ ("rax") = (num);                          \
	register long _arg1 __asm__ ("rdi") = (long)(arg1);                   \
	register long _arg2 __asm__ ("rsi") = (long)(arg2);                   \
	register long _arg3 __asm__ ("rdx") = (long)(arg3);                   \
	register long _arg4 __asm__ ("r10") = (long)(arg4);                   \
	register long _arg5 __asm__ ("r8")  = (long)(arg5);                   \
									      \
	__asm__ volatile (                                                    \
		"syscall\n"                                                   \
		: "=a"(_ret)                                                  \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
		  "0"(_num)                                                   \
		: "rcx", "r11", "memory", "cc"                                \
	);                                                                    \
	_ret;                                                                 \
})

#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6)                  \
({                                                                            \
	long _ret;                                                            \
	register long _num  __asm__ ("rax") = (num);                          \
	register long _arg1 __asm__ ("rdi") = (long)(arg1);                   \
	register long _arg2 __asm__ ("rsi") = (long)(arg2);                   \
	register long _arg3 __asm__ ("rdx") = (long)(arg3);                   \
	register long _arg4 __asm__ ("r10") = (long)(arg4);                   \
	register long _arg5 __asm__ ("r8")  = (long)(arg5);                   \
	register long _arg6 __asm__ ("r9")  = (long)(arg6);                   \
									      \
	__asm__ volatile (                                                    \
		"syscall\n"                                                   \
		: "=a"(_ret)                                                  \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
		  "r"(_arg6), "0"(_num)                                       \
		: "rcx", "r11", "memory", "cc"                                \
	);                                                                    \
	_ret;                                                                 \
})
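
/*
 * Hedged usage sketch (illustration only, not part of the kernel file):
 * a minimal write(2) wrapper built on my_syscall3(). It assumes a
 * __NR_write syscall number is in scope, as it is where nolibc's syscall
 * wrappers use these macros, and it returns the raw syscall result
 * (-errno on failure) without the errno conversion the real wrappers do.
 * The macros cast each argument to long, so pointer arguments pass cleanly.
 *
 *	static long example_write(int fd, const void *buf, size_t count)
 *	{
 *		return my_syscall3(__NR_write, fd, buf, count);
 *	}
 */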

/* startup code */
/*
 * The x86-64 System V ABI mandates:
 * 1) %rsp must be 16-byte aligned right before the function call.
 * 2) The deepest stack frame pointer (%rbp) must be zero.
 */
void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
{
	__asm__ volatile (
		"xor  %ebp, %ebp\n"       /* zero the stack frame                            */
		"mov  %rsp, %rdi\n"       /* save stack pointer to %rdi, as arg1 of _start_c */
		"and  $-16, %rsp\n"       /* %rsp must be 16-byte aligned before call        */
		"call _start_c\n"         /* transfer to the C runtime                       */
		"hlt\n"                   /* ensure it does not return                       */
	);
	__builtin_unreachable();
}
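
/*
 * For illustration only (a hypothetical helper, not the real _start_c()
 * declared in crt.h): the pointer handed over in %rdi above points to the
 * initial stack, which the x86-64 System V ABI lays out as argc, followed
 * by the argv[] pointers, a NULL, the envp[] pointers, another NULL and
 * then the auxiliary vector. A sketch of how such a helper could recover
 * the main() arguments from it:
 *
 *	extern int  main(int argc, char **argv, char **envp);
 *	extern void exit(int status);
 *
 *	void example_start_c(long *sp)
 *	{
 *		long   argc = sp[0];
 *		char **argv = (char **)(sp + 1);
 *		char **envp = argv + argc + 1;
 *
 *		exit(main(argc, argv, envp));
 *	}
 */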

#define NOLIBC_ARCH_HAS_MEMMOVE
void *memmove(void *dst, const void *src, size_t len);

#define NOLIBC_ARCH_HAS_MEMCPY
void *memcpy(void *dst, const void *src, size_t len);

#define NOLIBC_ARCH_HAS_MEMSET
void *memset(void *dst, int c, size_t len);

__asm__ (
".section .text.nolibc_memmove_memcpy\n"
".weak memmove\n"
".weak memcpy\n"
"memmove:\n"
"memcpy:\n"
	"movq %rdx, %rcx\n\t"                 /* length -> %rcx for rep movsb          */
	"movq %rdi, %rax\n\t"                 /* return value is dst                   */
	"movq %rdi, %rdx\n\t"
	"subq %rsi, %rdx\n\t"                 /* %rdx = dst - src                      */
	"cmpq %rcx, %rdx\n\t"                 /* if dst - src < len, dst overlaps      */
	"jb   .Lbackward_copy\n\t"            /* the tail of src: copy backward        */
	"rep movsb\n\t"                       /* forward byte copy                     */
	"retq\n"
".Lbackward_copy:"                            /* copy from the last byte downwards     */
	"leaq -1(%rdi, %rcx, 1), %rdi\n\t"
	"leaq -1(%rsi, %rcx, 1), %rsi\n\t"
	"std\n\t"                             /* set the direction flag                */
	"rep movsb\n\t"
	"cld\n\t"                             /* restore the direction flag            */
	"retq\n"

".section .text.nolibc_memset\n"
".weak memset\n"
"memset:\n"
	"xchgl %eax, %esi\n\t"                /* fill byte c -> %al                    */
	"movq  %rdx, %rcx\n\t"                /* length -> %rcx for rep stosb          */
	"pushq %rdi\n\t"                      /* save dst for the return value         */
	"rep stosb\n\t"
	"popq  %rax\n\t"                      /* return dst                            */
	"retq\n"
);
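
/*
 * Illustrative C equivalent of the overlap test above (sketch only, using
 * plain byte loops in place of "rep movsb" and assuming the standard
 * size_t and uintptr_t types; the weak asm routines are what actually get
 * linked): a single unsigned comparison is enough, because when dst is
 * below src, or when the two areas do not overlap, dst - src wraps around
 * or exceeds len and the forward path is taken; only when dst lands inside
 * [src, src + len) is the backward copy needed.
 *
 *	void *example_memmove(void *dst, const void *src, size_t len)
 *	{
 *		char *d = dst;
 *		const char *s = src;
 *
 *		if ((uintptr_t)dst - (uintptr_t)src >= len) {
 *			while (len--)
 *				*d++ = *s++;
 *		} else {
 *			d += len;
 *			s += len;
 *			while (len--)
 *				*--d = *--s;
 *		}
 *		return dst;
 *	}
 */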

#endif /* _NOLIBC_ARCH_X86_64_H */