xref: /linux/arch/x86/um/shared/sysdep/stub_64.h (revision 831c1926ee728c3e747255f7c0f434762e8e863d)
/*
 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#ifndef __SYSDEP_STUB_H
#define __SYSDEP_STUB_H

#include <stddef.h>
#include <sysdep/ptrace_user.h>
#include <generated/asm-offsets.h>
#include <linux/stddef.h>

#define STUB_MMAP_NR __NR_mmap
#define MMAP_OFFSET(o) (o)

/*
 * The syscall instruction itself clobbers %rcx (return %rip) and
 * %r11 (saved %rflags), so both appear in the clobber list.
 */
#define __syscall_clobber "r11","rcx","memory"
#define __syscall "syscall"

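/*
 * x86-64 Linux syscall ABI, as used by the wrappers below: the syscall
 * number is passed in %rax and the arguments in %rdi, %rsi, %rdx, %r10,
 * %r8 and %r9; the result comes back in %rax.  GCC only provides
 * single-register constraints for %rax ("a"), %rdi ("D"), %rsi ("S")
 * and %rdx ("d"), which is why the fourth and later arguments are moved
 * into %r10/%r8/%r9 by hand and those registers are clobbered.
 */
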
static __always_inline long stub_syscall0(long syscall)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall) : __syscall_clobber );

	return ret;
}
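
/*
 * Illustrative use (not part of this header): a stub could fetch the
 * host pid of the process it runs in with
 *
 *	long pid = stub_syscall0(__NR_getpid);
 */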

static __always_inline long stub_syscall1(long syscall, long arg1)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1) : __syscall_clobber );

	return ret;
}

static __always_inline long stub_syscall2(long syscall, long arg1, long arg2)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2) : __syscall_clobber );

	return ret;
}
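
/*
 * Illustrative use (not part of this header): unmapping one page, with
 * addr standing in for a suitable hypothetical address:
 *
 *	stub_syscall2(__NR_munmap, addr, UM_KERN_PAGE_SIZE);
 */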

static __always_inline long stub_syscall3(long syscall, long arg1, long arg2,
					  long arg3)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3)
		: __syscall_clobber );

	return ret;
}

static __always_inline long stub_syscall4(long syscall, long arg1, long arg2,
					  long arg3, long arg4)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; " __syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4)
		: __syscall_clobber, "r10" );

	return ret;
}

static __always_inline long stub_syscall5(long syscall, long arg1, long arg2,
					  long arg3, long arg4, long arg5)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4), "g" (arg5)
		: __syscall_clobber, "r10", "r8" );

	return ret;
}

static __always_inline long stub_syscall6(long syscall, long arg1, long arg2,
					  long arg3, long arg4, long arg5,
					  long arg6)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; "
		__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4), "g" (arg5), "g" (arg6)
		: __syscall_clobber, "r10", "r8", "r9");

	return ret;
}
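
/*
 * Illustrative use (not part of this header): the six-argument form
 * matches mmap, e.g. mapping one page at a hypothetical addr from a
 * hypothetical descriptor fd at offset off:
 *
 *	long res = stub_syscall6(STUB_MMAP_NR, addr, UM_KERN_PAGE_SIZE,
 *				 PROT_READ | PROT_WRITE,
 *				 MAP_SHARED | MAP_FIXED, fd,
 *				 MMAP_OFFSET(off));
 */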

/* Trap back into the tracing process: int3 raises SIGTRAP. */
static __always_inline void trap_myself(void)
{
	__asm("int3");
}
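
/*
 * Illustrative note (not part of this header): stub code typically
 * performs its syscalls and then calls trap_myself() so that the
 * tracing UML kernel regains control via the resulting SIGTRAP.
 */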

/*
 * Compute the address of the stub's data area from the current
 * instruction pointer: round %rip down to the start of the stub code
 * page, then advance by one page to where the data pages begin.
 */
static __always_inline void *get_stub_data(void)
{
	unsigned long ret;

	asm volatile (
		"lea 0(%%rip), %0;"
		"andq %1, %0 ;"
		"addq %2, %0 ;"
		: "=a" (ret)
		: "g" (~(UM_KERN_PAGE_SIZE - 1)),
		  "g" (UM_KERN_PAGE_SIZE));

	return (void *)ret;
}
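
/*
 * Illustrative use (not part of this header), assuming the data area
 * follows the layout of UML's struct stub_data:
 *
 *	struct stub_data *d = get_stub_data();
 */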

/*
 * Stub entry helper: subtract the combined size of the stub code and
 * data pages from %rsp, then call fn indirectly through %rax (its
 * address is passed as an immediate operand).
 */
#define stub_start(fn)							\
	asm volatile (							\
		"subq %0,%%rsp ;"					\
		"movq %1,%%rax ;"					\
		"call *%%rax ;"						\
		:: "i" ((1 + STUB_DATA_PAGES) * UM_KERN_PAGE_SIZE),	\
		   "i" (&fn))
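
/*
 * Illustrative use (not part of this header), with real_init standing
 * in for a hypothetical C-level stub entry function:
 *
 *	stub_start(real_init);
 */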
#endif /* __SYSDEP_STUB_H */