/*
 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#ifndef __SYSDEP_STUB_H
#define __SYSDEP_STUB_H

#include <stddef.h>
#include <asm/ptrace.h>
#include <generated/asm-offsets.h>

#define STUB_MMAP_NR __NR_mmap2
#define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)

stub_syscall0(long syscall)16 static __always_inline long stub_syscall0(long syscall)
17 {
18 long ret;
19
20 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall)
21 : "memory");
22
23 return ret;
24 }
25
stub_syscall1(long syscall,long arg1)26 static __always_inline long stub_syscall1(long syscall, long arg1)
27 {
28 long ret;
29
30 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1)
31 : "memory");
32
33 return ret;
34 }
35
stub_syscall2(long syscall,long arg1,long arg2)36 static __always_inline long stub_syscall2(long syscall, long arg1, long arg2)
37 {
38 long ret;
39
40 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
41 "c" (arg2)
42 : "memory");
43
44 return ret;
45 }
46
stub_syscall3(long syscall,long arg1,long arg2,long arg3)47 static __always_inline long stub_syscall3(long syscall, long arg1, long arg2,
48 long arg3)
49 {
50 long ret;
51
52 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
53 "c" (arg2), "d" (arg3)
54 : "memory");
55
56 return ret;
57 }
58
stub_syscall4(long syscall,long arg1,long arg2,long arg3,long arg4)59 static __always_inline long stub_syscall4(long syscall, long arg1, long arg2,
60 long arg3, long arg4)
61 {
62 long ret;
63
64 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
65 "c" (arg2), "d" (arg3), "S" (arg4)
66 : "memory");
67
68 return ret;
69 }
70
stub_syscall5(long syscall,long arg1,long arg2,long arg3,long arg4,long arg5)71 static __always_inline long stub_syscall5(long syscall, long arg1, long arg2,
72 long arg3, long arg4, long arg5)
73 {
74 long ret;
75
76 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
77 "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
78 : "memory");
79
80 return ret;
81 }
82
stub_syscall6(long syscall,long arg1,long arg2,long arg3,long arg4,long arg5,long arg6)83 static __always_inline long stub_syscall6(long syscall, long arg1, long arg2,
84 long arg3, long arg4, long arg5,
85 long arg6)
86 {
87 struct syscall_args {
88 int ebx, ebp;
89 } args = { arg1, arg6 };
90 long ret;
91
92 __asm__ volatile ("pushl %%ebp;"
93 "movl 0x4(%%ebx),%%ebp;"
94 "movl (%%ebx),%%ebx;"
95 "int $0x80;"
96 "popl %%ebp"
97 : "=a" (ret)
98 : "0" (syscall), "b" (&args),
99 "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
100 : "memory");
101
102 return ret;
103 }
104
trap_myself(void)105 static __always_inline void trap_myself(void)
106 {
107 __asm("int3");
108 }
109
get_stub_data(void)110 static __always_inline void *get_stub_data(void)
111 {
112 unsigned long ret;
113
114 asm volatile (
115 "call _here_%=;"
116 "_here_%=:"
117 "popl %0;"
118 "andl %1, %0 ;"
119 "addl %2, %0 ;"
120 : "=a" (ret)
121 : "g" (~(UM_KERN_PAGE_SIZE - 1)),
122 "g" (UM_KERN_PAGE_SIZE));
123
124 return (void *)ret;
125 }
126
127 #define stub_start(fn) \
128 asm volatile ( \
129 "subl %0,%%esp ;" \
130 "movl %1, %%eax ; " \
131 "call *%%eax ;" \
132 :: "i" ((1 + STUB_DATA_PAGES) * UM_KERN_PAGE_SIZE), \
133 "i" (&fn))
134 #endif
135