/* arch/x86/um/shared/sysdep/stub_64.h */
/*
 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#ifndef __SYSDEP_STUB_H
#define __SYSDEP_STUB_H

#include <stddef.h>
#include <sysdep/ptrace_user.h>
#include <generated/asm-offsets.h>
#include <linux/stddef.h>
#include <asm/prctl.h>

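/*
 * 64-bit mmap takes the file offset in bytes, so it can be passed
 * straight through; the 32-bit variant of this header uses mmap2,
 * which takes a page-shifted offset instead.
 */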
#define STUB_MMAP_NR __NR_mmap
#define MMAP_OFFSET(o) (o)

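/*
 * The syscall instruction itself clobbers %rcx (loaded with the return
 * RIP) and %r11 (loaded with the saved RFLAGS); "memory" is listed
 * because the kernel may read or write process memory as a side effect
 * of the call.
 */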
#define __syscall_clobber "r11","rcx","memory"
#define __syscall "syscall"

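/*
 * x86-64 syscall ABI: the syscall number goes in %rax and the return
 * value comes back in %rax; arguments 1-6 go in %rdi, %rsi, %rdx,
 * %r10, %r8 and %r9.  The first three map onto the "D", "S" and "d"
 * constraints below; %r10, %r8 and %r9 have no constraint letters, so
 * the wider helpers load them with explicit movq instructions and add
 * them to the clobber list.
 */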
static __always_inline long stub_syscall0(long syscall)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall) : __syscall_clobber );

	return ret;
}

static __always_inline long stub_syscall1(long syscall, long arg1)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1) : __syscall_clobber );

	return ret;
}

static __always_inline long stub_syscall2(long syscall, long arg1, long arg2)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2) : __syscall_clobber );

	return ret;
}

static __always_inline long stub_syscall3(long syscall, long arg1, long arg2,
					  long arg3)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3)
		: __syscall_clobber );

	return ret;
}

static __always_inline long stub_syscall4(long syscall, long arg1, long arg2,
					  long arg3, long arg4)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; " __syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4)
		: __syscall_clobber, "r10" );

	return ret;
}

static __always_inline long stub_syscall5(long syscall, long arg1, long arg2,
					  long arg3, long arg4, long arg5)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4), "g" (arg5)
		: __syscall_clobber, "r10", "r8" );

	return ret;
}

static __always_inline long stub_syscall6(long syscall, long arg1, long arg2,
					  long arg3, long arg4, long arg5,
					  long arg6)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; "
		__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4), "g" (arg5), "g" (arg6)
		: __syscall_clobber, "r10", "r8", "r9");

	return ret;
}

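/*
 * Illustrative use only (not part of this header): a page mapping
 * issued through the six-argument helper would look roughly like
 *
 *	stub_syscall6(STUB_MMAP_NR, addr, UM_KERN_PAGE_SIZE,
 *		      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
 *		      fd, MMAP_OFFSET(offset));
 *
 * where addr, fd and offset are hypothetical values supplied by the
 * caller.
 */
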
/*
 * int3 raises SIGTRAP, handing control back to the UML kernel that is
 * supervising the stub process.
 */
static __always_inline void trap_myself(void)
{
	__asm("int3");
}

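/*
 * The stub's code page is immediately followed by its data pages, so
 * rounding the current %rip down to a page boundary and adding one
 * page yields the data address:
 *
 *	(rip & ~(UM_KERN_PAGE_SIZE - 1)) + UM_KERN_PAGE_SIZE
 */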
static __always_inline void *get_stub_data(void)
{
	unsigned long ret;

	asm volatile (
		"lea 0(%%rip), %0;"
		"andq %1, %0 ;"
		"addq %2, %0 ;"
		: "=a" (ret)
		: "g" (~(UM_KERN_PAGE_SIZE - 1)),
		  "g" (UM_KERN_PAGE_SIZE));

	return (void *)ret;
}

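/*
 * Entry shim: move %rsp down by the total size of the stub mapping
 * (one code page plus STUB_DATA_PAGES data pages), then call fn
 * through %rax; the indirect call avoids the +/-2GB range limit of a
 * direct call to a potentially far-away address.
 */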
#define stub_start(fn)							\
	asm volatile (							\
		"subq %0,%%rsp ;"					\
		"movq %1,%%rax ;"					\
		"call *%%rax ;"						\
		:: "i" ((1 + STUB_DATA_PAGES) * UM_KERN_PAGE_SIZE),	\
		   "i" (&fn))

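/*
 * Re-apply the arch-specific state (the FS and GS segment bases) that
 * the UML kernel flagged for synchronization, then clear the flags so
 * the work is not repeated.
 */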
static __always_inline void
stub_seccomp_restore_state(struct stub_data_arch *arch)
{
	/*
	 * We could use _writefsbase_u64/_writegsbase_u64 if the host reports
	 * support in the hwcaps (HWCAP2_FSGSBASE).
	 */
	if (arch->sync & STUB_SYNC_FS_BASE)
		stub_syscall2(__NR_arch_prctl, ARCH_SET_FS, arch->fs_base);
	if (arch->sync & STUB_SYNC_GS_BASE)
		stub_syscall2(__NR_arch_prctl, ARCH_SET_GS, arch->gs_base);

	arch->sync = 0;
}

#endif