xref: /linux/arch/x86/um/shared/sysdep/stub_64.h (revision ae22a94997b8a03dcb3c922857c203246711f9d4)
1 /*
2  * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
3  * Licensed under the GPL
4  */
5 
6 #ifndef __SYSDEP_STUB_H
7 #define __SYSDEP_STUB_H
8 
9 #include <sysdep/ptrace_user.h>
10 #include <generated/asm-offsets.h>
11 #include <linux/stddef.h>
12 
13 #define STUB_MMAP_NR __NR_mmap
14 #define MMAP_OFFSET(o) (o)
15 
16 #define __syscall_clobber "r11","rcx","memory"
17 #define __syscall "syscall"
18 
19 static __always_inline long stub_syscall0(long syscall)
20 {
21 	long ret;
22 
23 	__asm__ volatile (__syscall
24 		: "=a" (ret)
25 		: "0" (syscall) : __syscall_clobber );
26 
27 	return ret;
28 }
29 
30 static __always_inline long stub_syscall2(long syscall, long arg1, long arg2)
31 {
32 	long ret;
33 
34 	__asm__ volatile (__syscall
35 		: "=a" (ret)
36 		: "0" (syscall), "D" (arg1), "S" (arg2) : __syscall_clobber );
37 
38 	return ret;
39 }
40 
41 static __always_inline long stub_syscall3(long syscall, long arg1, long arg2,
42 					  long arg3)
43 {
44 	long ret;
45 
46 	__asm__ volatile (__syscall
47 		: "=a" (ret)
48 		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3)
49 		: __syscall_clobber );
50 
51 	return ret;
52 }
53 
/*
 * Issue a four-argument system call.
 *
 * The fourth syscall argument lives in %r10, which has no GCC constraint
 * letter, so it is loaded by hand from operand %5 before the syscall
 * instruction and %r10 is declared clobbered.
 */
static __always_inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
				 long arg4)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; " __syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4)
		: __syscall_clobber, "r10" );

	return ret;
}
67 
/*
 * Issue a five-argument system call.
 *
 * The fourth and fifth syscall arguments belong in %r10 and %r8, which
 * have no GCC constraint letters; they are loaded manually from operands
 * %5 and %6, and both registers are declared clobbered.
 */
static __always_inline long stub_syscall5(long syscall, long arg1, long arg2,
					  long arg3, long arg4, long arg5)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4), "g" (arg5)
		: __syscall_clobber, "r10", "r8" );

	return ret;
}
81 
82 static __always_inline void trap_myself(void)
83 {
84 	__asm("int3");
85 }
86 
/*
 * mmap() the stub data area over itself and then trap back to the host.
 *
 * The stub data base address is recovered by masking the current stack
 * pointer with the (power-of-two) stub data size.  The file descriptor
 * and file offset for the mapping are read from fixed fields inside the
 * stub data area (UML_STUB_FIELD_FD / UML_STUB_FIELD_OFFSET); the mmap
 * return value is stored back into the UML_STUB_FIELD_CHILD_ERR field,
 * and int3 hands control back to the tracing host.
 */
static __always_inline void remap_stack_and_trap(void)
{
	__asm__ volatile (
		"movq %0,%%rax ;"		/* rax = mmap syscall number */
		"movq %%rsp,%%rdi ;"
		"andq %1,%%rdi ;"		/* rdi = stub data base = addr arg */
		"movq %2,%%r10 ;"		/* r10 = flags (MAP_FIXED|MAP_SHARED) */
		"movq %%rdi,%%r8 ; addq %3,%%r8 ; movq (%%r8),%%r8 ;"	/* r8 = fd from stub data */
		"movq %%rdi,%%r9 ; addq %4,%%r9 ; movq (%%r9),%%r9 ;"	/* r9 = offset from stub data */
		__syscall ";"
		"movq %%rsp,%%rdi ; andq %1,%%rdi ;"	/* recompute base (rdi was clobbered) */
		"addq %5,%%rdi ; movq %%rax, (%%rdi) ;"	/* store result in CHILD_ERR field */
		"int3"
		: :
		"g" (STUB_MMAP_NR),
		"g" (~(STUB_DATA_PAGES * UM_KERN_PAGE_SIZE - 1)),
		"g" (MAP_FIXED | MAP_SHARED),
		"g" (UML_STUB_FIELD_FD),
		"g" (UML_STUB_FIELD_OFFSET),
		"g" (UML_STUB_FIELD_CHILD_ERR),
		"S" (STUB_DATA_PAGES * UM_KERN_PAGE_SIZE),	/* rsi = length arg */
		"d" (PROT_READ | PROT_WRITE)			/* rdx = prot arg */
		:
		__syscall_clobber, "r10", "r8", "r9");
}
112 
113 static __always_inline void *get_stub_data(void)
114 {
115 	unsigned long ret;
116 
117 	asm volatile (
118 		"movq %%rsp,%0 ;"
119 		"andq %1,%0"
120 		: "=a" (ret)
121 		: "g" (~(STUB_DATA_PAGES * UM_KERN_PAGE_SIZE - 1)));
122 
123 	return (void *)ret;
124 }
125 #endif
126