#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H

#include <linux/compiler.h>	/* For __user */
#include <asm/ptrace-abi.h>
#include <asm/processor-flags.h>

#ifdef __KERNEL__
#include <asm/ds.h>		/* the DS BTS struct is used for ptrace too */
#include <asm/segment.h>
#endif

#ifndef __ASSEMBLY__

#ifdef __i386__
/* This struct defines the way the registers are stored on the
   stack during a system call. */

#ifndef __KERNEL__

struct pt_regs {
	long ebx;
	long ecx;
	long edx;
	long esi;
	long edi;
	long ebp;
	long eax;
	int  xds;
	int  xes;
	int  xfs;
	/* int  gs; */
	long orig_eax;
	long eip;
	int  xcs;
	long eflags;
	long esp;
	int  xss;
};

#else /* __KERNEL__ */

struct pt_regs {
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned long ds;
	unsigned long es;
	unsigned long fs;
	/* int  gs; */
	unsigned long orig_ax;
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};

#endif /* __KERNEL__ */

#else /* __i386__ */

#ifndef __KERNEL__

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long rbp;
	unsigned long rbx;
/* arguments: non-interrupt, non-traced syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long rax;
	unsigned long rcx;
	unsigned long rdx;
	unsigned long rsi;
	unsigned long rdi;
	unsigned long orig_rax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long rip;
	unsigned long cs;
	unsigned long eflags;
	unsigned long rsp;
	unsigned long ss;
/* top of stack page */
};

#else /* __KERNEL__ */

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
/* arguments: non-interrupt, non-traced syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long orig_ax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
/* top of stack page */
};

#endif /* __KERNEL__ */
#endif /* !__i386__ */


#ifdef CONFIG_X86_PTRACE_BTS
/* a branch trace record entry
 *
 * In order to unify the interface between various processor versions,
 * we use the below data structure for all processors.
 */
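
/*
 * Illustrative record (the addresses are made up): a single branch entry
 * as a debugger would see it in the bts_struct defined below, after
 * reading the branch trace through the BTS ptrace extension (requests
 * declared in <asm/ptrace-abi.h>):
 *
 *	struct bts_struct rec = {
 *		.qualifier   = BTS_BRANCH,
 *		.variant.lbr = { .from_ip = 0x400100, .to_ip = 0x400200 },
 *	};
 *
 * BTS_TASK_ARRIVES/BTS_TASK_DEPARTS records use .variant.jiffies instead.
 */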
enum bts_qualifier {
	BTS_INVALID = 0,
	BTS_BRANCH,
	BTS_TASK_ARRIVES,
	BTS_TASK_DEPARTS
};

struct bts_struct {
	__u64 qualifier;
	union {
		/* BTS_BRANCH */
		struct {
			__u64 from_ip;
			__u64 to_ip;
		} lbr;
		/* BTS_TASK_ARRIVES or
		   BTS_TASK_DEPARTS */
		__u64 jiffies;
	} variant;
};
#endif /* CONFIG_X86_PTRACE_BTS */

#ifdef __KERNEL__

#include <linux/init.h>

struct cpuinfo_x86;
struct task_struct;

#ifdef CONFIG_X86_PTRACE_BTS
extern void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *);
extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier);
#else
#define ptrace_bts_init_intel(config) do {} while (0)
#endif /* CONFIG_X86_PTRACE_BTS */

extern unsigned long profile_pc(struct pt_regs *regs);

extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code, int si_code);
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);

extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);

static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->ax;
}

/*
 * user_mode_vm(regs) determines whether a register set came from user mode.
 * This is true if V8086 mode was enabled OR if the register set was from
 * protected mode with RPL-3 CS value.  This tricky test checks that with
 * one comparison.  Many places in the kernel can bypass this full check
 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
#else
	return !!(regs->cs & 3);
#endif
}

static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
		USER_RPL;
#else
	return user_mode(regs);
#endif
}
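
/*
 * Worked example of the single-comparison test in user_mode_vm() above
 * (32-bit case), assuming the usual definitions SEGMENT_RPL_MASK ==
 * USER_RPL == 3 and X86_VM_MASK == the EFLAGS VM bit (1 << 17):
 *
 *	(cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)
 *
 *	kernel CS, VM clear:  0 | 0x00000 = 0x00000  <  USER_RPL
 *	user CS,   VM clear:  3 | 0x00000 = 0x00003  >= USER_RPL
 *	any CS,    VM set:    x | 0x20000 >= 0x20000 >= USER_RPL
 *
 * so a single ">= USER_RPL" comparison covers both the RPL-3 case and
 * the V8086 case.
 */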

static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* No V86 mode support in long mode */
#endif
}

/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps.  So regs will be the current sp.
 *
 * This is valid only for kernel mode traps.
 */
static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (unsigned long)regs;
#else
	return regs->sp;
#endif
}

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return regs->ip;
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return regs->bp;
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}

/*
 * These are defined as in linux/ptrace.h; see that header.
 */
#define arch_has_single_step()	(1)
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);

extern void user_enable_block_step(struct task_struct *);
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step()	(1)
#else
#define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
#endif

struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);

#define __ARCH_WANT_COMPAT_SYS_PTRACE

#endif /* __KERNEL__ */

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PTRACE_H */
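
#if 0	/* Illustrative sketch only; not compiled as part of this header. */
/*
 * User-space side of the exported register layout above: a 64-bit tracer
 * reading a stopped tracee's registers.  ptrace(2) callers normally use
 * struct user_regs_struct from <sys/user.h>, whose leading fields match
 * the exported 64-bit struct pt_regs in this file; "child" is a
 * placeholder for a pid the caller has already attached to and stopped.
 */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

static void show_regs_of(pid_t child)
{
	struct user_regs_struct regs;

	/* One call fills the whole general-purpose register block. */
	if (ptrace(PTRACE_GETREGS, child, NULL, &regs) == -1) {
		perror("PTRACE_GETREGS");
		return;
	}

	/* orig_rax is the syscall number; rip/rsp are the user context. */
	printf("syscall=%llu rip=%#llx rsp=%#llx\n",
	       (unsigned long long)regs.orig_rax,
	       (unsigned long long)regs.rip,
	       (unsigned long long)regs.rsp);
}
#endif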