// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * This routine will get a word off of the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	return (*((int *)stack));
}

/*
 * This routine will put a word on the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	*(unsigned long *) stack = data;
	return 0;
}

void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}

static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct thread_struct *thread = &tsk->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = thread->ptrace_bps[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_2;
		attr.bp_type = HW_BREAKPOINT_R;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->ptrace_bps[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;
		/* reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;
	}

	return 0;
}

void user_enable_single_step(struct task_struct *child)
{
	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	set_single_step(child, pc);
}

void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
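/*
 * Illustrative sketch (userspace, not part of this file): the single-step
 * machinery above is driven by the generic PTRACE_SINGLESTEP request,
 * roughly:
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, 0, 0);
 *	waitpid(pid, &status, 0);
 *
 * The ptrace core then calls user_enable_single_step(), which plants a
 * one-shot hardware breakpoint at the tracee's current PC, and
 * ptrace_triggered() disables that breakpoint again when it fires.
 */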
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       struct membuf to)
{
	const struct pt_regs *regs = task_pt_regs(target);

	return membuf_write(&to, regs, sizeof(struct pt_regs));
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->regs,
				 0, 16 * sizeof(unsigned long));
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->pc,
					 offsetof(struct pt_regs, pc),
					 sizeof(struct pt_regs));
	if (!ret)
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  sizeof(struct pt_regs), -1);

	return ret;
}

#ifdef CONFIG_SH_FPU
static int fpregs_get(struct task_struct *target,
		      const struct user_regset *regset,
		      struct membuf to)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	return membuf_write(&to, target->thread.xstate,
			    sizeof(struct user_fpu_struct));
}

static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
#endif

#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       struct membuf to)
{
	const struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;

	return membuf_write(&to, regs, sizeof(struct pt_dspregs));
}

static int dspregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
				 0, sizeof(struct pt_dspregs));
	if (!ret)
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	struct pt_regs *regs = task_pt_regs(target);

	return regs->sr & SR_DSP ? regset->n : 0;
}
#endif
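/*
 * Name <-> offset table for the registers saved in struct pt_regs. On
 * architectures providing HAVE_REGS_AND_STACK_ACCESS_API, a table like
 * this typically backs name-based lookups such as
 * regs_query_register_offset("pc") for kprobe-style event tracing; the
 * lookup helpers themselves are assumed to live outside this file.
 */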
const struct pt_regs_offset regoffset_table[] = {
	REGS_OFFSET_NAME(0),
	REGS_OFFSET_NAME(1),
	REGS_OFFSET_NAME(2),
	REGS_OFFSET_NAME(3),
	REGS_OFFSET_NAME(4),
	REGS_OFFSET_NAME(5),
	REGS_OFFSET_NAME(6),
	REGS_OFFSET_NAME(7),
	REGS_OFFSET_NAME(8),
	REGS_OFFSET_NAME(9),
	REGS_OFFSET_NAME(10),
	REGS_OFFSET_NAME(11),
	REGS_OFFSET_NAME(12),
	REGS_OFFSET_NAME(13),
	REGS_OFFSET_NAME(14),
	REGS_OFFSET_NAME(15),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pr),
	REG_OFFSET_NAME(sr),
	REG_OFFSET_NAME(gbr),
	REG_OFFSET_NAME(mach),
	REG_OFFSET_NAME(macl),
	REG_OFFSET_NAME(tra),
	REG_OFFSET_END,
};

/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
	REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
	REGSET_DSP,
#endif
};

static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(long),
		.align		= sizeof(long),
		.regset_get	= genregs_get,
		.set		= genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type	= NT_PRFPREG,
		.n		= sizeof(struct user_fpu_struct) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.regset_get	= fpregs_get,
		.set		= fpregs_set,
		.active		= fpregs_active,
	},
#endif

#ifdef CONFIG_SH_DSP
	[REGSET_DSP] = {
		.n		= sizeof(struct pt_dspregs) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.regset_get	= dspregs_get,
		.set		= dspregs_set,
		.active		= dspregs_active,
	},
#endif
};

static const struct user_regset_view user_sh_native_view = {
	.name		= "sh",
	.e_machine	= EM_SH,
	.regsets	= sh_regsets,
	.n		= ARRAY_SIZE(sh_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh_native_view;
}
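/*
 * Illustrative sketch (userspace, not part of this file): the regset view
 * above backs the generic PTRACE_GETREGSET/PTRACE_SETREGSET requests and
 * the ELF core dump notes. Fetching the general registers as an
 * NT_PRSTATUS note looks roughly like:
 *
 *	struct pt_regs gregs;
 *	struct iovec iov = { .iov_base = &gregs, .iov_len = sizeof(gregs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */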
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			if (!tsk_used_math(child)) {
				if (addr == offsetof(struct user, fpu.fpscr))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else {
				unsigned long index;
				ret = init_fpu(child);
				if (ret)
					break;
				index = addr - offsetof(struct user, fpu);
				tmp = ((unsigned long *)child->thread.xstate)
					[index >> 2];
			}
		} else if (addr == offsetof(struct user, u_fpvalid))
			tmp = !!tsk_used_math(child);
		else if (addr == PT_TEXT_ADDR)
			tmp = child->mm->start_code;
		else if (addr == PT_DATA_ADDR)
			tmp = child->mm->start_data;
		else if (addr == PT_TEXT_END_ADDR)
			tmp = child->mm->end_code;
		else if (addr == PT_TEXT_LEN)
			tmp = child->mm->end_code - child->mm->start_code;
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			unsigned long index;
			ret = init_fpu(child);
			if (ret)
				break;
			index = addr - offsetof(struct user, fpu);
			set_stopped_child_used_math(child);
			((unsigned long *)child->thread.xstate)
				[index >> 2] = data;
			ret = 0;
		} else if (addr == offsetof(struct user, u_fpvalid)) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   datap);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     datap);
#endif
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   datap);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     datap);
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    ptrace_report_syscall_entry(regs)) {
		regs->regs[0] = -ENOSYS;
		return -1;
	}

	if (secure_computing() == -1)
		return -1;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[0]);

	audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	return 0;
}
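/*
 * Illustrative sketch (userspace, not part of this file): both syscall
 * hooks are reached once a tracer asks for syscall stops with
 *
 *	ptrace(PTRACE_SYSCALL, pid, 0, 0);
 *
 * which sets TIF_SYSCALL_TRACE on the tracee, so the syscall entry path
 * calls do_syscall_trace_enter() above and the exit path calls
 * do_syscall_trace_leave() below.
 */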
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[0]);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(regs, step);
}