/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Scott Foehner<sfoehner@yahoo.com>,
 * Kevin Chea
 * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
 */

#include <linux/audit.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#include <asm/coprocessor.h>
#include <asm/elf.h>
#include <asm/page.h>
#include <asm/ptrace.h>

/*
 * Regset "get" handler for the general-purpose registers (NT_PRSTATUS).
 * Builds a struct user_pt_regs snapshot from the traced task's pt_regs
 * and copies it out through the regset core.
 */
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	struct user_pt_regs newregs = {
		.pc = regs->pc,
		/* PS.EXCM is an in-kernel convenience bit; hide it from
		 * userspace (see the matching note in ptrace_peekusr).
		 */
		.ps = regs->ps & ~(1 << PS_EXCM_BIT),
		.lbeg = regs->lbeg,
		.lend = regs->lend,
		.lcount = regs->lcount,
		.sar = regs->sar,
		.threadptr = regs->threadptr,
		.windowbase = regs->windowbase,
		.windowstart = regs->windowstart,
		.syscall = regs->syscall,
	};

	/*
	 * regs->areg[] stores the address registers rotated by windowbase
	 * (4 registers, i.e. 16 bytes, per window pane); un-rotate them so
	 * newregs.a[] is in canonical a0..a(XCHAL_NUM_AREGS-1) order.
	 */
	memcpy(newregs.a,
	       regs->areg + XCHAL_NUM_AREGS - regs->windowbase * 4,
	       regs->windowbase * 16);
	memcpy(newregs.a + regs->windowbase * 4,
	       regs->areg,
	       (WSBITS - regs->windowbase) * 16);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &newregs, 0, -1);
}

/*
 * Regset "set" handler for the general-purpose registers (NT_PRSTATUS).
 * Validates the incoming image and writes it back into the traced task's
 * pt_regs, re-rotating the address registers to match windowbase.
 */
static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = {0};
	struct pt_regs *regs;
	/* Only these PS fields may be written from userspace. */
	const u32 ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	/* windowbase indexes 4-register panes; reject out-of-range values. */
	if (newregs.windowbase >= XCHAL_NUM_AREGS / 4)
		return -EINVAL;

	regs = task_pt_regs(target);
	regs->pc = newregs.pc;
	regs->ps = (regs->ps & ~ps_mask) | (newregs.ps & ps_mask);
	regs->lbeg = newregs.lbeg;
	regs->lend = newregs.lend;
	regs->lcount = newregs.lcount;
	regs->sar = newregs.sar;
	regs->threadptr = newregs.threadptr;

	/* Only overwrite the syscall number if the tracer supplied one. */
	if (newregs.syscall)
		regs->syscall = newregs.syscall;

	if (newregs.windowbase != regs->windowbase ||
	    newregs.windowstart != regs->windowstart) {
		u32 rotws, wmask;

		/*
		 * Recompute the window mask from the new windowbase and
		 * windowstart: rotws is windowstart rotated so the current
		 * frame's bit is at position 0 (with that bit cleared),
		 * and wmask packs the number of live panes above the
		 * current frame together with the low 4 rotated bits.
		 * NOTE(review): exact wmask encoding mirrors the kernel's
		 * window-overflow handling — confirm against asm/ptrace.h.
		 */
		rotws = (((newregs.windowstart |
			   (newregs.windowstart << WSBITS)) >>
			  newregs.windowbase) &
			 ((1 << WSBITS) - 1)) & ~1;
		wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
			(rotws & 0xF) | 1;
		regs->windowbase = newregs.windowbase;
		regs->windowstart = newregs.windowstart;
		regs->wmask = wmask;
	}

	/* Re-rotate a0..aN back into the windowbase-relative areg[] layout
	 * (inverse of the copy performed in gpr_get).
	 */
	memcpy(regs->areg + XCHAL_NUM_AREGS - newregs.windowbase * 4,
	       newregs.a, newregs.windowbase * 16);
	memcpy(regs->areg, newregs.a + newregs.windowbase * 4,
	       (WSBITS - newregs.windowbase) * 16);

	return 0;
}

/*
 * Regset "get" handler for the TIE / coprocessor state (NT_PRFPREG).
 * The scratch elf_xtregs_t is heap-allocated (presumably too large for
 * the kernel stack — note the kzalloc rather than a local).
 */
static int tie_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct pt_regs *regs = task_pt_regs(target);
	struct thread_info *ti = task_thread_info(target);
	elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);

	if (!newregs)
		return -ENOMEM;

	newregs->opt = regs->xtregs_opt;
	newregs->user = ti->xtregs_user;

#if XTENSA_HAVE_COPROCESSORS
	/* Flush all coprocessor registers to memory. */
	coprocessor_flush_all(ti);
	newregs->cp0 = ti->xtregs_cp.cp0;
	newregs->cp1 = ti->xtregs_cp.cp1;
	newregs->cp2 = ti->xtregs_cp.cp2;
	newregs->cp3 = ti->xtregs_cp.cp3;
	newregs->cp4 = ti->xtregs_cp.cp4;
	newregs->cp5 = ti->xtregs_cp.cp5;
	newregs->cp6 = ti->xtregs_cp.cp6;
	newregs->cp7 = ti->xtregs_cp.cp7;
#endif
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  newregs, 0, -1);
	kfree(newregs);
	return ret;
}

/*
 * Regset "set" handler for the TIE / coprocessor state (NT_PRFPREG).
 * Copies the user-supplied image into thread state; coprocessor
 * registers are flushed and released first so the saved area is the
 * single authoritative copy before being overwritten.
 */
static int tie_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs *regs = task_pt_regs(target);
	struct thread_info *ti = task_thread_info(target);
	elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);

	if (!newregs)
		return -ENOMEM;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 newregs, 0, -1);

	if (ret)
		goto exit;
	regs->xtregs_opt = newregs->opt;
	ti->xtregs_user = newregs->user;

#if XTENSA_HAVE_COPROCESSORS
	/* Flush all coprocessors before we overwrite them. */
	coprocessor_flush_all(ti);
	coprocessor_release_all(ti);
	ti->xtregs_cp.cp0 = newregs->cp0;
	ti->xtregs_cp.cp1 = newregs->cp1;
	ti->xtregs_cp.cp2 = newregs->cp2;
	ti->xtregs_cp.cp3 = newregs->cp3;
	ti->xtregs_cp.cp4 = newregs->cp4;
	ti->xtregs_cp.cp5 = newregs->cp5;
	ti->xtregs_cp.cp6 = newregs->cp6;
	ti->xtregs_cp.cp7 = newregs->cp7;
#endif
exit:
	kfree(newregs);
	return ret;
}

enum xtensa_regset {
	REGSET_GPR,
	REGSET_TIE,
};

/* Regset table exported to the ptrace/coredump core via
 * task_user_regset_view(): GPRs as NT_PRSTATUS, TIE state as NT_PRFPREG.
 */
static const struct user_regset xtensa_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = gpr_get,
		.set = gpr_set,
	},
	[REGSET_TIE] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(elf_xtregs_t) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = tie_get,
		.set = tie_set,
	},
};

static const struct user_regset_view user_xtensa_view = {
	.name = "xtensa",
	.e_machine = EM_XTENSA,
	.regsets = xtensa_regsets,
	.n = ARRAY_SIZE(xtensa_regsets)
};

/* There is a single regset view for all tasks on this architecture. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_xtensa_view;
}

void user_enable_single_step(struct task_struct *child)
{
	child->ptrace |= PT_SINGLESTEP;
}

void user_disable_single_step(struct task_struct *child)
{
	child->ptrace &= ~PT_SINGLESTEP;
}

/*
 * Called by kernel/ptrace.c when detaching to disable single stepping.
 */

void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do.. */
}

/* PTRACE_GETREGS: copy the GPR regset to userspace in one shot. */
static int ptrace_getregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_to_user(child, &user_xtensa_view, REGSET_GPR,
				   0, sizeof(xtensa_gregset_t), uregs);
}

/* PTRACE_SETREGS: overwrite the GPR regset from userspace. */
static int ptrace_setregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_from_user(child, &user_xtensa_view, REGSET_GPR,
				     0, sizeof(xtensa_gregset_t), uregs);
}

/* PTRACE_GETXTREGS: copy the TIE/coprocessor regset to userspace. */
static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_to_user(child, &user_xtensa_view, REGSET_TIE,
				   0, sizeof(elf_xtregs_t), uregs);
}

/* PTRACE_SETXTREGS: overwrite the TIE/coprocessor regset from userspace. */
static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_from_user(child, &user_xtensa_view, REGSET_TIE,
				     0, sizeof(elf_xtregs_t), uregs);
}

/*
 * PTRACE_PEEKUSR: read one register, selected by regno, from the traced
 * task and store it at *ret. Unknown register numbers yield -EIO.
 */
static int ptrace_peekusr(struct task_struct *child, long regno,
			  long __user *ret)
{
	struct pt_regs *regs;
	unsigned long tmp;

	regs = task_pt_regs(child);
	tmp = 0; /* Default return value. */

	switch(regno) {
	case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
		tmp = regs->areg[regno - REG_AR_BASE];
		break;

	case REG_A_BASE ... REG_A_BASE + 15:
		tmp = regs->areg[regno - REG_A_BASE];
		break;

	case REG_PC:
		tmp = regs->pc;
		break;

	case REG_PS:
		/* Note: PS.EXCM is not set while user task is running;
		 * its being set in regs is for exception handling
		 * convenience.
		 */
		tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
		break;

	case REG_WB:
		break;		/* tmp = 0 */

	case REG_WS:
	{
		/* Report windowstart rotated down by windowbase so the
		 * tracer sees it relative to the current frame.
		 */
		unsigned long wb = regs->windowbase;
		unsigned long ws = regs->windowstart;
		tmp = ((ws >> wb) | (ws << (WSBITS - wb))) &
			((1 << WSBITS) - 1);
		break;
	}
	case REG_LBEG:
		tmp = regs->lbeg;
		break;

	case REG_LEND:
		tmp = regs->lend;
		break;

	case REG_LCOUNT:
		tmp = regs->lcount;
		break;

	case REG_SAR:
		tmp = regs->sar;
		break;

	case SYSCALL_NR:
		tmp = regs->syscall;
		break;

	default:
		return -EIO;
	}
	return put_user(tmp, ret);
}

/*
 * PTRACE_POKEUSR: write one register, selected by regno. Only AR/A
 * registers, PC and the syscall number are writable this way; anything
 * else returns -EIO.
 */
static int ptrace_pokeusr(struct task_struct *child, long regno, long val)
{
	struct pt_regs *regs;
	regs = task_pt_regs(child);

	switch (regno) {
	case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
		regs->areg[regno - REG_AR_BASE] = val;
		break;

	case REG_A_BASE ... REG_A_BASE + 15:
		regs->areg[regno - REG_A_BASE] = val;
		break;

	case REG_PC:
		regs->pc = val;
		break;

	case SYSCALL_NR:
		regs->syscall = val;
		break;

	default:
		return -EIO;
	}
	return 0;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * perf callback fired when a ptrace-installed hardware breakpoint or
 * watchpoint triggers. Encodes the slot index in the same "bit 0 =
 * data, bits 31..1 = register number" scheme used by
 * PTRACE_GET/SETHBPREGS and forwards it to the tracer via a trap errno.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	int i;
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);

	if (bp->attr.bp_type & HW_BREAKPOINT_X) {
		/* Instruction breakpoint: even encoding (i << 1). */
		for (i = 0; i < XCHAL_NUM_IBREAK; ++i)
			if (current->thread.ptrace_bp[i] == bp)
				break;
		i <<= 1;
	} else {
		/* Data watchpoint: odd encoding ((i << 1) | 1). */
		for (i = 0; i < XCHAL_NUM_DBREAK; ++i)
			if (current->thread.ptrace_wp[i] == bp)
				break;
		i = (i << 1) | 1;
	}

	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
}

/* Create a disabled user hw breakpoint of the given type for tsk;
 * PTRACE_SETHBPREGS fills in address/length later.
 */
static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr = 0;
	attr.bp_len = 1;
	attr.bp_type = type;
	attr.disabled = 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}

/*
 * Address bit 0 choose instruction (0) or data (1) break register, bits
 * 31..1 are the register number.
 * Both PTRACE_GETHBPREGS and PTRACE_SETHBPREGS transfer two 32-bit words:
 * address (0) and control (1).
 * Instruction breakpoint control word is 0 to clear breakpoint, 1 to set.
 * Data breakpoint control word bit 31 is 'trigger on store', bit 30 is
 * 'trigger on load', bits 29..0 are length. Length 0 is used to clear a
 * breakpoint. To set a breakpoint length must be a power of 2 in the range
 * 1..64 and the address must be length-aligned.
 */

static long ptrace_gethbpregs(struct task_struct *child, long addr,
			      long __user *datap)
{
	struct perf_event *bp;
	u32 user_data[2] = {0};
	bool dbreak = addr & 1;
	unsigned idx = addr >> 1;

	if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
	    (dbreak && idx >= XCHAL_NUM_DBREAK))
		return -EINVAL;

	if (dbreak)
		bp = child->thread.ptrace_wp[idx];
	else
		bp = child->thread.ptrace_bp[idx];

	if (bp) {
		user_data[0] = bp->attr.bp_addr;
		/* A disabled breakpoint reports length 0 (i.e. cleared). */
		user_data[1] = bp->attr.disabled ? 0 : bp->attr.bp_len;
		if (dbreak) {
			if (bp->attr.bp_type & HW_BREAKPOINT_R)
				user_data[1] |= DBREAKC_LOAD_MASK;
			if (bp->attr.bp_type & HW_BREAKPOINT_W)
				user_data[1] |= DBREAKC_STOR_MASK;
		}
	}

	if (copy_to_user(datap, user_data, sizeof(user_data)))
		return -EFAULT;

	return 0;
}

static long ptrace_sethbpregs(struct task_struct *child, long addr,
			      long __user *datap)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	u32 user_data[2];
	bool dbreak = addr & 1;
	unsigned idx = addr >> 1;
	int bp_type = 0;

	if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
	    (dbreak && idx >= XCHAL_NUM_DBREAK))
		return -EINVAL;

	if (copy_from_user(user_data, datap, sizeof(user_data)))
		return -EFAULT;

	if (dbreak) {
		bp = child->thread.ptrace_wp[idx];
		if (user_data[1] & DBREAKC_LOAD_MASK)
			bp_type |= HW_BREAKPOINT_R;
		if (user_data[1] & DBREAKC_STOR_MASK)
			bp_type |= HW_BREAKPOINT_W;
	} else {
		bp = child->thread.ptrace_bp[idx];
		bp_type = HW_BREAKPOINT_X;
	}

	/* Lazily create the perf event on first use of this slot. */
	if (!bp) {
		bp = ptrace_hbp_create(child,
				       bp_type ? bp_type : HW_BREAKPOINT_RW);
		if (IS_ERR(bp))
			return PTR_ERR(bp);
		if (dbreak)
			child->thread.ptrace_wp[idx] = bp;
		else
			child->thread.ptrace_bp[idx] = bp;
	}

	attr = bp->attr;
	/* Strip the load/store flag bits to recover the length field. */
	attr.bp_len = user_data[1] & ~(DBREAKC_LOAD_MASK | DBREAKC_STOR_MASK);
	attr.bp_addr = user_data[0];
	attr.bp_type = bp_type;
	/* Length 0 means "clear": keep the event but disable it. */
	attr.disabled = !attr.bp_len;

	return modify_user_hw_breakpoint(bp, &attr);
}
#endif

/*
 * Architecture hook for the ptrace(2) syscall: dispatch xtensa-specific
 * requests, fall through to the generic ptrace_request() for the rest.
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	void __user *datap = (void __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:	/* read register specified by addr. */
		ret = ptrace_peekusr(child, addr, datap);
		break;

	case PTRACE_POKEUSR:	/* write register specified by addr. */
		ret = ptrace_pokeusr(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datap);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datap);
		break;

	case PTRACE_GETXTREGS:
		ret = ptrace_getxregs(child, datap);
		break;

	case PTRACE_SETXTREGS:
		ret = ptrace_setxregs(child, datap);
		break;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case PTRACE_GETHBPREGS:
		ret = ptrace_gethbpregs(child, addr, datap);
		break;

	case PTRACE_SETHBPREGS:
		ret = ptrace_sethbpregs(child, addr, datap);
		break;
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

void do_syscall_trace_leave(struct pt_regs *regs);

/*
 * Syscall-entry tracing hook. Returns 1 when the syscall should proceed,
 * 0 when it must be skipped (tracer aborted it, the syscall number is
 * invalid, or seccomp rejected it). On the skip paths areg[2] — the
 * syscall return-value register — is preloaded with -ENOSYS.
 */
int do_syscall_trace_enter(struct pt_regs *regs)
{
	if (regs->syscall == NO_SYSCALL)
		regs->areg[2] = -ENOSYS;

	/* Tracer may abort the syscall from its entry stop. */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs)) {
		regs->areg[2] = -ENOSYS;
		regs->syscall = NO_SYSCALL;
		return 0;
	}

	/* Balance the entry report with an exit report before skipping. */
	if (regs->syscall == NO_SYSCALL ||
	    secure_computing() == -1) {
		do_syscall_trace_leave(regs);
		return 0;
	}

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall_get_nr(current, regs));

	/* xtensa syscall ABI: number in regs->syscall, args in a6,a3..a5. */
	audit_syscall_entry(regs->syscall, regs->areg[6],
			    regs->areg[3], regs->areg[4],
			    regs->areg[5]);
	return 1;
}

/*
 * Syscall-exit tracing hook: audit, tracepoint, and tracer notification
 * (the latter also covers single-step reporting via TIF_SINGLESTEP).
 */
void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	step = test_thread_flag(TIF_SINGLESTEP);

	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}