/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
 */
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/stddef.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/ftrace.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/syscall.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

static void init_fp_ctx(struct task_struct *target)
{
        /* If FP has been used then the target already has context */
        if (tsk_used_math(target))
                return;

        /* Begin with data registers set to all 1s... */
        memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));

        /* FCSR has been preset by `mips_set_personality_nan'. */

        /*
         * Record that the target has "used" math, such that the context
         * just initialised, and any modifications made by the caller,
         * aren't discarded.
         */
        set_stopped_child_used_math(target);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
        /* Don't load the watchpoint registers for the ex-child. */
        clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}

/*
 * Poke at FCSR according to its mask. Set the Cause bits even
 * if a corresponding Enable bit is set. This will be noticed at
 * the time the thread is switched to and SIGFPE thrown accordingly.
 */
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
        u32 fcr31;
        u32 mask;

        fcr31 = child->thread.fpu.fcr31;
        mask = boot_cpu_data.fpu_msk31;
        child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
}
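
/*
 * Illustrative sketch, not part of the original source: fpu_msk31 holds the
 * FCSR bits that are read-only on the current FPU.  A value written through
 * ptrace only lands in the writable bits; the read-only bits keep whatever
 * the thread already had.  With hypothetical numbers, mask = 0x01000000:
 *
 *      old fcr31 = 0x01000083, value written = 0x00000004
 *      result    = (0x00000004 & ~0x01000000) | (0x01000083 & 0x01000000)
 *                = 0x00000004 | 0x01000000 = 0x01000004
 *
 * i.e. the masked bit survives the write, everything else is replaced.
 */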

/*
 * Read a general register set. We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
        struct pt_regs *regs;
        int i;

        if (!access_ok(VERIFY_WRITE, data, 38 * 8))
                return -EIO;

        regs = task_pt_regs(child);

        for (i = 0; i < 32; i++)
                __put_user((long)regs->regs[i],
                           (__s64 __user *)&data->regs[i]);
        __put_user((long)regs->lo, (__s64 __user *)&data->lo);
        __put_user((long)regs->hi, (__s64 __user *)&data->hi);
        __put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
        __put_user((long)regs->cp0_badvaddr,
                   (__s64 __user *)&data->cp0_badvaddr);
        __put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
        __put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);

        return 0;
}

/*
 * Write a general register set. As for PTRACE_GETREGS, we always use
 * the 64-bit format. On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
        struct pt_regs *regs;
        int i;

        if (!access_ok(VERIFY_READ, data, 38 * 8))
                return -EIO;

        regs = task_pt_regs(child);

        for (i = 0; i < 32; i++)
                __get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
        __get_user(regs->lo, (__s64 __user *)&data->lo);
        __get_user(regs->hi, (__s64 __user *)&data->hi);
        __get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);

        /* badvaddr, status, and cause may not be written. */

        return 0;
}

int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
        int i;

        if (!access_ok(VERIFY_WRITE, data, 33 * 8))
                return -EIO;

        if (tsk_used_math(child)) {
                union fpureg *fregs = get_fpu_regs(child);
                for (i = 0; i < 32; i++)
                        __put_user(get_fpr64(&fregs[i], 0),
                                   i + (__u64 __user *)data);
        } else {
                for (i = 0; i < 32; i++)
                        __put_user((__u64) -1, i + (__u64 __user *) data);
        }

        __put_user(child->thread.fpu.fcr31, data + 64);
        __put_user(boot_cpu_data.fpu_id, data + 65);

        return 0;
}

int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
        union fpureg *fregs;
        u64 fpr_val;
        u32 value;
        int i;

        if (!access_ok(VERIFY_READ, data, 33 * 8))
                return -EIO;

        init_fp_ctx(child);
        fregs = get_fpu_regs(child);

        for (i = 0; i < 32; i++) {
                __get_user(fpr_val, i + (__u64 __user *)data);
                set_fpr64(&fregs[i], 0, fpr_val);
        }

        __get_user(value, data + 64);
        ptrace_setfcr31(child, value);

        /* FIR may not be written. */

        return 0;
}
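
/*
 * Illustrative userspace sketch (an assumption, not part of this file): a
 * tracer reading the 64-bit image that ptrace_getregs() exports.  The
 * hypothetical struct below simply mirrors the 38 * 8 byte layout checked
 * above: 32 GPRs, then lo, hi, cp0_epc, cp0_badvaddr, cp0_status, cp0_cause.
 *
 *      #include <stdio.h>
 *      #include <stdint.h>
 *      #include <sys/ptrace.h>
 *
 *      struct mips_regs64 {
 *              uint64_t regs[32];
 *              uint64_t lo, hi;
 *              uint64_t cp0_epc, cp0_badvaddr, cp0_status, cp0_cause;
 *      } r;
 *
 *      if (ptrace(PTRACE_GETREGS, pid, NULL, &r) == 0)
 *              printf("epc=%#llx ra=%#llx\n",
 *                     (unsigned long long)r.cp0_epc,
 *                     (unsigned long long)r.regs[31]);
 */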

int ptrace_get_watch_regs(struct task_struct *child,
                          struct pt_watch_regs __user *addr)
{
        enum pt_watch_style style;
        int i;

        if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
                return -EIO;
        if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
                return -EIO;

#ifdef CONFIG_32BIT
        style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
        style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

        __put_user(style, &addr->style);
        __put_user(boot_cpu_data.watch_reg_use_cnt,
                   &addr->WATCH_STYLE.num_valid);
        for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
                __put_user(child->thread.watch.mips3264.watchlo[i],
                           &addr->WATCH_STYLE.watchlo[i]);
                __put_user(child->thread.watch.mips3264.watchhi[i] &
                                (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
                           &addr->WATCH_STYLE.watchhi[i]);
                __put_user(boot_cpu_data.watch_reg_masks[i],
                           &addr->WATCH_STYLE.watch_masks[i]);
        }
        for (; i < 8; i++) {
                __put_user(0, &addr->WATCH_STYLE.watchlo[i]);
                __put_user(0, &addr->WATCH_STYLE.watchhi[i]);
                __put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
        }

        return 0;
}

int ptrace_set_watch_regs(struct task_struct *child,
                          struct pt_watch_regs __user *addr)
{
        int i;
        int watch_active = 0;
        unsigned long lt[NUM_WATCH_REGS];
        u16 ht[NUM_WATCH_REGS];

        if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
                return -EIO;
        if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
                return -EIO;
        /* Check the values. */
        for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
                __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
                if (lt[i] & __UA_LIMIT)
                        return -EINVAL;
#else
                if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
                        if (lt[i] & 0xffffffff80000000UL)
                                return -EINVAL;
                } else {
                        if (lt[i] & __UA_LIMIT)
                                return -EINVAL;
                }
#endif
                __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
                if (ht[i] & ~MIPS_WATCHHI_MASK)
                        return -EINVAL;
        }
        /* Install them. */
        for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
                if (lt[i] & MIPS_WATCHLO_IRW)
                        watch_active = 1;
                child->thread.watch.mips3264.watchlo[i] = lt[i];
                /* Set the G bit. */
                child->thread.watch.mips3264.watchhi[i] = ht[i];
        }

        if (watch_active)
                set_tsk_thread_flag(child, TIF_LOAD_WATCH);
        else
                clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

        return 0;
}
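
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * programming one data watchpoint on a 64-bit kernel through the
 * MIPS-specific PTRACE_GET_WATCH_REGS/PTRACE_SET_WATCH_REGS requests.
 * struct pt_watch_regs comes from <asm/ptrace.h>; the R/W enables sit in
 * the low bits of watchlo, matching the MIPS_WATCHLO_IRW check above.
 *
 *      #include <string.h>
 *      #include <sys/ptrace.h>
 *      #include <asm/ptrace.h>
 *
 *      struct pt_watch_regs wr;
 *
 *      memset(&wr, 0, sizeof(wr));
 *      if (ptrace(PTRACE_GET_WATCH_REGS, pid, &wr, NULL) != 0)
 *              return -1;
 *      wr.mips64.watchlo[0] = (watched_addr & ~7ull) | 3;      W and R bits
 *      wr.mips64.watchhi[0] = 0;
 *      if (ptrace(PTRACE_SET_WATCH_REGS, pid, &wr, NULL) != 0)
 *              return -1;
 */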

/* regset get/set implementations */

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static int gpr32_get(struct task_struct *target,
                     const struct user_regset *regset,
                     unsigned int pos, unsigned int count,
                     void *kbuf, void __user *ubuf)
{
        struct pt_regs *regs = task_pt_regs(target);
        u32 uregs[ELF_NGREG] = {};
        unsigned i;

        for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
                /* k0/k1 are copied as zero. */
                if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
                        continue;

                uregs[i] = regs->regs[i - MIPS32_EF_R0];
        }

        uregs[MIPS32_EF_LO] = regs->lo;
        uregs[MIPS32_EF_HI] = regs->hi;
        uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
        uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
        uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
        uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
                                   sizeof(uregs));
}

static int gpr32_set(struct task_struct *target,
                     const struct user_regset *regset,
                     unsigned int pos, unsigned int count,
                     const void *kbuf, const void __user *ubuf)
{
        struct pt_regs *regs = task_pt_regs(target);
        u32 uregs[ELF_NGREG];
        unsigned start, num_regs, i;
        int err;

        start = pos / sizeof(u32);
        num_regs = count / sizeof(u32);

        if (start + num_regs > ELF_NGREG)
                return -EIO;

        err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
                                 sizeof(uregs));
        if (err)
                return err;

        for (i = start; i < num_regs; i++) {
                /*
                 * Cast all values to signed here so that if this is a 64-bit
                 * kernel, the supplied 32-bit values will be sign extended.
                 */
                switch (i) {
                case MIPS32_EF_R1 ... MIPS32_EF_R25:
                        /* k0/k1 are ignored. */
                case MIPS32_EF_R28 ... MIPS32_EF_R31:
                        regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
                        break;
                case MIPS32_EF_LO:
                        regs->lo = (s32)uregs[i];
                        break;
                case MIPS32_EF_HI:
                        regs->hi = (s32)uregs[i];
                        break;
                case MIPS32_EF_CP0_EPC:
                        regs->cp0_epc = (s32)uregs[i];
                        break;
                }
        }

        return 0;
}

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

static int gpr64_get(struct task_struct *target,
                     const struct user_regset *regset,
                     unsigned int pos, unsigned int count,
                     void *kbuf, void __user *ubuf)
{
        struct pt_regs *regs = task_pt_regs(target);
        u64 uregs[ELF_NGREG] = {};
        unsigned i;

        for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
                /* k0/k1 are copied as zero. */
                if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
                        continue;

                uregs[i] = regs->regs[i - MIPS64_EF_R0];
        }

        uregs[MIPS64_EF_LO] = regs->lo;
        uregs[MIPS64_EF_HI] = regs->hi;
        uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
        uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
        uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
        uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
                                   sizeof(uregs));
}

static int gpr64_set(struct task_struct *target,
                     const struct user_regset *regset,
                     unsigned int pos, unsigned int count,
                     const void *kbuf, const void __user *ubuf)
{
        struct pt_regs *regs = task_pt_regs(target);
        u64 uregs[ELF_NGREG];
        unsigned start, num_regs, i;
        int err;

        start = pos / sizeof(u64);
        num_regs = count / sizeof(u64);

        if (start + num_regs > ELF_NGREG)
                return -EIO;

        err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
                                 sizeof(uregs));
        if (err)
                return err;

        for (i = start; i < num_regs; i++) {
                switch (i) {
                case MIPS64_EF_R1 ... MIPS64_EF_R25:
                        /* k0/k1 are ignored. */
                case MIPS64_EF_R28 ... MIPS64_EF_R31:
                        regs->regs[i - MIPS64_EF_R0] = uregs[i];
                        break;
                case MIPS64_EF_LO:
                        regs->lo = uregs[i];
                        break;
                case MIPS64_EF_HI:
                        regs->hi = uregs[i];
                        break;
                case MIPS64_EF_CP0_EPC:
                        regs->cp0_epc = uregs[i];
                        break;
                }
        }

        return 0;
}

#endif /* CONFIG_64BIT */
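
/*
 * Illustrative userspace sketch (an assumption, not part of this file): the
 * regset accessors above are reached through the generic
 * PTRACE_GETREGSET/PTRACE_SETREGSET requests with an iovec, e.g. fetching
 * the NT_PRSTATUS (general purpose register) image from a 64-bit task:
 *
 *      #include <elf.h>
 *      #include <stdint.h>
 *      #include <sys/ptrace.h>
 *      #include <sys/uio.h>
 *
 *      uint64_t gprs[45];      assuming ELF_NGREG == 45 slots
 *      struct iovec iov = { .iov_base = gprs, .iov_len = sizeof(gprs) };
 *
 *      if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *              use(gprs);      iov.iov_len is updated to the bytes copied
 */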

static int fpr_get(struct task_struct *target,
                   const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        unsigned i;
        int err;
        u64 fpr_val;

        /* XXX fcr31 */

        if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
                return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                           &target->thread.fpu,
                                           0, sizeof(elf_fpregset_t));

        for (i = 0; i < NUM_FPU_REGS; i++) {
                fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
                err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &fpr_val, i * sizeof(elf_fpreg_t),
                                          (i + 1) * sizeof(elf_fpreg_t));
                if (err)
                        return err;
        }

        return 0;
}

static int fpr_set(struct task_struct *target,
                   const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        unsigned i;
        int err;
        u64 fpr_val;

        /* XXX fcr31 */

        init_fp_ctx(target);

        if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
                return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                          &target->thread.fpu,
                                          0, sizeof(elf_fpregset_t));

        for (i = 0; i < NUM_FPU_REGS; i++) {
                err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &fpr_val, i * sizeof(elf_fpreg_t),
                                         (i + 1) * sizeof(elf_fpreg_t));
                if (err)
                        return err;
                set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
        }

        return 0;
}

enum mips_regset {
        REGSET_GPR,
        REGSET_FPR,
};

struct pt_regs_offset {
        const char *name;
        int offset;
};

#define REG_OFFSET_NAME(reg, r) {                               \
        .name = #reg,                                           \
        .offset = offsetof(struct pt_regs, r)                   \
}

#define REG_OFFSET_END {                                        \
        .name = NULL,                                           \
        .offset = 0                                             \
}

static const struct pt_regs_offset regoffset_table[] = {
        REG_OFFSET_NAME(r0, regs[0]),
        REG_OFFSET_NAME(r1, regs[1]),
        REG_OFFSET_NAME(r2, regs[2]),
        REG_OFFSET_NAME(r3, regs[3]),
        REG_OFFSET_NAME(r4, regs[4]),
        REG_OFFSET_NAME(r5, regs[5]),
        REG_OFFSET_NAME(r6, regs[6]),
        REG_OFFSET_NAME(r7, regs[7]),
        REG_OFFSET_NAME(r8, regs[8]),
        REG_OFFSET_NAME(r9, regs[9]),
        REG_OFFSET_NAME(r10, regs[10]),
        REG_OFFSET_NAME(r11, regs[11]),
        REG_OFFSET_NAME(r12, regs[12]),
        REG_OFFSET_NAME(r13, regs[13]),
        REG_OFFSET_NAME(r14, regs[14]),
        REG_OFFSET_NAME(r15, regs[15]),
        REG_OFFSET_NAME(r16, regs[16]),
        REG_OFFSET_NAME(r17, regs[17]),
        REG_OFFSET_NAME(r18, regs[18]),
        REG_OFFSET_NAME(r19, regs[19]),
        REG_OFFSET_NAME(r20, regs[20]),
        REG_OFFSET_NAME(r21, regs[21]),
        REG_OFFSET_NAME(r22, regs[22]),
        REG_OFFSET_NAME(r23, regs[23]),
        REG_OFFSET_NAME(r24, regs[24]),
        REG_OFFSET_NAME(r25, regs[25]),
        REG_OFFSET_NAME(r26, regs[26]),
        REG_OFFSET_NAME(r27, regs[27]),
        REG_OFFSET_NAME(r28, regs[28]),
        REG_OFFSET_NAME(r29, regs[29]),
        REG_OFFSET_NAME(r30, regs[30]),
        REG_OFFSET_NAME(r31, regs[31]),
        REG_OFFSET_NAME(c0_status, cp0_status),
        REG_OFFSET_NAME(hi, hi),
        REG_OFFSET_NAME(lo, lo),
#ifdef CONFIG_CPU_HAS_SMARTMIPS
        REG_OFFSET_NAME(acx, acx),
#endif
        REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
        REG_OFFSET_NAME(c0_cause, cp0_cause),
        REG_OFFSET_NAME(c0_epc, cp0_epc),
#ifdef CONFIG_CPU_CAVIUM_OCTEON
        REG_OFFSET_NAME(mpl0, mpl[0]),
        REG_OFFSET_NAME(mpl1, mpl[1]),
        REG_OFFSET_NAME(mpl2, mpl[2]),
        REG_OFFSET_NAME(mtp0, mtp[0]),
        REG_OFFSET_NAME(mtp1, mtp[1]),
        REG_OFFSET_NAME(mtp2, mtp[2]),
#endif
        REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:       the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 */
int regs_query_register_offset(const char *name)
{
        const struct pt_regs_offset *roff;
        for (roff = regoffset_table; roff->name != NULL; roff++)
                if (!strcmp(roff->name, name))
                        return roff->offset;
        return -EINVAL;
}
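
/*
 * Illustrative sketch (an assumption, not part of this file): name-based
 * users such as kprobe event argument parsing can combine the lookup above
 * with the regs_get_register() helper from <asm/ptrace.h> to read a
 * register by name, e.g. the first argument register a0 ($4):
 *
 *      int off = regs_query_register_offset("r4");
 *      unsigned long a0 = 0;
 *
 *      if (off >= 0)
 *              a0 = regs_get_register(regs, off);
 */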

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static const struct user_regset mips_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n              = ELF_NGREG,
                .size           = sizeof(unsigned int),
                .align          = sizeof(unsigned int),
                .get            = gpr32_get,
                .set            = gpr32_set,
        },
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG,
                .n              = ELF_NFPREG,
                .size           = sizeof(elf_fpreg_t),
                .align          = sizeof(elf_fpreg_t),
                .get            = fpr_get,
                .set            = fpr_set,
        },
};

static const struct user_regset_view user_mips_view = {
        .name           = "mips",
        .e_machine      = ELF_ARCH,
        .ei_osabi       = ELF_OSABI,
        .regsets        = mips_regsets,
        .n              = ARRAY_SIZE(mips_regsets),
};

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

static const struct user_regset mips64_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n              = ELF_NGREG,
                .size           = sizeof(unsigned long),
                .align          = sizeof(unsigned long),
                .get            = gpr64_get,
                .set            = gpr64_set,
        },
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG,
                .n              = ELF_NFPREG,
                .size           = sizeof(elf_fpreg_t),
                .align          = sizeof(elf_fpreg_t),
                .get            = fpr_get,
                .set            = fpr_set,
        },
};

static const struct user_regset_view user_mips64_view = {
        .name           = "mips64",
        .e_machine      = ELF_ARCH,
        .ei_osabi       = ELF_OSABI,
        .regsets        = mips64_regsets,
        .n              = ARRAY_SIZE(mips64_regsets),
};

#endif /* CONFIG_64BIT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
        return &user_mips_view;
#else
#ifdef CONFIG_MIPS32_O32
        if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
                return &user_mips_view;
#endif
        return &user_mips64_view;
#endif
}

long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
{
        int ret;
        void __user *addrp = (void __user *) addr;
        void __user *datavp = (void __user *) data;
        unsigned long __user *datalp = (void __user *) data;

        switch (request) {
        /* when I and D space are separate, these will need to be fixed. */
        case PTRACE_PEEKTEXT: /* read word at location addr. */
        case PTRACE_PEEKDATA:
                ret = generic_ptrace_peekdata(child, addr, data);
                break;

        /* Read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                struct pt_regs *regs;
                union fpureg *fregs;
                unsigned long tmp = 0;

                regs = task_pt_regs(child);
                ret = 0;  /* Default return value. */

                switch (addr) {
                case 0 ... 31:
                        tmp = regs->regs[addr];
                        break;
                case FPR_BASE ... FPR_BASE + 31:
                        if (!tsk_used_math(child)) {
                                /* FP not yet used */
                                tmp = -1;
                                break;
                        }
                        fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
                        if (test_thread_flag(TIF_32BIT_FPREGS)) {
                                /*
                                 * The odd registers are actually the high
                                 * order bits of the values stored in the even
                                 * registers - unless we're using r2k_switch.S.
                                 */
                                tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
                                                addr & 1);
                                break;
                        }
#endif
                        tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
                        break;
                case PC:
                        tmp = regs->cp0_epc;
                        break;
                case CAUSE:
                        tmp = regs->cp0_cause;
                        break;
                case BADVADDR:
                        tmp = regs->cp0_badvaddr;
                        break;
                case MMHI:
                        tmp = regs->hi;
                        break;
                case MMLO:
                        tmp = regs->lo;
                        break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
                case ACX:
                        tmp = regs->acx;
                        break;
#endif
                case FPC_CSR:
                        tmp = child->thread.fpu.fcr31;
                        break;
                case FPC_EIR:
                        /* implementation / version register */
                        tmp = boot_cpu_data.fpu_id;
                        break;
                case DSP_BASE ... DSP_BASE + 5: {
                        dspreg_t *dregs;

                        if (!cpu_has_dsp) {
                                tmp = 0;
                                ret = -EIO;
                                goto out;
                        }
                        dregs = __get_dsp_regs(child);
                        tmp = (unsigned long) (dregs[addr - DSP_BASE]);
                        break;
                }
                case DSP_CONTROL:
                        if (!cpu_has_dsp) {
                                tmp = 0;
                                ret = -EIO;
                                goto out;
                        }
                        tmp = child->thread.dsp.dspcontrol;
                        break;
                default:
                        tmp = 0;
                        ret = -EIO;
                        goto out;
                }
                ret = put_user(tmp, datalp);
                break;
        }

        /* when I and D space are separate, this will have to be fixed. */
        case PTRACE_POKETEXT: /* write the word at location addr. */
        case PTRACE_POKEDATA:
                ret = generic_ptrace_pokedata(child, addr, data);
                break;

        case PTRACE_POKEUSR: {
                struct pt_regs *regs;
                ret = 0;
                regs = task_pt_regs(child);

                switch (addr) {
                case 0 ... 31:
                        regs->regs[addr] = data;
                        break;
                case FPR_BASE ... FPR_BASE + 31: {
                        union fpureg *fregs = get_fpu_regs(child);

                        init_fp_ctx(child);
#ifdef CONFIG_32BIT
                        if (test_thread_flag(TIF_32BIT_FPREGS)) {
                                /*
                                 * The odd registers are actually the high
                                 * order bits of the values stored in the even
                                 * registers - unless we're using r2k_switch.S.
                                 */
                                set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
                                          addr & 1, data);
                                break;
                        }
#endif
                        set_fpr64(&fregs[addr - FPR_BASE], 0, data);
                        break;
                }
                case PC:
                        regs->cp0_epc = data;
                        break;
                case MMHI:
                        regs->hi = data;
                        break;
                case MMLO:
                        regs->lo = data;
                        break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
                case ACX:
                        regs->acx = data;
                        break;
#endif
                case FPC_CSR:
                        init_fp_ctx(child);
                        ptrace_setfcr31(child, data);
                        break;
                case DSP_BASE ... DSP_BASE + 5: {
                        dspreg_t *dregs;

                        if (!cpu_has_dsp) {
                                ret = -EIO;
                                break;
                        }

                        dregs = __get_dsp_regs(child);
                        dregs[addr - DSP_BASE] = data;
                        break;
                }
                case DSP_CONTROL:
                        if (!cpu_has_dsp) {
                                ret = -EIO;
                                break;
                        }
                        child->thread.dsp.dspcontrol = data;
                        break;
                default:
                        /* The rest are not allowed. */
                        ret = -EIO;
                        break;
                }
                break;
        }

        case PTRACE_GETREGS:
                ret = ptrace_getregs(child, datavp);
                break;

        case PTRACE_SETREGS:
                ret = ptrace_setregs(child, datavp);
                break;

        case PTRACE_GETFPREGS:
                ret = ptrace_getfpregs(child, datavp);
                break;

        case PTRACE_SETFPREGS:
                ret = ptrace_setfpregs(child, datavp);
                break;

        case PTRACE_GET_THREAD_AREA:
                ret = put_user(task_thread_info(child)->tp_value, datalp);
                break;

        case PTRACE_GET_WATCH_REGS:
                ret = ptrace_get_watch_regs(child, addrp);
                break;

        case PTRACE_SET_WATCH_REGS:
                ret = ptrace_set_watch_regs(child, addrp);
                break;

        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }
 out:
        return ret;
}
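
/*
 * Illustrative userspace sketch (an assumption, not part of this file): the
 * USER-area indices handled above (0..31 for the GPRs, then FPR_BASE, PC,
 * FPC_CSR, ...) are exported by the MIPS uapi <asm/ptrace.h>.  A tracer can
 * read the stopped child's program counter and stack pointer like this:
 *
 *      #include <errno.h>
 *      #include <stdio.h>
 *      #include <sys/ptrace.h>
 *      #include <asm/ptrace.h>
 *
 *      errno = 0;
 *      long pc = ptrace(PTRACE_PEEKUSER, pid, (void *)PC, NULL);
 *      long sp = ptrace(PTRACE_PEEKUSER, pid, (void *)29L, NULL);
 *      if (errno == 0)
 *              printf("pc=%#lx sp=%#lx\n", pc, sp);    $29 is sp
 */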

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
        user_exit();

        current_thread_info()->syscall = syscall;

        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs))
                return -1;

        if (secure_computing(NULL) == -1)
                return -1;

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->regs[2]);

        audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
                            regs->regs[6], regs->regs[7]);
        return syscall;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
        /*
         * We may come here right after calling schedule_user()
         * or do_notify_resume(), in which case we can be in RCU
         * user mode.
         */
        user_exit();

        audit_syscall_exit(regs);

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->regs[2]);

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, 0);

        user_enter();
}
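
/*
 * Illustrative userspace sketch (an assumption, not part of this file): the
 * hooks above are what a tracer observes when it resumes the child with
 * PTRACE_SYSCALL; the child stops once on syscall entry and once on exit,
 * and the syscall number and return value live in the register image
 * (v0 is $2 on MIPS):
 *
 *      for (;;) {
 *              ptrace(PTRACE_SYSCALL, pid, NULL, NULL);        run to entry
 *              waitpid(pid, &status, 0);
 *              ...                                     inspect arguments
 *              ptrace(PTRACE_SYSCALL, pid, NULL, NULL);        run to exit
 *              waitpid(pid, &status, 0);
 *              ...                                     inspect return value
 *      }
 */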