/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
 */
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/stddef.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/ftrace.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/syscall.h>
#include <linux/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}

/*
 * Read a general register set.  We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);

	return 0;
}

/*
 * Write a general register set.  As for PTRACE_GETREGS, we always use
 * the 64-bit format.  On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
	__get_user(regs->lo, (__s64 __user *)&data->lo);
	__get_user(regs->hi, (__s64 __user *)&data->hi);
	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);

	/* badvaddr, status, and cause may not be written.  */

	/* System call number may have been changed */
	mips_syscall_update_nr(child, regs);

	return 0;
}

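/*
 * Copy the tracee's hardware watchpoint state (watchlo/watchhi pairs and
 * the per-register masks reported by the CPU) into the user-supplied
 * struct pt_watch_regs.  Watch register slots beyond the number the CPU
 * implements are reported as zero.  Fails with -EIO if the CPU has no
 * watch registers or the destination buffer is not accessible.
 */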
int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
		return -EIO;

#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(boot_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] &
				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(boot_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}

int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~MIPS_WATCHHI_MASK)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & MIPS_WATCHLO_IRW)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/* Set the G bit. */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}

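/*
 * Illustrative sketch, not part of this file: a debugger reaches the two
 * helpers above through the MIPS-specific PTRACE_GET_WATCH_REGS and
 * PTRACE_SET_WATCH_REGS requests, roughly:
 *
 *	struct pt_watch_regs wr;
 *
 *	ptrace(PTRACE_GET_WATCH_REGS, pid, &wr, NULL);
 *	... pick a free slot, store the watched address and the desired
 *	    MIPS_WATCHLO_IRW enable bits in the mips32/mips64 sub-struct ...
 *	ptrace(PTRACE_SET_WATCH_REGS, pid, &wr, NULL);
 *
 * The exact userspace layout of struct pt_watch_regs is defined by the
 * uapi ptrace header, so the field names above are only indicative.
 */
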
/* regset get/set implementations */

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG] = {};

	mips_dump_regs32(uregs, regs);
	return membuf_write(&to, uregs, sizeof(uregs));
}

static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		/*
		 * Cast all values to signed here so that if this is a 64-bit
		 * kernel, the supplied 32-bit values will be sign extended.
		 */
		switch (i) {
		case MIPS32_EF_R1 ... MIPS32_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS32_EF_R28 ... MIPS32_EF_R31:
			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
			break;
		case MIPS32_EF_LO:
			regs->lo = (s32)uregs[i];
			break;
		case MIPS32_EF_HI:
			regs->hi = (s32)uregs[i];
			break;
		case MIPS32_EF_CP0_EPC:
			regs->cp0_epc = (s32)uregs[i];
			break;
		}
	}

	/* System call number may have been changed */
	mips_syscall_update_nr(target, regs);

	return 0;
}

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

static int gpr64_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG] = {};

	mips_dump_regs64(uregs, regs);
	return membuf_write(&to, uregs, sizeof(uregs));
}

static int gpr64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		switch (i) {
		case MIPS64_EF_R1 ... MIPS64_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS64_EF_R28 ... MIPS64_EF_R31:
			regs->regs[i - MIPS64_EF_R0] = uregs[i];
			break;
		case MIPS64_EF_LO:
			regs->lo = uregs[i];
			break;
		case MIPS64_EF_HI:
			regs->hi = uregs[i];
			break;
		case MIPS64_EF_CP0_EPC:
			regs->cp0_epc = uregs[i];
			break;
		}
	}

	/* System call number may have been changed */
	mips_syscall_update_nr(target, regs);

	return 0;
}

#endif /* CONFIG_64BIT */

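/*
 * Illustrative sketch, not part of this file: the gpr32/gpr64 regsets above
 * are normally reached from userspace via PTRACE_GETREGSET / PTRACE_SETREGSET
 * with the NT_PRSTATUS note type, e.g.:
 *
 *	elf_gregset_t gregs;
 *	struct iovec iov = { .iov_base = &gregs, .iov_len = sizeof(gregs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *
 * elf_gregset_t here is the userspace type from <sys/procfs.h>; the
 * kernel-side element size is chosen by the regset tables further down.
 */
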
#ifdef CONFIG_MIPS_FP_SUPPORT

/*
 * Poke at FCSR according to its mask.  Set the Cause bits even
 * if a corresponding Enable bit is set.  This will be noticed at
 * the time the thread is switched to and SIGFPE thrown accordingly.
 */
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
	u32 fcr31;
	u32 mask;

	fcr31 = child->thread.fpu.fcr31;
	mask = boot_cpu_data.fpu_msk31;
	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
}

int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;

	if (!access_ok(data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		union fpureg *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(get_fpr64(&fregs[i], 0),
				   i + (__u64 __user *)data);
	} else {
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);
	__put_user(boot_cpu_data.fpu_id, data + 65);

	return 0;
}

int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	union fpureg *fregs;
	u64 fpr_val;
	u32 value;
	int i;

	if (!access_ok(data, 33 * 8))
		return -EIO;

	init_fp_ctx(child);
	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++) {
		__get_user(fpr_val, i + (__u64 __user *)data);
		set_fpr64(&fregs[i], 0, fpr_val);
	}

	__get_user(value, data + 64);
	ptrace_setfcr31(child, value);

	/* FIR may not be written.  */

	return 0;
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
 * correspond 1:1 to buffer slots.  Only general registers are copied.
 */
static void fpr_get_fpa(struct task_struct *target,
			struct membuf *to)
{
	membuf_write(to, &target->thread.fpu,
		     NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
 * general register slots are copied to buffer slots.  Only general
 * registers are copied.
 */
static void fpr_get_msa(struct task_struct *target, struct membuf *to)
{
	unsigned int i;

	BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS; i++)
		membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0));
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
 * Choose the appropriate helper for general registers, and then copy
 * the FCSR and FIR registers separately.
 */
static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		fpr_get_fpa(target, &to);
	else
		fpr_get_msa(target, &to);

	membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32));
	membuf_write(&to, &boot_cpu_data.fpu_id, sizeof(u32));
	return 0;
}

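/*
 * Layout note: the NT_PRFPREG buffer produced by fpr_get() above is
 * NUM_FPU_REGS (32) 64-bit general register slots followed by the 32-bit
 * FCSR and the 32-bit FIR, matching the 33 * 8 byte size checked by
 * ptrace_getfpregs() / ptrace_setfpregs().
 */
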
/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * !CONFIG_CPU_HAS_MSA variant.  Buffer slots correspond 1:1 to FP
 * context's general register slots.  Only general registers are copied.
 */
static int fpr_set_fpa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
 * bits only of FP context's general register slots.  Only general
 * registers are copied.
 */
static int fpr_set_msa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	unsigned int i;
	u64 fpr_val;
	int err;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}

/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 * Choose the appropriate helper for general registers, and then copy
 * the FCSR register separately.  Ignore the incoming FIR register
 * contents though, as the register is read-only.
 *
 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
 * which is supposed to have been guaranteed by the kernel before
 * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
 * so that we can safely avoid preinitializing temporaries for
 * partial register writes.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fir_pos = fcr31_pos + sizeof(u32);
	u32 fcr31;
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));

	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
	else
		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	if (count > 0) {
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fcr31,
					 fcr31_pos, fcr31_pos + sizeof(u32));
		if (err)
			return err;

		ptrace_setfcr31(target, fcr31);
	}

	if (count > 0) {
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  fir_pos, fir_pos + sizeof(u32));
		return 0;
	}

	return err;
}

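/*
 * The NT_MIPS_FP_MODE regset handled below exposes the task's floating-point
 * mode as a single int; this should be the same value the task itself can
 * query and change through the PR_GET_FP_MODE / PR_SET_FP_MODE prctl()
 * operations.
 */
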
/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
static int fp_mode_get(struct task_struct *target,
		       const struct user_regset *regset,
		       struct membuf to)
{
	return membuf_store(&to, (int)mips_get_process_fp_mode(target));
}

/*
 * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
 *
 * We optimize for the case where `count % sizeof(int) == 0', which
 * is supposed to have been guaranteed by the kernel before calling
 * us, e.g. in `ptrace_regset'.  We enforce that requirement, so
 * that we can safely avoid preinitializing temporaries for partial
 * mode writes.
 */
static int fp_mode_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int fp_mode;
	int err;

	BUG_ON(count % sizeof(int));

	if (pos + count > sizeof(fp_mode))
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
				 sizeof(fp_mode));
	if (err)
		return err;

	if (count > 0)
		err = mips_set_process_fp_mode(target, fp_mode);

	return err;
}

#endif /* CONFIG_MIPS_FP_SUPPORT */

#ifdef CONFIG_CPU_HAS_MSA

struct msa_control_regs {
	unsigned int fir;
	unsigned int fcsr;
	unsigned int msair;
	unsigned int msacsr;
};

static void copy_pad_fprs(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf *to,
			  unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++)
			membuf_store(to, fill);
	}
}

static int msa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	const struct msa_control_regs ctrl_regs = {
		.fir = boot_cpu_data.fpu_id,
		.fcsr = target->thread.fpu.fcr31,
		.msair = boot_cpu_data.msa_id,
		.msacsr = target->thread.fpu.msacsr,
	};

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or MSA, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to,
			      sizeof(target->thread.fpu.fpr[0]));
	}

	return membuf_write(&to, &ctrl_regs, sizeof(ctrl_regs));
}

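/*
 * Write the supplied NT_MIPS_MSA buffer into the task's vector context:
 * either the full vector registers when the regset element size matches the
 * saved FP register size, or just the low 64 bits of each register otherwise,
 * followed by the control registers.  FCSR and MSACSR are written with their
 * exception cause bits cleared; FIR and MSAIR are read-only and ignored.
 */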
static int msa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	struct msa_control_regs ctrl_regs;
	unsigned int cp_sz;
	int i, err, start;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	if (!err)
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
					 wr_size, wr_size + sizeof(ctrl_regs));
	if (!err) {
		target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
		target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
	}

	return err;
}

#endif /* CONFIG_CPU_HAS_MSA */

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

/*
 * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
 */
static int dsp32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	u32 dspregs[NUM_DSP_REGS + 1];
	unsigned int i;

	BUG_ON(to.left % sizeof(u32));

	if (!cpu_has_dsp)
		return -EIO;

	for (i = 0; i < NUM_DSP_REGS; i++)
		dspregs[i] = target->thread.dsp.dspr[i];
	dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
	return membuf_write(&to, dspregs, sizeof(dspregs));
}

/*
 * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
 */
static int dsp32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned int start, num_regs, i;
	u32 dspregs[NUM_DSP_REGS + 1];
	int err;

	BUG_ON(count % sizeof(u32));

	if (!cpu_has_dsp)
		return -EIO;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > NUM_DSP_REGS + 1)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
				 sizeof(dspregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++)
		switch (i) {
		case 0 ... NUM_DSP_REGS - 1:
			target->thread.dsp.dspr[i] = (s32)dspregs[i];
			break;
		case NUM_DSP_REGS:
			target->thread.dsp.dspcontrol = (s32)dspregs[i];
			break;
		}

	return 0;
}

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

/*
 * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
 */
static int dsp64_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	u64 dspregs[NUM_DSP_REGS + 1];
	unsigned int i;

	BUG_ON(to.left % sizeof(u64));

	if (!cpu_has_dsp)
		return -EIO;

	for (i = 0; i < NUM_DSP_REGS; i++)
		dspregs[i] = target->thread.dsp.dspr[i];
	dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
	return membuf_write(&to, dspregs, sizeof(dspregs));
}

/*
 * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
 */
static int dsp64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned int start, num_regs, i;
	u64 dspregs[NUM_DSP_REGS + 1];
	int err;

	BUG_ON(count % sizeof(u64));

	if (!cpu_has_dsp)
		return -EIO;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > NUM_DSP_REGS + 1)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
				 sizeof(dspregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++)
		switch (i) {
		case 0 ... NUM_DSP_REGS - 1:
			target->thread.dsp.dspr[i] = dspregs[i];
			break;
		case NUM_DSP_REGS:
			target->thread.dsp.dspcontrol = dspregs[i];
			break;
		}

	return 0;
}

#endif /* CONFIG_64BIT */

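/*
 * In both the dsp32 and dsp64 variants above, the NT_MIPS_DSP buffer holds
 * the NUM_DSP_REGS entries of thread.dsp.dspr[] followed by one extra slot
 * for the DSPControl register, hence the NUM_DSP_REGS + 1 sizing used by
 * the regset tables below.
 */
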
/*
 * Determine whether the DSP context is present.
 */
static int dsp_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
}

enum mips_regset {
	REGSET_GPR,
	REGSET_DSP,
#ifdef CONFIG_MIPS_FP_SUPPORT
	REGSET_FPR,
	REGSET_FP_MODE,
#endif
#ifdef CONFIG_CPU_HAS_MSA
	REGSET_MSA,
#endif
};

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(reg, r) {				\
	.name = #reg,						\
	.offset = offsetof(struct pt_regs, r)			\
}

#define REG_OFFSET_END {					\
	.name = NULL,						\
	.offset = 0						\
}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(c0_status, cp0_status),
	REG_OFFSET_NAME(hi, hi),
	REG_OFFSET_NAME(lo, lo),
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	REG_OFFSET_NAME(acx, acx),
#endif
	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
	REG_OFFSET_NAME(c0_cause, cp0_cause),
	REG_OFFSET_NAME(c0_epc, cp0_epc),
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	REG_OFFSET_NAME(mpl0, mpl[0]),
	REG_OFFSET_NAME(mpl1, mpl[1]),
	REG_OFFSET_NAME(mpl2, mpl[2]),
	REG_OFFSET_NAME(mtp0, mtp[0]),
	REG_OFFSET_NAME(mtp1, mtp[1]),
	REG_OFFSET_NAME(mtp2, mtp[2]),
#endif
	REG_OFFSET_END,
};

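/*
 * Example (illustrative): regs_query_register_offset("c0_epc") returns
 * offsetof(struct pt_regs, cp0_epc), while an unknown register name yields
 * -EINVAL.  Users of the register-name lookup, such as kprobe event parsing,
 * rely on this to turn a name into a pt_regs offset.
 */
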
/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name.  If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static const struct user_regset mips_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned int),
		.align		= sizeof(unsigned int),
		.regset_get	= gpr32_get,
		.set		= gpr32_set,
	},
	[REGSET_DSP] = {
		.core_note_type	= NT_MIPS_DSP,
		.n		= NUM_DSP_REGS + 1,
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= dsp32_get,
		.set		= dsp32_set,
		.active		= dsp_active,
	},
#ifdef CONFIG_MIPS_FP_SUPPORT
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
	[REGSET_FP_MODE] = {
		.core_note_type	= NT_MIPS_FP_MODE,
		.n		= 1,
		.size		= sizeof(int),
		.align		= sizeof(int),
		.regset_get	= fp_mode_get,
		.set		= fp_mode_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	[REGSET_MSA] = {
		.core_note_type	= NT_MIPS_MSA,
		.n		= NUM_FPU_REGS + 1,
		.size		= 16,
		.align		= 16,
		.regset_get	= msa_get,
		.set		= msa_set,
	},
#endif
};

static const struct user_regset_view user_mips_view = {
	.name		= "mips",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips_regsets,
	.n		= ARRAY_SIZE(mips_regsets),
};

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

static const struct user_regset mips64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned long),
		.align		= sizeof(unsigned long),
		.regset_get	= gpr64_get,
		.set		= gpr64_set,
	},
	[REGSET_DSP] = {
		.core_note_type	= NT_MIPS_DSP,
		.n		= NUM_DSP_REGS + 1,
		.size		= sizeof(u64),
		.align		= sizeof(u64),
		.regset_get	= dsp64_get,
		.set		= dsp64_set,
		.active		= dsp_active,
	},
#ifdef CONFIG_MIPS_FP_SUPPORT
	[REGSET_FP_MODE] = {
		.core_note_type	= NT_MIPS_FP_MODE,
		.n		= 1,
		.size		= sizeof(int),
		.align		= sizeof(int),
		.regset_get	= fp_mode_get,
		.set		= fp_mode_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	[REGSET_MSA] = {
		.core_note_type	= NT_MIPS_MSA,
		.n		= NUM_FPU_REGS + 1,
		.size		= 16,
		.align		= 16,
		.regset_get	= msa_get,
		.set		= msa_set,
	},
#endif
};

static const struct user_regset_view user_mips64_view = {
	.name		= "mips64",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#ifdef CONFIG_MIPS32_N32

static const struct user_regset_view user_mipsn32_view = {
	.name		= "mipsn32",
	.e_flags	= EF_MIPS_ABI2,
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#endif /* CONFIG_MIPS32_N32 */

#endif /* CONFIG_64BIT */

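/*
 * Pick the regset view used for ptrace register-set access and core dumps:
 * the 32-bit "mips" view on 32-bit kernels and for o32 tasks, the "mipsn32"
 * view for n32 tasks, and the full 64-bit "mips64" view otherwise.
 */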
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
	return &user_mips_view;
#else
#ifdef CONFIG_MIPS32_O32
	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
		return &user_mips_view;
#endif
#ifdef CONFIG_MIPS32_N32
	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
		return &user_mipsn32_view;
#endif
	return &user_mips64_view;
#endif
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
#ifdef CONFIG_MIPS_FP_SUPPORT
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs;

			if (!tsk_used_math(child)) {
				/* FP not yet used */
				tmp = -1;
				break;
			}
			fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers.
				 */
				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
						addr & 1);
				break;
			}
#endif
			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
			break;
		}
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR:
			/* implementation / version register */
			tmp = boot_cpu_data.fpu_id;
			break;
#endif
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = dregs[addr - DSP_BASE];
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

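	/*
	 * Write the word at location addr in the USER area.  The register
	 * offsets accepted here mirror the PTRACE_PEEKUSR case above.
	 */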
	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			/* System call number may have been changed */
			if (addr == 2)
				mips_syscall_update_nr(child, regs);
			else if (addr == 4 &&
				 mips_syscall_is_indirect(child, regs))
				mips_syscall_update_nr(child, regs);
			break;
#ifdef CONFIG_MIPS_FP_SUPPORT
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs = get_fpu_regs(child);

			init_fp_ctx(child);
#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers.
				 */
				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
					  addr & 1, data);
				break;
			}
#endif
			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
			break;
		}
		case FPC_CSR:
			init_fp_ctx(child);
			ptrace_setfcr31(child, data);
			break;
#endif
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

#ifdef CONFIG_MIPS_FP_SUPPORT
	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;
#endif
	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
out:
	return ret;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
	user_exit();

	current_thread_info()->syscall = syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
		if (ptrace_report_syscall_entry(regs))
			return -1;
		syscall = current_thread_info()->syscall;
	}

#ifdef CONFIG_SECCOMP
	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
		int ret, i;
		struct seccomp_data sd;
		unsigned long args[6];

		sd.nr = syscall;
		sd.arch = syscall_get_arch(current);
		syscall_get_arguments(current, regs, args);
		for (i = 0; i < 6; i++)
			sd.args[i] = args[i];
		sd.instruction_pointer = KSTK_EIP(current);

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
		syscall = current_thread_info()->syscall;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

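	/*
	 * Report the syscall and its first four arguments (a0-a3, held in
	 * regs[4]..regs[7]) to the audit subsystem.
	 */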
	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	/*
	 * Negative syscall numbers are mistaken for rejected syscalls, but
	 * won't have had the return value set appropriately, so we do so now.
	 */
	if (syscall < 0)
		syscall_set_return_value(current, regs, -ENOSYS, 0);
	return syscall;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(regs, 0);

	user_enter();
}