/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name.
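 * For example, "x0" maps to offsetof(struct pt_regs, regs[0]) and "pc" to
 * offsetof(struct pt_regs, pc), as listed in regoffset_table above.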
 * If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, raw_smp_processor_id());
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info = {
		.si_signo = SIGTRAP,
		.si_errno = 0,
		.si_code = TRAP_HWBKPT,
		.si_addr = (void __user *)(bkpt->trigger),
	};

#ifdef CONFIG_COMPAT
	int i;

	if (!is_compat_task())
		goto send_sig;

	for (i = 0; i < ARM_MAX_BRP; ++i) {
		if (current->thread.debug.hbp_break[i] == bp) {
			info.si_errno = (i << 1) + 1;
			break;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; ++i) {
		if (current->thread.debug.hbp_watch[i] == bp) {
			info.si_errno = -((i << 1) + 1);
			break;
		}
	}

send_sig:
#endif
	force_sig_info(SIGTRAP, &info, current);
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
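 * The backing perf events are released via unregister_hw_breakpoint() so
 * no stale events remain attached to the task.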
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP)
			bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP)
			bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP) {
			tsk->thread.debug.hbp_break[idx] = bp;
			err = 0;
		}
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP) {
			tsk->thread.debug.hbp_watch[idx] = bp;
			err = 0;
		}
		break;
	}

	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
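	 * The event starts off disabled; the tracer supplies the real
	 * address and control value later via ptrace_hbp_set_addr() and
	 * ptrace_hbp_set_ctrl().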
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len = len;
	attr->bp_type = type;
	attr->bp_addr += offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
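	/* The info word is read-only, so incoming data for it is ignored. */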
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	uregs = &target->thread.fpsimd_state.user_fpsimd;

	if (target == current)
		fpsimd_preserve_current_state();

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_fpsimd_state newstate =
		target->thread.fpsimd_state.user_fpsimd;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
	if (ret)
		return ret;

	target->thread.fpsimd_state.user_fpsimd = newstate;
	fpsimd_flush_task_state(target);
	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.tp_value;

	if (target == current)
		tls_preserve_current_state();

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
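	 * The single AArch32 fpscr word is assembled from the AArch64 fpsr
	 * (status bits) and fpcr (control bits).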
	 */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				  0, vregs_end_pos);

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
					  vregs_end_pos, VFP_STATE_SIZE);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, void *kbuf, void __user *ubuf)
{
	compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
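 * For example, registers 1 and 2 are the address and control words of the
 * first breakpoint, while registers -1 and -2 describe the first watchpoint.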
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);

	debug_arch = debug_monitors_arch();
	wp_len = 8;
	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}
	set_fs(old_fs);

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;
	mm_segment_t old_fs = get_fs();

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	set_fs(KERNEL_DS);
	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
	set_fs(old_fs);

	return ret;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		regs->syscallno = ~0UL;

	regs->regs[regno] = saved_reg;
}

asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do the secure computing after ptrace; failures should be fast. */
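	/* A -1 from secure_computing() means the syscall must be skipped. */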
	if (secure_computing(NULL) == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}

/*
 * Bits which are always architecturally RES0 per ARM DDI 0487A.h
 * Userspace cannot use these until they have an architectural meaning.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
	 GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= COMPAT_PSR_E_BIT;
		else
			regs->pstate &= ~COMPAT_PSR_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
	    (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
	    (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
			COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
			COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
			COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
			COMPAT_PSR_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
		regs->pstate &= ~DBG_SPSR_SS;

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}