/*
 * Ptrace user space interface.
 *
 *    Copyright IBM Corp. 1999, 2010
 *    Author(s): Denis Joseph Barrow
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

enum s390_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_LAST_BREAK,
	REGSET_TDB,
	REGSET_SYSTEM_CALL,
	REGSET_GENERAL_EXTENDED,
};

void update_per_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;

#ifdef CONFIG_64BIT
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		unsigned long cr0, cr0_new;

		__ctl_store(cr0, 0, 0);
		/* set or clear transaction execution bits 8 and 9. */
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr0_new = cr0 & ~(3UL << 54);
		else
			cr0_new = cr0 | (3UL << 54);
		/* Only load control register 0 if necessary. */
		if (cr0 != cr0_new)
			__ctl_load(cr0_new, 0, 0);
	}
#endif
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
		new.control |= PER_EVENT_IFETCH;
#ifdef CONFIG_64BIT
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
#endif
		new.start = 0;
		new.end = PSW_ADDR_INSN;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}

void user_enable_single_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	if (task == current)
		update_per_regs(task);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	if (task == current)
		update_per_regs(task);
}
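/*
 * Background note: PER stands for Program Event Recording, the hardware
 * tracing facility programmed through control registers 9 (event mask),
 * 10 (start address) and 11 (end address) together with the PER mask bit
 * in the PSW.  The two helpers above are the generic hooks the core ptrace
 * code invokes for PTRACE_SINGLESTEP; they merely toggle TIF_SINGLE_STEP,
 * and update_per_regs() merges that flag with whatever PER set a debugger
 * installed through the user area.  For a task that is not currently
 * running the new PER setup takes effect once the task runs again.
 */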
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_tsk_thread_flag(task, TIF_PER_TRAP);
	task->thread.per_flags = 0;
}

#ifndef CONFIG_64BIT
# define __ADDR_MASK 3
#else
# define __ADDR_MASK 7
#endif

static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}
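/*
 * Note on the "dummy" idiom above (used throughout this file): taking a
 * member address off a NULL struct pointer is a hand-rolled offsetof(), so
 * (addr_t) &dummy->cr9 is simply the byte offset of cr9 within
 * struct per_struct_kernel.  The comparisons therefore match the offset
 * into the per_info part of the user area that the caller passes in;
 * addr == offsetof(struct per_struct_kernel, cr9) would be an equivalent
 * spelling.
 */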
/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask)
			/* Return a clean psw mask. */
			tmp = psw_user_bits | (tmp & PSW_MASK_USER);

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
#endif
		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			tmp &= (unsigned long) FPC_VALID_MASK
				<< (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}

static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}

static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
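/*
 * Note: __peek_user() hands out a sanitized PSW mask (psw_user_bits plus the
 * user-changeable bits only), and __poke_user() below rejects any mask whose
 * non-user bits differ from psw_user_bits or that sets the EA bit without
 * the BA bit, so a tracer can never install a PSW the kernel would not have
 * produced itself.  The same idea guards the floating point control word.
 */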
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and to the floating point
 * control register need to be checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask &&
		    ((data & ~PSW_MASK_USER) != psw_user_bits ||
		     ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
			/* Invalid psw mask. */
			return -EINVAL;
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
#endif
		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
		    (data & ~((unsigned long) FPC_VALID_MASK
			      << (BITS_PER_LONG - 32))) != 0)
			return -EINVAL;
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);

	}

	return 0;
}

static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}
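/*
 * Illustrative example (userspace, not part of this file; offsets are into
 * the kernel's struct user from asm/user.h and the glibc ptrace() wrapper
 * is assumed, which returns the peeked word directly):
 *
 *	errno = 0;
 *	long r2 = ptrace(PTRACE_PEEKUSER, pid,
 *			 offsetof(struct user, regs.gprs[2]), 0);
 *	if (r2 != -1 || errno == 0)
 *		ptrace(PTRACE_POKEUSER, pid,
 *		       offsetof(struct user, regs.gprs[2]), r2 + 1);
 *
 * Offsets must pass the checks above: word aligned (8 byte on 64 bit), with
 * the 4 byte exception made for the access registers.
 */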
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(task_thread_info(child)->last_break,
			 (unsigned long __user *) data);
		return 0;
	case PTRACE_ENABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags &= ~PER_FLAG_NO_TE;
		return 0;
	case PTRACE_DISABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags |= PER_FLAG_NO_TE;
		return 0;
	default:
		/* Removing high order bit from addr (only for 31 bit). */
		addr &= PSW_ADDR_INSN;
		return ptrace_request(child, request, addr, data);
	}
}
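/*
 * Illustrative example (userspace, not part of this file): the s390 specific
 * PTRACE_PEEKUSR_AREA request transfers a whole block of the user area in
 * one call.  The addr argument points to a ptrace_area descriptor whose
 * kernel_addr is the offset into struct user and whose process_addr is a
 * buffer in the tracer, filled word by word by the loop above:
 *
 *	unsigned long gprs[16];
 *	ptrace_area pa = {
 *		.len		= sizeof(gprs),
 *		.kernel_addr	= offsetof(struct user, regs.gprs),
 *		.process_addr	= (unsigned long) gprs,
 *	};
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, &pa, 0);
 *
 * PTRACE_ENABLE_TE and PTRACE_DISABLE_TE only toggle PER_FLAG_NO_TE;
 * update_per_regs() turns that flag into the transactional execution
 * control bits in control register 0 when the flags are next applied.
 */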
#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 byte instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */

/*
 * Same as peek_user_per but for a 31 bit program.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}

/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp = psw32_user_bits | (tmp & PSW32_MASK_USER);
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/* gpr 0-15 */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);

	} else
		tmp = 0;

	return tmp;
}

static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}
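/*
 * Note on the addr*2 + 4 arithmetic above (mirrored in __poke_user_compat()
 * below): in the 31 bit layout the psw and the gprs are 4 byte fields, while
 * the real pt_regs holds 8 byte fields.  A 31 bit offset addr therefore maps
 * to the 64 bit field at byte offset addr*2, and because s390 is big-endian
 * the 31 bit value lives in the low half of that field, at addr*2 + 4.
 */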
/*
 * Same as poke_user_per but for a 31 bit program.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}

/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
				/* Invalid psw mask. */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & PSW32_MASK_USER) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			/* gpr 0-15 */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
		    (tmp & ~FPC_VALID_MASK) != 0)
			/* Invalid floating point control. */
			return -EINVAL;
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}
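/*
 * Note on the psw handling above: a 31 bit PSW carries the addressing mode
 * in the top bit of the address word (PSW32_ADDR_AMODE), whereas the 64 bit
 * PSW carries it as the basic addressing mode bit (PSW_MASK_BA) in the mask.
 * __peek_user_compat() folds PSW_MASK_BA back into the faked 31 bit address,
 * and the code above does the reverse when a 31 bit debugger writes psw.addr,
 * while the user mask bits are shifted into the upper half of the 64 bit
 * mask word.
 */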
static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	compat_ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(task_thread_info(child)->last_break,
			 (unsigned int __user *) data);
		return 0;
	}
	return compat_ptrace_request(child, request, addr, data);
}
#endif

asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* Do the secure computing check first. */
	if (secure_computing(regs->gprs[2])) {
		/* seccomp failures shouldn't expose any additional code. */
		ret = -1;
		goto out;
	}

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		clear_thread_flag(TIF_SYSCALL);
		ret = -1;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);

	audit_syscall_entry(is_compat_task() ?
				AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
			    regs->gprs[2], regs->orig_gpr2,
			    regs->gprs[3], regs->gprs[4],
			    regs->gprs[5]);
out:
	return ret ?: regs->gprs[2];
}

asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
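/*
 * Note: the value returned by do_syscall_trace_enter() is used by the
 * sysc_tracesys path in entry.S as the (possibly rewritten) system call
 * number; returning -1, either because seccomp rejected the call or because
 * the tracer asked for the call to be skipped or stored an invalid number,
 * makes that path bypass the system call dispatch.
 */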
/*
 * user_regset definitions.
 */

static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
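/*
 * Note: the NT_PRSTATUS handlers above deliberately reuse __peek_user() and
 * __poke_user(), so PTRACE_GETREGSET/PTRACE_SETREGSET and ELF core dumps see
 * exactly the same register layout and the same psw sanity checks as the
 * word based PTRACE_PEEKUSR/PTRACE_POKEUSR interface.
 */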
static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_regs, 0, -1);
}

static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
			return -EINVAL;
		target->thread.fp_regs.fpc = fpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					target->thread.fp_regs.fprs,
					offsetof(s390_fp_regs, fprs), -1);

	if (rc == 0 && target == current)
		restore_fp_regs(&target->thread.fp_regs);

	return rc;
}

#ifdef CONFIG_64BIT

static int s390_last_break_get(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       void *kbuf, void __user *ubuf)
{
	if (count > 0) {
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = task_thread_info(target)->last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(task_thread_info(target)->last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static int s390_tdb_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	unsigned char *data;

	if (!(regs->int_code & 0x200))
		return -ENODATA;
	data = target->thread.trap_tdb;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
}

static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}

#endif
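/*
 * Note: NT_S390_TDB exposes the 256 byte transaction diagnostic block saved
 * in thread.trap_tdb; the 0x200 bit in the interruption code checked in
 * s390_tdb_get() indicates that the program interrupt aborted a transaction
 * and that a TDB was actually stored, otherwise -ENODATA is returned.
 * NT_S390_LAST_BREAK is effectively read only: the set handler accepts and
 * ignores writes.
 */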
static int s390_system_call_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	unsigned int *data = &task_thread_info(target)->system_call;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(unsigned int));
}

static int s390_system_call_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	unsigned int *data = &task_thread_info(target)->system_call;
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(unsigned int));
}

static const struct user_regset s390_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
#ifdef CONFIG_64BIT
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	[REGSET_TDB] = {
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
};

static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};

#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user_compat(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user_compat(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     void *kbuf, void __user *ubuf)
{
	compat_ulong_t *gprs_high;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = *gprs_high;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(*gprs_high, u++))
				return -EFAULT;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}
	return 0;
}
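/*
 * Note on the gprs_high stride: every 64 bit gpr in pt_regs is two
 * compat_ulong_t words, and NT_S390_HIGH_GPRS exposes only the upper halves,
 * which on big-endian s390 are the first word of each pair.  The pointer
 * therefore advances by two compat words per register, and the set function
 * below has to step it the same way.
 */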
static int s390_compat_regs_high_set(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     const void *kbuf, const void __user *ubuf)
{
	compat_ulong_t *gprs_high;
	int rc = 0;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0) {
			*gprs_high = *k++;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			*gprs_high = word;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}

	return rc;
}

static int s390_compat_last_break_get(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      void *kbuf, void __user *ubuf)
{
	compat_ulong_t last_break;

	if (count > 0) {
		last_break = task_thread_info(target)->last_break;
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static const struct user_regset s390_compat_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	[REGSET_TDB] = {
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	[REGSET_GENERAL_EXTENDED] = {
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
};

static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}

static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};

unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return 0;
	return regs->gprs[offset];
}

int regs_query_register_offset(const char *name)
{
	unsigned long offset;

	if (!name || *name != 'r')
		return -EINVAL;
	if (strict_strtoul(name + 1, 10, &offset))
		return -EINVAL;
	if (offset >= NUM_GPRS)
		return -EINVAL;
	return offset;
}
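/*
 * Note: regs_get_register(), regs_query_register_offset() above and
 * regs_query_register_name(), regs_get_kernel_stack_nth() below implement
 * the generic register and stack access API (HAVE_REGS_AND_STACK_ACCESS_API)
 * used by kprobe based event tracing to fetch probe arguments by register
 * name ("r0".."r15") or by stack slot.
 */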
const char *regs_query_register_name(unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return NULL;
	return gpr_names[offset];
}

static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	unsigned long ksp = kernel_stack_pointer(regs);

	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr;

	addr = kernel_stack_pointer(regs) + n * sizeof(long);
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}