Lines Matching +full:data +full:- +full:addr

1 // SPDX-License-Identifier: GPL-2.0
28 #include <asm/access-regs.h>
45 struct thread_struct *thread = &task->thread; in update_cr_regs()
66 if (task->thread.per_flags & PER_FLAG_NO_TE) in update_cr_regs()
70 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { in update_cr_regs()
71 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) in update_cr_regs()
80 if (task->thread.gs_cb) in update_cr_regs()
91 new.control.val = thread->per_user.control; in update_cr_regs()
92 new.start.val = thread->per_user.start; in update_cr_regs()
93 new.end.val = thread->per_user.end; in update_cr_regs()
107 new.end.val = -1UL; in update_cr_regs()
112 regs->psw.mask &= ~PSW_MASK_PER; in update_cr_regs()
115 regs->psw.mask |= PSW_MASK_PER; in update_cr_regs()
146 memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); in ptrace_disable()
147 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); in ptrace_disable()
150 task->thread.per_flags = 0; in ptrace_disable()
156 addr_t addr) in __peek_user_per() argument
158 if (addr == offsetof(struct per_struct_kernel, cr9)) in __peek_user_per()
161 PER_EVENT_IFETCH : child->thread.per_user.control; in __peek_user_per()
162 else if (addr == offsetof(struct per_struct_kernel, cr10)) in __peek_user_per()
165 0 : child->thread.per_user.start; in __peek_user_per()
166 else if (addr == offsetof(struct per_struct_kernel, cr11)) in __peek_user_per()
169 -1UL : child->thread.per_user.end; in __peek_user_per()
170 else if (addr == offsetof(struct per_struct_kernel, bits)) in __peek_user_per()
171 /* Single-step bit. */ in __peek_user_per()
173 (1UL << (BITS_PER_LONG - 1)) : 0; in __peek_user_per()
174 else if (addr == offsetof(struct per_struct_kernel, starting_addr)) in __peek_user_per()
176 return child->thread.per_user.start; in __peek_user_per()
177 else if (addr == offsetof(struct per_struct_kernel, ending_addr)) in __peek_user_per()
179 return child->thread.per_user.end; in __peek_user_per()
180 else if (addr == offsetof(struct per_struct_kernel, perc_atmid)) in __peek_user_per()
183 child->thread.per_event.cause << (BITS_PER_LONG - 16); in __peek_user_per()
184 else if (addr == offsetof(struct per_struct_kernel, address)) in __peek_user_per()
186 return child->thread.per_event.address; in __peek_user_per()
187 else if (addr == offsetof(struct per_struct_kernel, access_id)) in __peek_user_per()
190 child->thread.per_event.paid << (BITS_PER_LONG - 8); in __peek_user_per()
195 * Read the word at offset addr from the user area of a process. The
203 static unsigned long __peek_user(struct task_struct *child, addr_t addr) in __peek_user() argument
207 if (addr < offsetof(struct user, regs.acrs)) { in __peek_user()
211 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); in __peek_user()
212 if (addr == offsetof(struct user, regs.psw.mask)) { in __peek_user()
218 } else if (addr < offsetof(struct user, regs.orig_gpr2)) { in __peek_user()
222 offset = addr - offsetof(struct user, regs.acrs); in __peek_user()
228 if (addr == offsetof(struct user, regs.acrs[15])) in __peek_user()
229 tmp = ((unsigned long) child->thread.acrs[15]) << 32; in __peek_user()
231 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); in __peek_user()
233 } else if (addr == offsetof(struct user, regs.orig_gpr2)) { in __peek_user()
237 tmp = (addr_t) task_pt_regs(child)->orig_gpr2; in __peek_user()
239 } else if (addr < offsetof(struct user, regs.fp_regs)) { in __peek_user()
246 } else if (addr == offsetof(struct user, regs.fp_regs.fpc)) { in __peek_user()
250 tmp = child->thread.ufpu.fpc; in __peek_user()
251 tmp <<= BITS_PER_LONG - 32; in __peek_user()
253 } else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) { in __peek_user()
255 * floating point regs. are in the child->thread.ufpu.vxrs array in __peek_user()
257 offset = addr - offsetof(struct user, regs.fp_regs.fprs); in __peek_user()
258 tmp = *(addr_t *)((addr_t)child->thread.ufpu.vxrs + 2 * offset); in __peek_user()
259 } else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) { in __peek_user()
263 addr -= offsetof(struct user, regs.per_info); in __peek_user()
264 tmp = __peek_user_per(child, addr); in __peek_user()
273 peek_user(struct task_struct *child, addr_t addr, addr_t data) in peek_user() argument
282 if (addr >= offsetof(struct user, regs.acrs) && in peek_user()
283 addr < offsetof(struct user, regs.orig_gpr2)) in peek_user()
285 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) in peek_user()
286 return -EIO; in peek_user()
288 tmp = __peek_user(child, addr); in peek_user()
289 return put_user(tmp, (addr_t __user *) data); in peek_user()
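The peek_user() fragments above service the plain PTRACE_PEEKUSR request: the offset must be word aligned and inside struct user, and the value is handed back through put_user(). Below is a minimal user-space sketch of the same path; the helper name is illustrative, and the offsets are assumptions derived from the layout __peek_user() implies (psw.mask at 0, psw.addr at 8, gprs[n] at 16 + 8*n).

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Read one word of the tracee's USER area; unaligned or out-of-range
 * offsets yield EIO, matching the bounds check in peek_user() above. */
static long peek_user_word(pid_t pid, unsigned long off)
{
        long val;

        errno = 0;
        val = ptrace(PTRACE_PEEKUSER, pid, (void *)off, NULL);
        if (val == -1 && errno)
                perror("PTRACE_PEEKUSER");
        return val;
}

/* e.g. peek_user_word(pid, 0)          -> PSW mask
 *      peek_user_word(pid, 16 + 8 * 2) -> gpr 2 */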
293 addr_t addr, addr_t data) in __poke_user_per() argument
307 if (addr == offsetof(struct per_struct_kernel, cr9)) in __poke_user_per()
309 child->thread.per_user.control = in __poke_user_per()
310 data & (PER_EVENT_MASK | PER_CONTROL_MASK); in __poke_user_per()
311 else if (addr == offsetof(struct per_struct_kernel, starting_addr)) in __poke_user_per()
313 child->thread.per_user.start = data; in __poke_user_per()
314 else if (addr == offsetof(struct per_struct_kernel, ending_addr)) in __poke_user_per()
316 child->thread.per_user.end = data; in __poke_user_per()
320 * Write a word to the user area of a process at location addr. This
325 static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) in __poke_user() argument
330 if (addr < offsetof(struct user, regs.acrs)) { in __poke_user()
335 if (addr == offsetof(struct user, regs.psw.mask)) { in __poke_user()
339 if ((data ^ PSW_USER_BITS) & ~mask) in __poke_user()
341 return -EINVAL; in __poke_user()
342 if ((data & PSW_MASK_ASC) == PSW_ASC_HOME) in __poke_user()
343 /* Invalid address-space-control bits */ in __poke_user()
344 return -EINVAL; in __poke_user()
345 if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) in __poke_user()
347 return -EINVAL; in __poke_user()
351 addr == offsetof(struct user, regs.gprs[2])) { in __poke_user()
354 regs->int_code = 0x20000 | (data & 0xffff); in __poke_user()
356 *(addr_t *)((addr_t) &regs->psw + addr) = data; in __poke_user()
357 } else if (addr < offsetof(struct user, regs.orig_gpr2)) { in __poke_user()
361 offset = addr - offsetof(struct user, regs.acrs); in __poke_user()
368 if (addr == offsetof(struct user, regs.acrs[15])) in __poke_user()
369 child->thread.acrs[15] = (unsigned int) (data >> 32); in __poke_user()
371 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; in __poke_user()
373 } else if (addr == offsetof(struct user, regs.orig_gpr2)) { in __poke_user()
377 task_pt_regs(child)->orig_gpr2 = data; in __poke_user()
379 } else if (addr < offsetof(struct user, regs.fp_regs)) { in __poke_user()
386 } else if (addr == offsetof(struct user, regs.fp_regs.fpc)) { in __poke_user()
390 if ((unsigned int)data != 0) in __poke_user()
391 return -EINVAL; in __poke_user()
392 child->thread.ufpu.fpc = data >> (BITS_PER_LONG - 32); in __poke_user()
394 } else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) { in __poke_user()
396 * floating point regs. are in the child->thread.ufpu.vxrs array in __poke_user()
398 offset = addr - offsetof(struct user, regs.fp_regs.fprs); in __poke_user()
399 *(addr_t *)((addr_t)child->thread.ufpu.vxrs + 2 * offset) = data; in __poke_user()
400 } else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) { in __poke_user()
404 addr -= offsetof(struct user, regs.per_info); in __poke_user()
405 __poke_user_per(child, addr, data); in __poke_user()
412 static int poke_user(struct task_struct *child, addr_t addr, addr_t data) in poke_user() argument
421 if (addr >= offsetof(struct user, regs.acrs) && in poke_user()
422 addr < offsetof(struct user, regs.orig_gpr2)) in poke_user()
424 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) in poke_user()
425 return -EIO; in poke_user()
427 return __poke_user(child, addr, data); in poke_user()
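poke_user() is the mirror image for PTRACE_POKEUSER, with the extra PSW sanity checks visible in __poke_user(). A companion sketch under the same layout assumptions; per the gprs[2] branch above, poking gpr 2 while the tracee sits in a syscall also refreshes regs->int_code.

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Write one word of the tracee's USER area; __poke_user() rejects
 * malformed PSW bits with EINVAL and bad offsets with EIO. */
static int poke_user_word(pid_t pid, unsigned long off, unsigned long value)
{
        if (ptrace(PTRACE_POKEUSER, pid, (void *)off, (void *)value) == -1) {
                perror("PTRACE_POKEUSER");
                return -1;
        }
        return 0;
}

/* e.g. poke_user_word(pid, 16 + 8 * 2, 0) clears gpr 2 (the syscall
 * return value register) in the stopped tracee. */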
431 unsigned long addr, unsigned long data) in arch_ptrace() argument
438 /* read the word at location addr in the USER area. */ in arch_ptrace()
439 return peek_user(child, addr, data); in arch_ptrace()
442 /* write the word at location addr in the USER area */ in arch_ptrace()
443 return poke_user(child, addr, data); in arch_ptrace()
447 if (copy_from_user(&parea, (void __force __user *) addr, in arch_ptrace()
449 return -EFAULT; in arch_ptrace()
450 addr = parea.kernel_addr; in arch_ptrace()
451 data = parea.process_addr; in arch_ptrace()
455 ret = peek_user(child, addr, data); in arch_ptrace()
459 (addr_t __force __user *) data)) in arch_ptrace()
460 return -EFAULT; in arch_ptrace()
461 ret = poke_user(child, addr, utmp); in arch_ptrace()
465 addr += sizeof(unsigned long); in arch_ptrace()
466 data += sizeof(unsigned long); in arch_ptrace()
471 return put_user(child->thread.last_break, (unsigned long __user *)data); in arch_ptrace()
474 return -EIO; in arch_ptrace()
475 child->thread.per_flags &= ~PER_FLAG_NO_TE; in arch_ptrace()
479 return -EIO; in arch_ptrace()
480 child->thread.per_flags |= PER_FLAG_NO_TE; in arch_ptrace()
481 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
484 if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE)) in arch_ptrace()
485 return -EIO; in arch_ptrace()
486 switch (data) { in arch_ptrace()
488 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
491 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
492 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND; in arch_ptrace()
495 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
496 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND; in arch_ptrace()
499 return -EINVAL; in arch_ptrace()
503 return ptrace_request(child, request, addr, data); in arch_ptrace()
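arch_ptrace() also dispatches the s390-specific requests: PTRACE_PEEKUSR_AREA/PTRACE_POKEUSR_AREA copy a whole range of the USER area described by a ptrace_area (len, kernel_addr, process_addr; kernel_addr and process_addr are read at lines 450-451), PTRACE_GET_LAST_BREAK returns thread.last_break, and the transactional-execution requests toggle the PER_FLAG_* bits. A hedged sketch of the area read, assuming the kernel uapi <asm/ptrace.h> provides ptrace_area and PTRACE_PEEKUSR_AREA; the helper name is illustrative.

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>         /* ptrace_area, PTRACE_PEEKUSR_AREA */

/* Copy `len` bytes of the tracee's USER area, starting at offset `off`,
 * into a buffer in the tracer: kernel_addr is the struct user offset,
 * process_addr the destination in the calling (tracing) process. */
static int peek_usr_area(pid_t pid, unsigned long off, void *buf, unsigned long len)
{
        ptrace_area parea = {
                .len = len,                     /* multiple of sizeof(long) */
                .kernel_addr = off,
                .process_addr = (unsigned long)buf,
        };

        if (ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL) == -1) {
                perror("PTRACE_PEEKUSR_AREA");
                return -1;
        }
        return 0;
}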
519 * a 64 bit program is a no-no.
526 addr_t addr) in __peek_user_per_compat() argument
528 if (addr == offsetof(struct compat_per_struct_kernel, cr9)) in __peek_user_per_compat()
531 PER_EVENT_IFETCH : child->thread.per_user.control; in __peek_user_per_compat()
532 else if (addr == offsetof(struct compat_per_struct_kernel, cr10)) in __peek_user_per_compat()
535 0 : child->thread.per_user.start; in __peek_user_per_compat()
536 else if (addr == offsetof(struct compat_per_struct_kernel, cr11)) in __peek_user_per_compat()
539 PSW32_ADDR_INSN : child->thread.per_user.end; in __peek_user_per_compat()
540 else if (addr == offsetof(struct compat_per_struct_kernel, bits)) in __peek_user_per_compat()
541 /* Single-step bit. */ in __peek_user_per_compat()
544 else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr)) in __peek_user_per_compat()
546 return (__u32) child->thread.per_user.start; in __peek_user_per_compat()
547 else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr)) in __peek_user_per_compat()
549 return (__u32) child->thread.per_user.end; in __peek_user_per_compat()
550 else if (addr == offsetof(struct compat_per_struct_kernel, perc_atmid)) in __peek_user_per_compat()
552 return (__u32) child->thread.per_event.cause << 16; in __peek_user_per_compat()
553 else if (addr == offsetof(struct compat_per_struct_kernel, address)) in __peek_user_per_compat()
555 return (__u32) child->thread.per_event.address; in __peek_user_per_compat()
556 else if (addr == offsetof(struct compat_per_struct_kernel, access_id)) in __peek_user_per_compat()
558 return (__u32) child->thread.per_event.paid << 24; in __peek_user_per_compat()
565 static u32 __peek_user_compat(struct task_struct *child, addr_t addr) in __peek_user_compat() argument
570 if (addr < offsetof(struct compat_user, regs.acrs)) { in __peek_user_compat()
575 if (addr == offsetof(struct compat_user, regs.psw.mask)) { in __peek_user_compat()
577 tmp = (__u32)(regs->psw.mask >> 32); in __peek_user_compat()
580 } else if (addr == offsetof(struct compat_user, regs.psw.addr)) { in __peek_user_compat()
582 tmp = (__u32) regs->psw.addr | in __peek_user_compat()
583 (__u32)(regs->psw.mask & PSW_MASK_BA); in __peek_user_compat()
585 /* gpr 0-15 */ in __peek_user_compat()
586 tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4); in __peek_user_compat()
588 } else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) { in __peek_user_compat()
592 offset = addr - offsetof(struct compat_user, regs.acrs); in __peek_user_compat()
593 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset); in __peek_user_compat()
595 } else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) { in __peek_user_compat()
599 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4); in __peek_user_compat()
601 } else if (addr < offsetof(struct compat_user, regs.fp_regs)) { in __peek_user_compat()
608 } else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) { in __peek_user_compat()
612 tmp = child->thread.ufpu.fpc; in __peek_user_compat()
614 } else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) { in __peek_user_compat()
616 * floating point regs. are in the child->thread.ufpu.vxrs array in __peek_user_compat()
618 offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs); in __peek_user_compat()
619 tmp = *(__u32 *)((addr_t)child->thread.ufpu.vxrs + 2 * offset); in __peek_user_compat()
620 } else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) { in __peek_user_compat()
624 addr -= offsetof(struct compat_user, regs.per_info); in __peek_user_compat()
625 tmp = __peek_user_per_compat(child, addr); in __peek_user_compat()
634 addr_t addr, addr_t data) in peek_user_compat() argument
638 if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3) in peek_user_compat()
639 return -EIO; in peek_user_compat()
641 tmp = __peek_user_compat(child, addr); in peek_user_compat()
642 return put_user(tmp, (__u32 __user *) data); in peek_user_compat()
649 addr_t addr, __u32 data) in __poke_user_per_compat() argument
651 if (addr == offsetof(struct compat_per_struct_kernel, cr9)) in __poke_user_per_compat()
653 child->thread.per_user.control = in __poke_user_per_compat()
654 data & (PER_EVENT_MASK | PER_CONTROL_MASK); in __poke_user_per_compat()
655 else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr)) in __poke_user_per_compat()
657 child->thread.per_user.start = data; in __poke_user_per_compat()
658 else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr)) in __poke_user_per_compat()
660 child->thread.per_user.end = data; in __poke_user_per_compat()
667 addr_t addr, addr_t data) in __poke_user_compat() argument
669 __u32 tmp = (__u32) data; in __poke_user_compat()
672 if (addr < offsetof(struct compat_user, regs.acrs)) { in __poke_user_compat()
677 if (addr == offsetof(struct compat_user, regs.psw.mask)) { in __poke_user_compat()
684 return -EINVAL; in __poke_user_compat()
685 if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME) in __poke_user_compat()
686 /* Invalid address-space-control bits */ in __poke_user_compat()
687 return -EINVAL; in __poke_user_compat()
688 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | in __poke_user_compat()
689 (regs->psw.mask & PSW_MASK_BA) | in __poke_user_compat()
691 } else if (addr == offsetof(struct compat_user, regs.psw.addr)) { in __poke_user_compat()
693 regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; in __poke_user_compat()
695 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | in __poke_user_compat()
699 addr == offsetof(struct compat_user, regs.gprs[2])) { in __poke_user_compat()
702 regs->int_code = 0x20000 | (data & 0xffff); in __poke_user_compat()
704 /* gpr 0-15 */ in __poke_user_compat()
705 *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; in __poke_user_compat()
707 } else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) { in __poke_user_compat()
711 offset = addr - offsetof(struct compat_user, regs.acrs); in __poke_user_compat()
712 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; in __poke_user_compat()
714 } else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) { in __poke_user_compat()
718 *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; in __poke_user_compat()
720 } else if (addr < offsetof(struct compat_user, regs.fp_regs)) { in __poke_user_compat()
727 } else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) { in __poke_user_compat()
731 child->thread.ufpu.fpc = data; in __poke_user_compat()
733 } else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) { in __poke_user_compat()
735 * floating point regs. are in the child->thread.ufpu.vxrs array in __poke_user_compat()
737 offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs); in __poke_user_compat()
738 *(__u32 *)((addr_t)child->thread.ufpu.vxrs + 2 * offset) = tmp; in __poke_user_compat()
739 } else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) { in __poke_user_compat()
743 addr -= offsetof(struct compat_user, regs.per_info); in __poke_user_compat()
744 __poke_user_per_compat(child, addr, data); in __poke_user_compat()
751 addr_t addr, addr_t data) in poke_user_compat() argument
753 if (!is_compat_task() || (addr & 3) || in poke_user_compat()
754 addr > sizeof(struct compat_user) - 3) in poke_user_compat()
755 return -EIO; in poke_user_compat()
757 return __poke_user_compat(child, addr, data); in poke_user_compat()
763 unsigned long addr = caddr; in compat_arch_ptrace() local
764 unsigned long data = cdata; in compat_arch_ptrace() local
770 /* read the word at location addr in the USER area. */ in compat_arch_ptrace()
771 return peek_user_compat(child, addr, data); in compat_arch_ptrace()
774 /* write the word at location addr in the USER area */ in compat_arch_ptrace()
775 return poke_user_compat(child, addr, data); in compat_arch_ptrace()
779 if (copy_from_user(&parea, (void __force __user *) addr, in compat_arch_ptrace()
781 return -EFAULT; in compat_arch_ptrace()
782 addr = parea.kernel_addr; in compat_arch_ptrace()
783 data = parea.process_addr; in compat_arch_ptrace()
787 ret = peek_user_compat(child, addr, data); in compat_arch_ptrace()
791 (__u32 __force __user *) data)) in compat_arch_ptrace()
792 return -EFAULT; in compat_arch_ptrace()
793 ret = poke_user_compat(child, addr, utmp); in compat_arch_ptrace()
797 addr += sizeof(unsigned int); in compat_arch_ptrace()
798 data += sizeof(unsigned int); in compat_arch_ptrace()
803 return put_user(child->thread.last_break, (unsigned int __user *)data); in compat_arch_ptrace()
805 return compat_ptrace_request(child, request, addr, data); in compat_arch_ptrace()
819 save_access_regs(target->thread.acrs); in s390_regs_get()
834 save_access_regs(target->thread.acrs); in s390_regs_set()
840 count -= sizeof(*k); in s390_regs_set()
851 count -= sizeof(*u); in s390_regs_set()
857 restore_access_regs(target->thread.acrs); in s390_regs_set()
871 fp_regs.fpc = target->thread.ufpu.fpc; in s390_fpregs_get()
872 fpregs_store(&fp_regs, &target->thread.ufpu); in s390_fpregs_get()
887 convert_vx_to_fp(fprs, target->thread.ufpu.vxrs); in s390_fpregs_set()
889 u32 ufpc[2] = { target->thread.ufpu.fpc, 0 }; in s390_fpregs_set()
895 return -EINVAL; in s390_fpregs_set()
896 target->thread.ufpu.fpc = ufpc[0]; in s390_fpregs_set()
901 fprs, offsetof(s390_fp_regs, fprs), -1); in s390_fpregs_set()
904 convert_fp_to_vx(target->thread.ufpu.vxrs, fprs); in s390_fpregs_set()
912 return membuf_store(&to, target->thread.last_break); in s390_last_break_get()
930 if (!(regs->int_code & 0x200)) in s390_tdb_get()
931 return -ENODATA; in s390_tdb_get()
932 size = sizeof(target->thread.trap_tdb.data); in s390_tdb_get()
933 return membuf_write(&to, target->thread.trap_tdb.data, size); in s390_tdb_get()
952 return -ENODEV; in s390_vxrs_low_get()
956 vxrs[i] = target->thread.ufpu.vxrs[i].low; in s390_vxrs_low_get()
969 return -ENODEV; in s390_vxrs_low_set()
974 vxrs[i] = target->thread.ufpu.vxrs[i].low; in s390_vxrs_low_set()
976 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); in s390_vxrs_low_set()
979 target->thread.ufpu.vxrs[i].low = vxrs[i]; in s390_vxrs_low_set()
989 return -ENODEV; in s390_vxrs_high_get()
992 return membuf_write(&to, target->thread.ufpu.vxrs + __NUM_VXRS_LOW, in s390_vxrs_high_get()
1004 return -ENODEV; in s390_vxrs_high_set()
1009 target->thread.ufpu.vxrs + __NUM_VXRS_LOW, 0, -1); in s390_vxrs_high_set()
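The regset accessors above are reached from user space through PTRACE_GETREGSET/PTRACE_SETREGSET with the matching NT_S390_* note types (NT_S390_LAST_BREAK, NT_S390_TDB, NT_S390_VXRS_LOW/HIGH, NT_S390_SYSTEM_CALL, ...). A read sketch for the vector-register low halves, assuming <elf.h> defines NT_S390_VXRS_LOW; the helper name is illustrative.

#include <elf.h>                /* NT_S390_VXRS_LOW */
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>            /* struct iovec */

/* Fetch the low 64 bits of vector registers 0-15; the call fails
 * (s390_vxrs_low_get() returns -ENODEV) without the vector facility. */
static int get_vxrs_low(pid_t pid, uint64_t vxrs_low[16])
{
        struct iovec iov = {
                .iov_base = vxrs_low,
                .iov_len  = 16 * sizeof(uint64_t),
        };

        if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_S390_VXRS_LOW, &iov) == -1) {
                perror("PTRACE_GETREGSET(NT_S390_VXRS_LOW)");
                return -1;
        }
        return 0;
}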
1017 return membuf_store(&to, target->thread.system_call); in s390_system_call_get()
1025 unsigned int *data = &target->thread.system_call; in s390_system_call_set() local
1027 data, 0, sizeof(unsigned int)); in s390_system_call_set()
1034 struct gs_cb *data = target->thread.gs_cb; in s390_gs_cb_get() local
1037 return -ENODEV; in s390_gs_cb_get()
1038 if (!data) in s390_gs_cb_get()
1039 return -ENODATA; in s390_gs_cb_get()
1041 save_gs_cb(data); in s390_gs_cb_get()
1042 return membuf_write(&to, data, sizeof(struct gs_cb)); in s390_gs_cb_get()
1050 struct gs_cb gs_cb = { }, *data = NULL; in s390_gs_cb_set() local
1054 return -ENODEV; in s390_gs_cb_set()
1055 if (!target->thread.gs_cb) { in s390_gs_cb_set()
1056 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_gs_cb_set()
1057 if (!data) in s390_gs_cb_set()
1058 return -ENOMEM; in s390_gs_cb_set()
1060 if (!target->thread.gs_cb) in s390_gs_cb_set()
1065 gs_cb = *target->thread.gs_cb; in s390_gs_cb_set()
1069 kfree(data); in s390_gs_cb_set()
1070 return -EFAULT; in s390_gs_cb_set()
1073 if (!target->thread.gs_cb) in s390_gs_cb_set()
1074 target->thread.gs_cb = data; in s390_gs_cb_set()
1075 *target->thread.gs_cb = gs_cb; in s390_gs_cb_set()
1078 restore_gs_cb(target->thread.gs_cb); in s390_gs_cb_set()
1088 struct gs_cb *data = target->thread.gs_bc_cb; in s390_gs_bc_get() local
1091 return -ENODEV; in s390_gs_bc_get()
1092 if (!data) in s390_gs_bc_get()
1093 return -ENODATA; in s390_gs_bc_get()
1094 return membuf_write(&to, data, sizeof(struct gs_cb)); in s390_gs_bc_get()
1102 struct gs_cb *data = target->thread.gs_bc_cb; in s390_gs_bc_set() local
1105 return -ENODEV; in s390_gs_bc_set()
1106 if (!data) { in s390_gs_bc_set()
1107 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_gs_bc_set()
1108 if (!data) in s390_gs_bc_set()
1109 return -ENOMEM; in s390_gs_bc_set()
1110 target->thread.gs_bc_cb = data; in s390_gs_bc_set()
1113 data, 0, sizeof(struct gs_cb)); in s390_gs_bc_set()
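The guarded-storage regsets follow the same pattern: NT_S390_GS_CB exposes the task's current control block, NT_S390_GS_BC the broadcast control block that s390_gs_bc_set() allocates on demand. A read sketch, assuming <elf.h> defines NT_S390_GS_CB and the kernel uapi <asm/guarded_storage.h> defines struct gs_cb; the helper name is illustrative.

#include <elf.h>                        /* NT_S390_GS_CB */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/guarded_storage.h>        /* struct gs_cb */

/* Read the tracee's guarded-storage control block. Expect ENODEV when
 * the facility is missing and ENODATA when the tracee never enabled
 * guarded storage, matching the checks in s390_gs_cb_get() above. */
static int get_gs_cb(pid_t pid, struct gs_cb *cb)
{
        struct iovec iov = { .iov_base = cb, .iov_len = sizeof(*cb) };

        if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_S390_GS_CB, &iov) == -1) {
                perror("PTRACE_GETREGSET(NT_S390_GS_CB)");
                return -1;
        }
        return 0;
}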
1118 return (cb->rca & 0x1f) == 0 && in is_ri_cb_valid()
1119 (cb->roa & 0xfff) == 0 && in is_ri_cb_valid()
1120 (cb->rla & 0xfff) == 0xfff && in is_ri_cb_valid()
1121 cb->s == 1 && in is_ri_cb_valid()
1122 cb->k == 1 && in is_ri_cb_valid()
1123 cb->h == 0 && in is_ri_cb_valid()
1124 cb->reserved1 == 0 && in is_ri_cb_valid()
1125 cb->ps == 1 && in is_ri_cb_valid()
1126 cb->qs == 0 && in is_ri_cb_valid()
1127 cb->pc == 1 && in is_ri_cb_valid()
1128 cb->qc == 0 && in is_ri_cb_valid()
1129 cb->reserved2 == 0 && in is_ri_cb_valid()
1130 cb->reserved3 == 0 && in is_ri_cb_valid()
1131 cb->reserved4 == 0 && in is_ri_cb_valid()
1132 cb->reserved5 == 0 && in is_ri_cb_valid()
1133 cb->reserved6 == 0 && in is_ri_cb_valid()
1134 cb->reserved7 == 0 && in is_ri_cb_valid()
1135 cb->reserved8 == 0 && in is_ri_cb_valid()
1136 cb->rla >= cb->roa && in is_ri_cb_valid()
1137 cb->rca >= cb->roa && in is_ri_cb_valid()
1138 cb->rca <= cb->rla+1 && in is_ri_cb_valid()
1139 cb->m < 3; in is_ri_cb_valid()
1146 struct runtime_instr_cb *data = target->thread.ri_cb; in s390_runtime_instr_get() local
1149 return -ENODEV; in s390_runtime_instr_get()
1150 if (!data) in s390_runtime_instr_get()
1151 return -ENODATA; in s390_runtime_instr_get()
1153 return membuf_write(&to, data, sizeof(struct runtime_instr_cb)); in s390_runtime_instr_get()
1161 struct runtime_instr_cb ri_cb = { }, *data = NULL; in s390_runtime_instr_set() local
1165 return -ENODEV; in s390_runtime_instr_set()
1167 if (!target->thread.ri_cb) { in s390_runtime_instr_set()
1168 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_runtime_instr_set()
1169 if (!data) in s390_runtime_instr_set()
1170 return -ENOMEM; in s390_runtime_instr_set()
1173 if (target->thread.ri_cb) { in s390_runtime_instr_set()
1177 ri_cb = *target->thread.ri_cb; in s390_runtime_instr_set()
1183 kfree(data); in s390_runtime_instr_set()
1184 return -EFAULT; in s390_runtime_instr_set()
1188 kfree(data); in s390_runtime_instr_set()
1189 return -EINVAL; in s390_runtime_instr_set()
1197 if (!target->thread.ri_cb) in s390_runtime_instr_set()
1198 target->thread.ri_cb = data; in s390_runtime_instr_set()
1199 *target->thread.ri_cb = ri_cb; in s390_runtime_instr_set()
1201 load_runtime_instr_cb(target->thread.ri_cb); in s390_runtime_instr_set()
1305 save_access_regs(target->thread.acrs); in s390_compat_regs_get()
1320 save_access_regs(target->thread.acrs); in s390_compat_regs_set()
1326 count -= sizeof(*k); in s390_compat_regs_set()
1337 count -= sizeof(*u); in s390_compat_regs_set()
1343 restore_access_regs(target->thread.acrs); in s390_compat_regs_set()
1355 gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs; in s390_compat_regs_high_get()
1370 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)]; in s390_compat_regs_high_set()
1376 count -= sizeof(*k); in s390_compat_regs_high_set()
1387 count -= sizeof(*u); in s390_compat_regs_high_set()
1398 compat_ulong_t last_break = target->thread.last_break; in s390_compat_last_break_get()
1528 return regs->gprs[offset]; in regs_get_register()
1536 return -EINVAL; in regs_query_register_offset()
1538 return -EINVAL; in regs_query_register_offset()
1540 return -EINVAL; in regs_query_register_offset()
1551 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) in regs_within_kernel_stack() argument
1555 return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); in regs_within_kernel_stack()
1559 * regs_get_kernel_stack_nth() - get Nth entry of the stack
1569 unsigned long addr; in regs_get_kernel_stack_nth() local
1571 addr = kernel_stack_pointer(regs) + n * sizeof(long); in regs_get_kernel_stack_nth()
1572 if (!regs_within_kernel_stack(regs, addr)) in regs_get_kernel_stack_nth()
1574 return *(unsigned long *)addr; in regs_get_kernel_stack_nth()
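regs_get_register(), regs_within_kernel_stack() and regs_get_kernel_stack_nth() are the in-kernel side of this file, consumed by kprobe-based tracing rather than by ptrace(2). A kernel-side sketch of a kprobe pre-handler using them; the probed symbol is purely hypothetical, and building this out of tree assumes the helpers are reachable from modules.

#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/ptrace.h>

/* Dump gpr 2 (first argument register in the s390x ABI) and the top
 * kernel stack slot. regs_get_kernel_stack_nth() returns 0 for slots
 * that regs_within_kernel_stack() rejects, so no extra bounds check. */
static int stack_peek_pre(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long arg0 = regs_get_register(regs, 2);
        unsigned long slot0 = regs_get_kernel_stack_nth(regs, 0);

        pr_debug("%s: gpr2=%lx stack[0]=%lx\n", p->symbol_name, arg0, slot0);
        return 0;
}

static struct kprobe stack_peek_kp = {
        .symbol_name = "do_sys_openat2",        /* hypothetical probe target */
        .pre_handler = stack_peek_pre,
};

static int __init stack_peek_init(void)
{
        return register_kprobe(&stack_peek_kp);
}

static void __exit stack_peek_exit(void)
{
        unregister_kprobe(&stack_peek_kp);
}

module_init(stack_peek_init);
module_exit(stack_peek_exit);
MODULE_LICENSE("GPL");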