xref: /linux/arch/s390/kernel/ptrace.c (revision b2d0f5d5dc53532e6f07bc546a476a55ebdfe0f3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Ptrace user space interface.
4  *
5  *    Copyright IBM Corp. 1999, 2010
6  *    Author(s): Denis Joseph Barrow
7  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/sched/task_stack.h>
13 #include <linux/mm.h>
14 #include <linux/smp.h>
15 #include <linux/errno.h>
16 #include <linux/ptrace.h>
17 #include <linux/user.h>
18 #include <linux/security.h>
19 #include <linux/audit.h>
20 #include <linux/signal.h>
21 #include <linux/elf.h>
22 #include <linux/regset.h>
23 #include <linux/tracehook.h>
24 #include <linux/seccomp.h>
25 #include <linux/compat.h>
26 #include <trace/syscall.h>
27 #include <asm/segment.h>
28 #include <asm/page.h>
29 #include <asm/pgtable.h>
30 #include <asm/pgalloc.h>
31 #include <linux/uaccess.h>
32 #include <asm/unistd.h>
33 #include <asm/switch_to.h>
34 #include "entry.h"
35 
36 #ifdef CONFIG_COMPAT
37 #include "compat_ptrace.h"
38 #endif
39 
40 #define CREATE_TRACE_POINTS
41 #include <trace/events/syscalls.h>
42 
/*
 * Recompute the control registers that depend on per-task state:
 * CR0 (transactional-execution control), CR2 (TX diagnostic / guarded
 * storage controls) and CR9-CR11 (PER mask, start and end address).
 * Also sets or clears the PER bit in the task's PSW. Must run in a
 * context where task_pt_regs(task) and the control registers belong
 * to this task.
 */
void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;
	unsigned long cr0_old, cr0_new;
	unsigned long cr2_old, cr2_new;
	int cr0_changed, cr2_changed;

	/* Snapshot current CR0/CR2 so we only reload them on change. */
	__ctl_store(cr0_old, 0, 0);
	__ctl_store(cr2_old, 2, 2);
	cr0_new = cr0_old;
	cr2_new = cr2_old;
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		/* Set or clear transaction execution TXC bit 8. */
		cr0_new |= (1UL << 55);
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr0_new &= ~(1UL << 55);
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr2_new &= ~3UL;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr2_new |= 1UL;	/* abort randomly at TEND */
			else
				cr2_new |= 2UL;	/* abort randomly anywhere */
		}
	}
	/* Take care of enable/disable of guarded storage. */
	if (MACHINE_HAS_GS) {
		cr2_new &= ~(1UL << 4);
		if (task->thread.gs_cb)
			cr2_new |= (1UL << 4);
	}
	/* Load control register 0/2 iff changed */
	cr0_changed = cr0_new != cr0_old;
	cr2_changed = cr2_new != cr2_old;
	if (cr0_changed)
		__ctl_load(cr0_new, 0, 0);
	if (cr2_changed)
		__ctl_load(cr2_new, 2, 2);
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control |= PER_EVENT_BRANCH;
		else
			new.control |= PER_EVENT_IFETCH;
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
			new.control |= PER_EVENT_IFETCH;
		/* Single stepping covers the whole address space. */
		new.start = 0;
		new.end = -1UL;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		/* No events requested: disable PER and leave CR9-11 alone. */
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	/* Reload CR9-11 only if the effective PER set changed. */
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}
114 
/*
 * Arrange for the task to trap after every instruction: clear the
 * block-step flag first so only instruction-fetch events are merged
 * into the PER control by update_cr_regs().
 */
void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
120 
/* Turn off both single stepping and block stepping for the task. */
void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
126 
/*
 * Arrange for the task to trap on taken branches (block stepping):
 * both flags set makes update_cr_regs() select PER_EVENT_BRANCH
 * instead of PER_EVENT_IFETCH.
 */
void user_enable_block_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}
132 
133 /*
134  * Called by kernel/ptrace.c when detaching..
135  *
136  * Clear all debugging related fields.
137  */
/*
 * Called by kernel/ptrace.c when detaching: clear all debugging
 * related fields so no PER state leaks into the detached task.
 */
void ptrace_disable(struct task_struct *task)
{
	/* Forget the user specified PER set and the last PER event. */
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	/* Drop any pending PER trap notification. */
	clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
	task->thread.per_flags = 0;
}
146 
147 #define __ADDR_MASK 7
148 
/*
 * Read one word from the per_info part of the user area. 'addr' is the
 * offset into struct per_struct_kernel; the NULL 'dummy' pointer is only
 * used to compute field offsets. While single stepping is active the
 * active per set (cr9-cr11 view) reports the stepping values instead of
 * the user specified ones. Unknown offsets read as zero.
 */
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			-1UL : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}
189 
190 /*
191  * Read the word at offset addr from the user area of a process. The
192  * trouble here is that the information is littered over different
193  * locations. The process registers are found on the kernel stack,
194  * the floating point stuff and the trace settings are stored in
195  * the task structure. In addition the different structures in
196  * struct user contain pad bytes that should be read as zeroes.
197  * Lovely...
198  */
/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 *
 * 'addr' must already be validated/aligned by the caller (peek_user).
 * The NULL 'dummy' pointer is only used to compute field offsets.
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;
		/* fpc is 32 bit wide; present it in the upper half. */
		tmp <<= BITS_PER_LONG - 32;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs overlay the high halves of vxrs 0-15. */
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}
275 
/*
 * PTRACE_PEEKUSR: validate alignment and range of 'addr', then read the
 * word from the child's user area and store it to the tracer supplied
 * address 'data'. Returns 0 or -EIO/-EFAULT.
 */
static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
	/* Allow 4 byte alignment within the access register range. */
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}
295 
/*
 * Write one word into the per_info part of the user area. 'addr' is the
 * offset into struct per_struct_kernel; the NULL 'dummy' pointer is only
 * used to compute field offsets.
 */
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
324 
325 /*
326  * Write a word to the user area of a process at location addr. This
327  * operation does have an additional problem compared to peek_user.
328  * Stores to the program status word and on the floating point
329  * control register needs to get checked for validity.
330  */
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and on the floating point
 * control register needs to get checked for validity.
 *
 * Returns 0 on success (including silently ignored writes to padding
 * and read-only per_info fields) or -EINVAL for invalid psw/fpc values.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			unsigned long mask = PSW_MASK_USER;

			/* RI bit is only writable for run-time instrumented tasks. */
			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			if ((data ^ PSW_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				/* Invalid addressing mode bits */
				return -EINVAL;
		}
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
			*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		/* fpc occupies the upper 32 bits; the lower half must be zero. */
		if ((unsigned int) data != 0 ||
		    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
			return -EINVAL;
		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs overlay the high halves of vxrs 0-15. */
			*(addr_t *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = data;
		else
			*(addr_t *)((addr_t)
				child->thread.fpu.fprs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);

	}

	return 0;
}
418 
/*
 * PTRACE_POKEUSR: validate alignment and range of 'addr', then write
 * 'data' into the child's user area. Returns 0 or -EIO/-EINVAL.
 */
static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
	/* Allow 4 byte alignment within the access register range. */
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}
436 
437 long arch_ptrace(struct task_struct *child, long request,
438 		 unsigned long addr, unsigned long data)
439 {
440 	ptrace_area parea;
441 	int copied, ret;
442 
443 	switch (request) {
444 	case PTRACE_PEEKUSR:
445 		/* read the word at location addr in the USER area. */
446 		return peek_user(child, addr, data);
447 
448 	case PTRACE_POKEUSR:
449 		/* write the word at location addr in the USER area */
450 		return poke_user(child, addr, data);
451 
452 	case PTRACE_PEEKUSR_AREA:
453 	case PTRACE_POKEUSR_AREA:
454 		if (copy_from_user(&parea, (void __force __user *) addr,
455 							sizeof(parea)))
456 			return -EFAULT;
457 		addr = parea.kernel_addr;
458 		data = parea.process_addr;
459 		copied = 0;
460 		while (copied < parea.len) {
461 			if (request == PTRACE_PEEKUSR_AREA)
462 				ret = peek_user(child, addr, data);
463 			else {
464 				addr_t utmp;
465 				if (get_user(utmp,
466 					     (addr_t __force __user *) data))
467 					return -EFAULT;
468 				ret = poke_user(child, addr, utmp);
469 			}
470 			if (ret)
471 				return ret;
472 			addr += sizeof(unsigned long);
473 			data += sizeof(unsigned long);
474 			copied += sizeof(unsigned long);
475 		}
476 		return 0;
477 	case PTRACE_GET_LAST_BREAK:
478 		put_user(child->thread.last_break,
479 			 (unsigned long __user *) data);
480 		return 0;
481 	case PTRACE_ENABLE_TE:
482 		if (!MACHINE_HAS_TE)
483 			return -EIO;
484 		child->thread.per_flags &= ~PER_FLAG_NO_TE;
485 		return 0;
486 	case PTRACE_DISABLE_TE:
487 		if (!MACHINE_HAS_TE)
488 			return -EIO;
489 		child->thread.per_flags |= PER_FLAG_NO_TE;
490 		child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
491 		return 0;
492 	case PTRACE_TE_ABORT_RAND:
493 		if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
494 			return -EIO;
495 		switch (data) {
496 		case 0UL:
497 			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
498 			break;
499 		case 1UL:
500 			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
501 			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
502 			break;
503 		case 2UL:
504 			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
505 			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
506 			break;
507 		default:
508 			return -EINVAL;
509 		}
510 		return 0;
511 	default:
512 		return ptrace_request(child, request, addr, data);
513 	}
514 }
515 
516 #ifdef CONFIG_COMPAT
517 /*
518  * Now the fun part starts... a 31 bit program running in the
519  * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
520  * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
521  * to handle, the difference to the 64 bit versions of the requests
522  * is that the access is done in multiples of 4 byte instead of
523  * 8 bytes (sizeof(unsigned long) on 31/64 bit).
524  * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
525  * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
526  * is a 31 bit program too, the content of struct user can be
527  * emulated. A 31 bit program peeking into the struct user of
528  * a 64 bit program is a no-no.
529  */
530 
531 /*
532  * Same as peek_user_per but for a 31 bit program.
533  */
/*
 * Same as peek_user_per but for a 31 bit program: reads one 32 bit
 * word from the compat per_info layout. 'addr' is the offset into
 * struct compat_per_struct_kernel; the NULL 'dummy32' pointer is only
 * used to compute field offsets. Unknown offsets read as zero.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}
572 
573 /*
574  * Same as peek_user but for a 31 bit program.
575  */
/*
 * Same as peek_user but for a 31 bit program: reads a 32 bit word from
 * the compat user-area layout (struct compat_user). 'addr' must already
 * be validated by peek_user_compat. The NULL 'dummy32' pointer is only
 * used to compute field offsets.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
			tmp |= PSW32_USER_BITS;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/* gpr 0-15 */
			/* Each 31 bit slot maps to the low half of a 64 bit gpr. */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs overlay the high halves of vxrs 0-15. */
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);

	} else
		tmp = 0;

	return tmp;
}
651 
652 static int peek_user_compat(struct task_struct *child,
653 			    addr_t addr, addr_t data)
654 {
655 	__u32 tmp;
656 
657 	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
658 		return -EIO;
659 
660 	tmp = __peek_user_compat(child, addr);
661 	return put_user(tmp, (__u32 __user *) data);
662 }
663 
664 /*
665  * Same as poke_user_per but for a 31 bit program.
666  */
/*
 * Same as poke_user_per but for a 31 bit program: only the PER event
 * mask and the start/end addresses of the user specified per set are
 * writable; writes to any other per_info field are ignored. The NULL
 * 'dummy32' pointer is only used to compute field offsets.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
683 
684 /*
685  * Same as poke_user but for a 31 bit program.
686  */
/*
 * Same as poke_user but for a 31 bit program: writes a 32 bit word into
 * the compat user-area layout, widening psw values to their 64 bit
 * representation. Returns 0 on success or -EINVAL for invalid psw/fpc
 * values. 'addr' must already be validated by poke_user_compat.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			__u32 mask = PSW32_MASK_USER;

			/* RI bit is only writable for run-time instrumented tasks. */
			mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp ^ PSW32_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & mask) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			/* gpr 0-15 */
			/* Each 31 bit slot maps to the low half of a 64 bit gpr. */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if (test_fp_ctl(tmp))
			return -EINVAL;
		child->thread.fpu.fpc = data;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs overlay the high halves of vxrs 0-15. */
			*(__u32 *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = tmp;
		else
			*(__u32 *)((addr_t)
				child->thread.fpu.fprs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}
774 
/*
 * Compat PTRACE_POKEUSR: validate that the tracee is a 31 bit task and
 * that 'addr' is a 4 byte aligned offset within struct compat_user,
 * then write the word. Returns 0 or -EIO/-EINVAL.
 */
static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}
784 
785 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
786 			compat_ulong_t caddr, compat_ulong_t cdata)
787 {
788 	unsigned long addr = caddr;
789 	unsigned long data = cdata;
790 	compat_ptrace_area parea;
791 	int copied, ret;
792 
793 	switch (request) {
794 	case PTRACE_PEEKUSR:
795 		/* read the word at location addr in the USER area. */
796 		return peek_user_compat(child, addr, data);
797 
798 	case PTRACE_POKEUSR:
799 		/* write the word at location addr in the USER area */
800 		return poke_user_compat(child, addr, data);
801 
802 	case PTRACE_PEEKUSR_AREA:
803 	case PTRACE_POKEUSR_AREA:
804 		if (copy_from_user(&parea, (void __force __user *) addr,
805 							sizeof(parea)))
806 			return -EFAULT;
807 		addr = parea.kernel_addr;
808 		data = parea.process_addr;
809 		copied = 0;
810 		while (copied < parea.len) {
811 			if (request == PTRACE_PEEKUSR_AREA)
812 				ret = peek_user_compat(child, addr, data);
813 			else {
814 				__u32 utmp;
815 				if (get_user(utmp,
816 					     (__u32 __force __user *) data))
817 					return -EFAULT;
818 				ret = poke_user_compat(child, addr, utmp);
819 			}
820 			if (ret)
821 				return ret;
822 			addr += sizeof(unsigned int);
823 			data += sizeof(unsigned int);
824 			copied += sizeof(unsigned int);
825 		}
826 		return 0;
827 	case PTRACE_GET_LAST_BREAK:
828 		put_user(child->thread.last_break,
829 			 (unsigned int __user *) data);
830 		return 0;
831 	}
832 	return compat_ptrace_request(child, request, addr, data);
833 }
834 #endif
835 
/*
 * Syscall entry tracing hook, called from entry.S. Runs the ptrace
 * report, then seccomp (deliberately after ptrace, see comment below),
 * then the syscall tracepoint and audit. Returns the system call
 * number to execute, or -1 to skip the system call.
 */
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long mask = -1UL;

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		clear_pt_regs_flag(regs, PIF_SYSCALL);
		return -1;
	}

	/* Do the secure computing check after ptrace. */
	if (secure_computing(NULL)) {
		/* seccomp failures shouldn't expose any additional code. */
		return -1;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);

	/* Audit only sees the low 32 bits of arguments for compat tasks. */
	if (is_compat_task())
		mask = 0xffffffff;

	audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask,
			    regs->gprs[3] &mask, regs->gprs[4] &mask,
			    regs->gprs[5] &mask);

	return regs->gprs[2];
}
874 
/*
 * Syscall exit tracing hook, called from entry.S: audit first, then
 * the syscall tracepoint, then the ptrace exit report.
 */
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
885 
886 /*
887  * user_regset definitions.
888  */
889 
890 static int s390_regs_get(struct task_struct *target,
891 			 const struct user_regset *regset,
892 			 unsigned int pos, unsigned int count,
893 			 void *kbuf, void __user *ubuf)
894 {
895 	if (target == current)
896 		save_access_regs(target->thread.acrs);
897 
898 	if (kbuf) {
899 		unsigned long *k = kbuf;
900 		while (count > 0) {
901 			*k++ = __peek_user(target, pos);
902 			count -= sizeof(*k);
903 			pos += sizeof(*k);
904 		}
905 	} else {
906 		unsigned long __user *u = ubuf;
907 		while (count > 0) {
908 			if (__put_user(__peek_user(target, pos), u++))
909 				return -EFAULT;
910 			count -= sizeof(*u);
911 			pos += sizeof(*u);
912 		}
913 	}
914 	return 0;
915 }
916 
/*
 * regset "set" handler for the general registers: writes psw, gprs,
 * acrs and orig_gpr2 word by word via __poke_user(), from either a
 * kernel buffer or a user buffer. Stops at the first error and
 * returns it (0, -EFAULT or -EINVAL).
 */
static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	/* Access registers of the current task live in the CPU. */
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long  __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	/* Make the new access register values take effect immediately. */
	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
952 
/*
 * regset "get" handler for the floating point registers: assembles an
 * _s390_fp_regs image (fpc + fprs) from the thread's fpu state and
 * copies it out via user_regset_copyout().
 */
static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	_s390_fp_regs fp_regs;

	/* FP registers of the current task live in the CPU. */
	if (target == current)
		save_fpu_regs();

	fp_regs.fpc = target->thread.fpu.fpc;
	fpregs_store(&fp_regs, &target->thread.fpu);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fp_regs, 0, -1);
}
968 
/*
 * regset "set" handler for the floating point registers: validates and
 * writes the fpc (if it is part of the request), then the fprs. On
 * vector machines the fprs are written back into the high halves of
 * vxrs 0-15. Returns 0, -EFAULT or -EINVAL.
 */
static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;
	freg_t fprs[__NUM_FPRS];

	/* FP registers of the current task live in the CPU. */
	if (target == current)
		save_fpu_regs();

	/* Start from the current fpr values so a partial write merges. */
	if (MACHINE_HAS_VX)
		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
	else
		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		/* The pad word after fpc must be zero. */
		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
			return -EINVAL;
		target->thread.fpu.fpc = ufpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					fprs, offsetof(s390_fp_regs, fprs), -1);
	if (rc)
		return rc;

	/* Write the merged fprs back to the thread's fpu state. */
	if (MACHINE_HAS_VX)
		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
	else
		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));

	return rc;
}
1010 
1011 static int s390_last_break_get(struct task_struct *target,
1012 			       const struct user_regset *regset,
1013 			       unsigned int pos, unsigned int count,
1014 			       void *kbuf, void __user *ubuf)
1015 {
1016 	if (count > 0) {
1017 		if (kbuf) {
1018 			unsigned long *k = kbuf;
1019 			*k = target->thread.last_break;
1020 		} else {
1021 			unsigned long  __user *u = ubuf;
1022 			if (__put_user(target->thread.last_break, u))
1023 				return -EFAULT;
1024 		}
1025 	}
1026 	return 0;
1027 }
1028 
1029 static int s390_last_break_set(struct task_struct *target,
1030 			       const struct user_regset *regset,
1031 			       unsigned int pos, unsigned int count,
1032 			       const void *kbuf, const void __user *ubuf)
1033 {
1034 	return 0;
1035 }
1036 
/*
 * Export NT_S390_TDB: the 256-byte transaction diagnostic block saved
 * in thread.trap_tdb.
 */
static int s390_tdb_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	unsigned char *data;

	/*
	 * NOTE(review): 0x200 in int_code presumably marks a
	 * transaction-abort interrupt, i.e. that trap_tdb is valid —
	 * confirm against the interruption code definitions.
	 */
	if (!(regs->int_code & 0x200))
		return -ENODATA;
	data = target->thread.trap_tdb;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
}
1050 
1051 static int s390_tdb_set(struct task_struct *target,
1052 			const struct user_regset *regset,
1053 			unsigned int pos, unsigned int count,
1054 			const void *kbuf, const void __user *ubuf)
1055 {
1056 	return 0;
1057 }
1058 
/*
 * Export NT_S390_VXRS_LOW: the second (low) doubleword of each of the
 * first __NUM_VXRS_LOW vector registers.  The high doublewords overlap
 * the floating point registers and are exported via NT_PRFPREG (see
 * the convert_vx_to_fp() use in s390_fpregs_set).
 */
static int s390_vxrs_low_get(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     void *kbuf, void __user *ubuf)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	/* Flush live vector state into thread.fpu before reading. */
	if (target == current)
		save_fpu_regs();
	/* "+ 1" selects the second 64-bit word of the 128-bit register. */
	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}
1075 
/*
 * Set NT_S390_VXRS_LOW.  Read-modify-write: the current low doublewords
 * are loaded first so a partial copyin leaves untouched registers with
 * their old values, then the merged array is written back.
 */
static int s390_vxrs_low_set(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     const void *kbuf, const void __user *ubuf)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i, rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	/* Flush live vector state so we merge into current values. */
	if (target == current)
		save_fpu_regs();

	/* Prime vxrs[] with the existing low doublewords. */
	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
	if (rc == 0)
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			*((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];

	return rc;
}
1099 
/*
 * Export NT_S390_VXRS_HIGH: the full 128-bit vector registers above
 * the __NUM_VXRS_LOW range.
 */
static int s390_vxrs_high_get(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      void *kbuf, void __user *ubuf)
{
	__vector128 vxrs[__NUM_VXRS_HIGH];

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	/* Flush live vector state into thread.fpu before reading. */
	if (target == current)
		save_fpu_regs();
	memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs));

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}
1115 
1116 static int s390_vxrs_high_set(struct task_struct *target,
1117 			      const struct user_regset *regset,
1118 			      unsigned int pos, unsigned int count,
1119 			      const void *kbuf, const void __user *ubuf)
1120 {
1121 	int rc;
1122 
1123 	if (!MACHINE_HAS_VX)
1124 		return -ENODEV;
1125 	if (target == current)
1126 		save_fpu_regs();
1127 
1128 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1129 				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
1130 	return rc;
1131 }
1132 
1133 static int s390_system_call_get(struct task_struct *target,
1134 				const struct user_regset *regset,
1135 				unsigned int pos, unsigned int count,
1136 				void *kbuf, void __user *ubuf)
1137 {
1138 	unsigned int *data = &target->thread.system_call;
1139 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1140 				   data, 0, sizeof(unsigned int));
1141 }
1142 
1143 static int s390_system_call_set(struct task_struct *target,
1144 				const struct user_regset *regset,
1145 				unsigned int pos, unsigned int count,
1146 				const void *kbuf, const void __user *ubuf)
1147 {
1148 	unsigned int *data = &target->thread.system_call;
1149 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1150 				  data, 0, sizeof(unsigned int));
1151 }
1152 
/*
 * Export NT_S390_GS_CB: the guarded storage control block, if the
 * machine has the facility and the task has one allocated.
 */
static int s390_gs_cb_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data)
		return -ENODATA;
	/* Refresh the saved block from the live registers first. */
	if (target == current)
		save_gs_cb(data);
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(struct gs_cb));
}
1169 
/*
 * Set NT_S390_GS_CB: install a guarded storage control block for the
 * task, allocating one on first use.  For the current task the live
 * registers are saved before and reloaded after the copyin so the new
 * block takes effect immediately.
 */
static int s390_gs_cb_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_cb;
	int rc;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		/*
		 * NOTE(review): 25 appears to be the initial guarded
		 * storage designation value — confirm against the GS
		 * facility documentation.
		 */
		data->gsd = 25;
		target->thread.gs_cb = data;
		/* Enable the guarded storage control in CR2 bit 4. */
		if (target == current)
			__ctl_set_bit(2, 4);
	} else if (target == current) {
		save_gs_cb(data);
	}
	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				data, 0, sizeof(struct gs_cb));
	/* Load even on copyin failure: data may be partially updated. */
	if (target == current)
		restore_gs_cb(data);
	return rc;
}
1197 
/*
 * Export NT_S390_GS_BC: the guarded storage broadcast control block
 * (gs_bc_cb), if one has been set.  No save_gs_cb() here — this block
 * is not loaded into the live registers by this path.
 */
static int s390_gs_bc_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data)
		return -ENODATA;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(struct gs_cb));
}
1212 
/*
 * Set NT_S390_GS_BC: store a guarded storage broadcast control block
 * for the task, allocating it on first use.
 */
static int s390_gs_bc_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		target->thread.gs_bc_cb = data;
	}
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(struct gs_cb));
}
1231 
/*
 * Regsets exposed through the native 64-bit view (ptrace and ELF core
 * note generation).  Entries without hardware support return -ENODEV
 * or -ENODATA from their handlers rather than being omitted here.
 */
static const struct user_regset s390_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	{
		/* 256-byte transaction diagnostic block, single blob. */
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
};
1306 
/* Native 64-bit regset view, selected by task_user_regset_view(). */
static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
1313 
1314 #ifdef CONFIG_COMPAT
/*
 * Export the NT_PRSTATUS regset of a 31-bit compat task, one
 * compat_ulong_t at a time via __peek_user_compat().
 */
static int s390_compat_regs_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	/* Flush live access registers so __peek_user_compat sees them. */
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user_compat(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user_compat(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}
1341 
/*
 * Write the NT_PRSTATUS regset of a 31-bit compat task, one
 * compat_ulong_t at a time via __poke_user_compat().  Access registers
 * of the current task are saved first and reloaded on success.
 */
static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t  __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
1377 
/*
 * Export NT_S390_HIGH_GPRS: the upper 32-bit halves of the 64-bit
 * gprs, which a 31-bit task cannot see in its normal register set.
 * gprs_high walks the pt_regs gpr array with a stride of two
 * compat words (one 64-bit register), reading the first (high) half.
 */
static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     void *kbuf, void __user *ubuf)
{
	compat_ulong_t *gprs_high;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = *gprs_high;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(*gprs_high, u++))
				return -EFAULT;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}
	return 0;
}
1405 
1406 static int s390_compat_regs_high_set(struct task_struct *target,
1407 				     const struct user_regset *regset,
1408 				     unsigned int pos, unsigned int count,
1409 				     const void *kbuf, const void __user *ubuf)
1410 {
1411 	compat_ulong_t *gprs_high;
1412 	int rc = 0;
1413 
1414 	gprs_high = (compat_ulong_t *)
1415 		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1416 	if (kbuf) {
1417 		const compat_ulong_t *k = kbuf;
1418 		while (count > 0) {
1419 			*gprs_high = *k++;
1420 			*gprs_high += 2;
1421 			count -= sizeof(*k);
1422 		}
1423 	} else {
1424 		const compat_ulong_t  __user *u = ubuf;
1425 		while (count > 0 && !rc) {
1426 			unsigned long word;
1427 			rc = __get_user(word, u++);
1428 			if (rc)
1429 				break;
1430 			*gprs_high = word;
1431 			*gprs_high += 2;
1432 			count -= sizeof(*u);
1433 		}
1434 	}
1435 
1436 	return rc;
1437 }
1438 
1439 static int s390_compat_last_break_get(struct task_struct *target,
1440 				      const struct user_regset *regset,
1441 				      unsigned int pos, unsigned int count,
1442 				      void *kbuf, void __user *ubuf)
1443 {
1444 	compat_ulong_t last_break;
1445 
1446 	if (count > 0) {
1447 		last_break = target->thread.last_break;
1448 		if (kbuf) {
1449 			unsigned long *k = kbuf;
1450 			*k = last_break;
1451 		} else {
1452 			unsigned long  __user *u = ubuf;
1453 			if (__put_user(last_break, u))
1454 				return -EFAULT;
1455 		}
1456 	}
1457 	return 0;
1458 }
1459 
1460 static int s390_compat_last_break_set(struct task_struct *target,
1461 				      const struct user_regset *regset,
1462 				      unsigned int pos, unsigned int count,
1463 				      const void *kbuf, const void __user *ubuf)
1464 {
1465 	return 0;
1466 }
1467 
/*
 * Regsets exposed through the 31-bit compat view.  Mostly mirrors
 * s390_regsets with compat-sized elements; NT_S390_HIGH_GPRS is
 * compat-only (the upper gpr halves are invisible to 31-bit code).
 */
static const struct user_regset s390_compat_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	{
		/* 256-byte transaction diagnostic block, single blob. */
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
};
1542 
/* 31-bit compat regset view, selected for TIF_31BIT tasks. */
static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
1549 #endif
1550 
/*
 * Select the regset view matching the tracee's ABI: the 31-bit compat
 * view for TIF_31BIT tasks, the native 64-bit view otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
1559 
/* Names used by regs_query_register_offset()/regs_query_register_name(). */
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
1564 
1565 unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
1566 {
1567 	if (offset >= NUM_GPRS)
1568 		return 0;
1569 	return regs->gprs[offset];
1570 }
1571 
1572 int regs_query_register_offset(const char *name)
1573 {
1574 	unsigned long offset;
1575 
1576 	if (!name || *name != 'r')
1577 		return -EINVAL;
1578 	if (kstrtoul(name + 1, 10, &offset))
1579 		return -EINVAL;
1580 	if (offset >= NUM_GPRS)
1581 		return -EINVAL;
1582 	return offset;
1583 }
1584 
1585 const char *regs_query_register_name(unsigned int offset)
1586 {
1587 	if (offset >= NUM_GPRS)
1588 		return NULL;
1589 	return gpr_names[offset];
1590 }
1591 
1592 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
1593 {
1594 	unsigned long ksp = kernel_stack_pointer(regs);
1595 
1596 	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
1597 }
1598 
1599 /**
1600  * regs_get_kernel_stack_nth() - get Nth entry of the stack
1601  * @regs:pt_regs which contains kernel stack pointer.
1602  * @n:stack entry number.
1603  *
1604  * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
1605  * is specifined by @regs. If the @n th entry is NOT in the kernel stack,
1606  * this returns 0.
1607  */
1608 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
1609 {
1610 	unsigned long addr;
1611 
1612 	addr = kernel_stack_pointer(regs) + n * sizeof(long);
1613 	if (!regs_within_kernel_stack(regs, addr))
1614 		return 0;
1615 	return *(unsigned long *)addr;
1616 }
1617