/* xref: /linux/arch/s390/kernel/kprobes.c (revision b3b77c8caef1750ebeea1054e39e358550ea9f55) */
/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

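/*
 * Prepare a kprobe: copy the probed instruction into an insn slot so it
 * can later be single-stepped out of line, and classify it so that
 * resume_execution() knows which fixups to apply afterwards.
 */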
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* Make sure the probe isn't going on a difficult instruction */
	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
		return -EINVAL;

	/* s390 instructions must be halfword aligned */
	if ((unsigned long)p->addr & 0x01)
		return -EINVAL;

	/* Use the get_insn_slot() facility for correctness */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	get_instruction_type(&p->ainsn);
	p->opcode = *p->addr;
	return 0;
}

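/*
 * Reject opcodes that cannot safely be single-stepped from a copy in the
 * insn slot: instructions that switch the addressing mode or address
 * space (e.g. bassm, bsm, pr, pc, pt), that execute another instruction
 * at a computed address (ex), or that trap to the hypervisor (diag).
 */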
int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
{
	switch (*(__u8 *) instruction) {
	case 0x0c:	/* bassm */
	case 0x0b:	/* bsm	 */
	case 0x83:	/* diag  */
	case 0x44:	/* ex	 */
		return -EINVAL;
	}
	switch (*(__u16 *) instruction) {
	case 0x0101:	/* pr	 */
	case 0xb25a:	/* bsa	 */
	case 0xb240:	/* bakr  */
	case 0xb258:	/* bsg	 */
	case 0xb218:	/* pc	 */
	case 0xb228:	/* pt	 */
		return -EINVAL;
	}
	return 0;
}

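/*
 * Classify the instruction for resume_execution(): since the copy in the
 * insn slot executes at a different address than the original, anything
 * the instruction derives from the PSW (the continuation address, a
 * saved return address in a register) must be rebased to p->addr
 * after the single step.
 */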
void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
{
	/* default fixup method */
	ainsn->fixup = FIXUP_PSW_NORMAL;

	/* save r1 operand */
	ainsn->reg = (*ainsn->insn & 0xf0) >> 4;

	/*
	 * Save the instruction length in bytes; it is encoded in the two
	 * most significant bits of the opcode (Principles of Operation, 5-5).
	 */
	switch (*(__u8 *) (ainsn->insn) >> 6) {
	case 0:
		ainsn->ilen = 2;
		break;
	case 1:
	case 2:
		ainsn->ilen = 4;
		break;
	case 3:
		ainsn->ilen = 6;
		break;
	}

	switch (*(__u8 *) ainsn->insn) {
	case 0x05:	/* balr	*/
	case 0x0d:	/* basr */
		ainsn->fixup = FIXUP_RETURN_REGISTER;
		/* if r2 = 0, no branch will be taken */
		if ((*ainsn->insn & 0x0f) == 0)
			ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x06:	/* bctr	*/
	case 0x07:	/* bcr	*/
		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x45:	/* bal	*/
	case 0x4d:	/* bas	*/
		ainsn->fixup = FIXUP_RETURN_REGISTER;
		break;
	case 0x47:	/* bc	*/
	case 0x46:	/* bct	*/
	case 0x86:	/* bxh	*/
	case 0x87:	/* bxle	*/
		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x82:	/* lpsw	*/
		ainsn->fixup = FIXUP_NOT_REQUIRED;
		break;
	case 0xb2:	/* lpswe */
		if (*(((__u8 *) ainsn->insn) + 1) == 0xb2)
			ainsn->fixup = FIXUP_NOT_REQUIRED;
		break;
	case 0xa7:	/* bras	*/
		if ((*ainsn->insn & 0x0f) == 0x05)
			ainsn->fixup |= FIXUP_RETURN_REGISTER;
		break;
	case 0xc0:
		if ((*ainsn->insn & 0x0f) == 0x00 ||	/* larl  */
		    (*ainsn->insn & 0x0f) == 0x05)	/* brasl */
			ainsn->fixup |= FIXUP_RETURN_REGISTER;
		break;
	case 0xeb:
		if (*(((__u8 *) ainsn->insn) + 5) == 0x44 ||	/* bxhg  */
		    *(((__u8 *) ainsn->insn) + 5) == 0x45)	/* bxleg */
			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0xe3:	/* bctg	*/
		if (*(((__u8 *) ainsn->insn) + 5) == 0x46)
			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	}
}

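/*
 * Swap the breakpoint opcode in or out under stop_machine(): with all
 * other CPUs quiesced, no CPU can be mid-way through fetching the
 * instruction while it is being replaced, so arming and disarming never
 * race with instruction execution.
 */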
static int __kprobes swap_instruction(void *aref)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct ins_replace_args *args = aref;
	int rc;

	kcb->kprobe_status = KPROBE_SWAP_INST;
	rc = probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
	kcb->kprobe_status = status;
	return rc;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.old = p->opcode;
	args.new = BREAKPOINT_INSTRUCTION;
	stop_machine(swap_instruction, &args, NULL);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.old = BREAKPOINT_INSTRUCTION;
	args.new = p->opcode;
	stop_machine(swap_instruction, &args, NULL);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

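/*
 * s390 has no single-step PSW bit; single-stepping is emulated with the
 * PER (Program Event Recording) facility. Control registers 9-11 are
 * programmed to raise an instruction-fetch event for the one instruction
 * in the insn slot, which lands in do_single_step -> DIE_SSTEP once the
 * stepped instruction completes.
 */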
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	per_cr_bits kprobe_per_regs[1];

	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
	regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;

	/* Set up the PER control reg info, will pass to lctl */
	kprobe_per_regs[0].em_instruction_fetch = 1;
	kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
	kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;

	/* Set the PER control regs, turns on single step for this address */
	__ctl_load(kprobe_per_regs, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
	memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
					sizeof(kcb->kprobe_saved_ctl));
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
	memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
					sizeof(kcb->kprobe_saved_ctl));
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
						struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	/* Save the interrupt and PER flags */
	kcb->kprobe_saved_imask = regs->psw.mask &
	    (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
	/* Save the control regs that govern PER */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
}

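/*
 * In the s390 calling convention the caller leaves its return address in
 * %r14, so hijacking a function return only requires swapping that
 * register for the trampoline address at function entry.
 */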
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
					struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
}

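/*
 * Breakpoint handler, entered via DIE_BPT. The PSW already points past
 * the 2-byte breakpoint instruction, so the probe address is
 * psw.addr - 2.
 */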
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned long *addr = (unsigned long *)
		((regs->psw.addr & PSW_ADDR_INSN) - 2);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->psw.mask &= ~PSW_MASK_PER;
				regs->psw.mask |= kcb->kprobe_saved_imask;
				goto no_kprobe;
			}
			/*
			 * We have reentered kprobe_handler() because
			 * another probe was hit while within the handler.
			 * Save the original kprobe variables and just
			 * single-step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p)
		/*
		 * No kprobe at this address. The fault has not been
		 * caused by a kprobe breakpoint. The race of breakpoint
		 * vs. kprobe remove does not exist because on s390 we
		 * use stop_machine to arm/disarm the breakpoints.
		 */
		goto no_kprobe;

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * Function return probe trampoline:
 *	- arch_init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the first instance's ret_addr will point to the
	 *	 real return address, and all the rest will point to
	 *	 kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}
	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
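/*
 * For example (illustrative values only): after stepping a 4-byte
 * non-branch instruction, psw.addr is left at p->ainsn.insn + 4 and
 * FIXUP_PSW_NORMAL rebases it to p->addr + 4. A PSW-relative branch
 * (bras, brasl) computes its target relative to the slot, so the same
 * rebase corrects it, and FIXUP_RETURN_REGISTER additionally rebases the
 * return address saved in the r1-field register. Branches to absolute
 * targets need no PSW rebase; only the fall-through case
 * (psw.addr - insn == ilen) is fixed up via FIXUP_BRANCH_NOT_TAKEN.
 */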
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	regs->psw.addr &= PSW_ADDR_INSN;

	if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
		regs->psw.addr = (unsigned long)p->addr +
				((unsigned long)regs->psw.addr -
				 (unsigned long)p->ainsn.insn);

	if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
		if ((unsigned long)regs->psw.addr -
		    (unsigned long)p->ainsn.insn == p->ainsn.ilen)
			regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;

	if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
		regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr +
						(regs->gprs[p->ainsn.reg] -
						(unsigned long)p->ainsn.insn))
						| PSW_ADDR_AMODE;

	regs->psw.addr |= PSW_ADDR_AMODE;
	/* turn off PER mode */
	regs->psw.mask &= ~PSW_MASK_PER;
	/* Restore the original PER control regs */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask |= kcb->kprobe_saved_imask;
}

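/*
 * Entered via DIE_SSTEP once the PER event for the stepped copy fires:
 * run the post_handler, fix up the PSW and registers, and restore the
 * pre-probe PER/interrupt state.
 */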
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);

	/* Restore the original saved kprobe variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, the psw
	 * mask will have PER set, in which case we continue the remaining
	 * processing of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}

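/*
 * Entered via DIE_TRAP when a fault is raised while a kprobe is active,
 * e.g. during the out-of-line single step or inside a user handler.
 */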
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_SWAP_INST:
		/* We are here because the instruction replacement failed */
		return 0;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the psw back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE;
		regs->psw.mask &= ~PSW_MASK_PER;
		regs->psw.mask |= kcb->kprobe_saved_imask;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (entry) {
			regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

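/*
 * Jprobes: the pre handler saves the register set and the top of the
 * stack, then redirects the PSW to the jprobe handler. When that handler
 * calls jprobe_return(), the breakpoint it executes brings control back
 * through kprobe_handler()'s break_handler path, and
 * longjmp_break_handler() restores the saved state.
 */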
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* set up the return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;

	/* r14 is the function return address */
	kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
	/* r15 is the stack pointer */
	kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
	addr = (unsigned long)kcb->jprobe_saved_r15;

	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
	       MIN_STACK_SIZE(addr));
	return 1;
}

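/*
 * .word 0x0002 is the same opcode used as BREAKPOINT_INSTRUCTION, so
 * returning from a jprobe handler re-enters kprobe_handler(), which
 * dispatches to longjmp_break_handler() via the break_handler hook.
 */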
void __kprobes jprobe_return(void)
{
	asm volatile(".word 0x0002");
}

void __kprobes jprobe_return_end(void)
{
	asm volatile("bcr 0,0");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* put the stack back */
	memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
	       MIN_STACK_SIZE(stack_addr));
	preempt_enable_no_resched();
	return 1;
}

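/*
 * The trampoline itself is probed, so that returns redirected to it are
 * intercepted by trampoline_probe_handler() above.
 */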
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
		return 1;
	return 0;
}