/* xref: /linux/arch/x86/kernel/process_64.c (revision 308d3165d8b2b98d3dc3d97d6662062735daea67) */
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>

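/*
 * Per-CPU scratch slot used by the 64-bit SYSCALL entry code to stash the
 * user stack pointer while it switches to the kernel stack.
 */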
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Also prints some state that isn't saved in pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs & 0xffff,
		(void *)regs->ip);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss,
		regs->sp, regs->flags);
	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

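/*
 * Release architecture-specific thread resources.  There is nothing to free
 * on x86-64, but sanity-check that the dead task does not still own an LDT,
 * which would indicate a leak.
 */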
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->size);
			BUG();
		}
#endif
	}
}

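/*
 * Set up the saved registers and thread state of a newly forked task.
 * Kernel threads get a (function, argument) pair in the inactive frame;
 * user threads inherit the parent's register state, segments, I/O bitmap
 * and, with CLONE_SETTLS, a new TLS area.
 */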
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

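	/*
	 * If the parent uses an I/O permission bitmap, the child needs its
	 * own private copy of it.
	 */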
	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

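/*
 * Reset the segment registers and saved user register state so that the
 * task enters user space at new_ip/new_sp with the given code, stack and
 * data segment selectors (used when starting a new executable).
 */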
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip		= new_ip;
	regs->sp		= new_sp;
	regs->cs		= _cs;
	regs->ss		= _ss;
	regs->flags		= X86_EFLAGS_IF;
	force_iret();
}

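/* Start a native 64-bit user task at new_ip/new_sp. */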
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_COMPAT
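/* Start a compat (ia32 or x32) user task at new_ip/new_sp. */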
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
	unsigned prev_fsindex, prev_gsindex;

	switch_fpu_prepare(prev_fpu, cpu);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, prev_fsindex);
	savesegment(gs, prev_gsindex);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.  This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before fpu__restore(), so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

	/* Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT.  The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * Switch FS and GS.
	 *
	 * These are even more complicated than DS and ES: they have
	 * 64-bit bases that are controlled by arch_prctl.  The bases
	 * don't necessarily match the selectors, as user code can do
	 * any number of things to cause them to be inconsistent.
	 *
	 * We don't promise to preserve the bases if the selectors are
	 * nonzero.  We also don't promise to preserve the base if the
	 * selector is zero and the base doesn't match whatever was
	 * most recently passed to ARCH_SET_FS/GS.  (If/when the
	 * FSGSBASE instructions are enabled, we'll need to offer
	 * stronger guarantees.)
	 *
	 * As an invariant,
	 * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
	 * impossible.
	 */
	if (next->fsindex) {
		/* Loading a nonzero value into FS sets the index and base. */
		loadsegment(fs, next->fsindex);
	} else {
		if (next->fsbase) {
			/* Next index is zero but next base is nonzero. */
			if (prev_fsindex)
				loadsegment(fs, 0);
			wrmsrl(MSR_FS_BASE, next->fsbase);
		} else {
			/* Next base and index are both zero. */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				/*
				 * We don't know the previous base and can't
				 * find out without RDMSR.  Forcibly clear it.
				 */
				loadsegment(fs, __USER_DS);
				loadsegment(fs, 0);
			} else {
				/*
				 * If the previous index is zero and ARCH_SET_FS
				 * didn't change the base, then the base is
				 * also zero and we don't need to do anything.
				 */
				if (prev->fsbase || prev_fsindex)
					loadsegment(fs, 0);
			}
		}
	}
	/*
	 * Save the old state and preserve the invariant.
	 * NB: if prev_fsindex == 0, then we can't reliably learn the base
	 * without RDMSR because Intel user code can zero it without telling
	 * us and AMD user code can program any 32-bit value without telling
	 * us.
	 */
	if (prev_fsindex)
		prev->fsbase = 0;
	prev->fsindex = prev_fsindex;

	if (next->gsindex) {
		/* Loading a nonzero value into GS sets the index and base. */
		load_gs_index(next->gsindex);
	} else {
		if (next->gsbase) {
			/* Next index is zero but next base is nonzero. */
			if (prev_gsindex)
				load_gs_index(0);
			wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
		} else {
			/* Next base and index are both zero. */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				/*
				 * We don't know the previous base and can't
				 * find out without RDMSR.  Forcibly clear it.
				 *
				 * This contains a pointless SWAPGS pair.
				 * Fixing it would involve an explicit check
				 * for Xen or a new pvop.
				 */
				load_gs_index(__USER_DS);
				load_gs_index(0);
			} else {
				/*
				 * If the previous index is zero and ARCH_SET_GS
				 * didn't change the base, then the base is
				 * also zero and we don't need to do anything.
				 */
				if (prev->gsbase || prev_gsindex)
					load_gs_index(0);
			}
		}
	}
	/*
	 * Save the old state and preserve the invariant.
	 * NB: if prev_gsindex == 0, then we can't reliably learn the base
	 * without RDMSR because Intel user code can zero it without telling
	 * us and AMD user code can program any 32-bit value without telling
	 * us.
	 */
	if (prev_gsindex)
		prev->gsbase = 0;
	prev->gsindex = prev_gsindex;

	switch_fpu_finish(next_fpu, cpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);

	/* Reload esp0 and ss1.  This changes current_thread_info(). */
	load_sp0(tss, next);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL.  We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor.  As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths.  Instead, we ensure that SS is never NULL in
		 * system call context.  We do this by replacing NULL SS
		 * selectors at every context switch.  SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt.  Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	return prev_p;
}

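/* Mark the current task as a native 64-bit process. */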
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64-bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32-bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

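/*
 * Mark the current task as a compat process: x32 (64-bit instructions with
 * 32-bit pointers) when @x32 is true, classic ia32 emulation otherwise.
 */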
void set_personality_ia32(bool x32)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	/* Mark the associated mm as containing 32-bit tasks. */
	if (x32) {
		clear_thread_flag(TIF_IA32);
		set_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_X32;
		current->personality &= ~READ_IMPLIES_EXEC;
		/* in_compat_syscall() uses the presence of the x32
		   syscall bit flag to determine compat status */
		current->thread.status &= ~TS_COMPAT;
	} else {
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_IA32;
		current->personality |= force_personality32;
		/* Prepare the first "return" to user space */
		current->thread.status |= TS_COMPAT;
	}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
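/*
 * Map the given vDSO image at @addr for checkpoint/restore and return its
 * size so the caller knows how much address space the mapping occupies.
 */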
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif

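/*
 * Back end of the arch_prctl(2) syscall: get or set the FS/GS base of a task
 * (e.g. arch_prctl(ARCH_SET_FS, base) is how 64-bit user-space runtimes set
 * up TLS) and, under CONFIG_CHECKPOINT_RESTORE, map a vDSO image.
 */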
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.gsindex = 0;
		task->thread.gsbase = addr;
		if (doit) {
			load_gs_index(0);
			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fsbase = addr;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
			ret = wrmsrl_safe(MSR_FS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, addr);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, addr);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, addr);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

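/* arch_prctl(2) syscall entry point: operate on the current task. */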
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

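/* Return the user stack pointer saved in @task's pt_regs. */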
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}