xref: /linux/arch/x86/kernel/process_64.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/intel_rdt.h>

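/*
 * Per-CPU scratch slot.  It appears to be used by the 64-bit SYSCALL entry
 * code to temporarily stash the user stack pointer while switching to the
 * kernel stack; see the entry assembly for the authoritative usage.
 */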
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Also prints some state that isn't saved in pt_regs. */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs & 0xffff,
		(void *)regs->ip);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss,
		regs->sp, regs->flags);
	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

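	/*
	 * Note: a MOV from a segment register only yields the 16-bit
	 * selector; the hidden FS/GS base addresses are read separately
	 * from the MSRs below.
	 */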
86 	asm("movl %%ds,%0" : "=r" (ds));
87 	asm("movl %%cs,%0" : "=r" (cs));
88 	asm("movl %%es,%0" : "=r" (es));
89 	asm("movl %%fs,%0" : "=r" (fsindex));
90 	asm("movl %%gs,%0" : "=r" (gsindex));
91 
92 	rdmsrl(MSR_FS_BASE, fs);
93 	rdmsrl(MSR_GS_BASE, gs);
94 	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
95 
96 	if (!all)
97 		return;
98 
99 	cr0 = read_cr0();
100 	cr2 = read_cr2();
101 	cr3 = read_cr3();
102 	cr4 = __read_cr4();
103 
104 	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
105 	       fs, fsindex, gs, gsindex, shadowgs);
106 	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
107 			es, cr0);
108 	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
109 			cr4);
110 
111 	get_debugreg(d0, 0);
112 	get_debugreg(d1, 1);
113 	get_debugreg(d2, 2);
114 	get_debugreg(d3, 3);
115 	get_debugreg(d6, 6);
116 	get_debugreg(d7, 7);
117 
118 	/* Only print out debug registers if they are in their non-default state. */
119 	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
120 	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
121 		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
122 		       d0, d1, d2);
123 		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
124 		       d3, d6, d7);
125 	}
126 
127 	if (boot_cpu_has(X86_FEATURE_OSPKE))
128 		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
129 }

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->size);
			BUG();
		}
#endif
	}
}

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

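	/*
	 * For a kernel thread, 'sp' and 'arg' are not a user stack pointer
	 * and argument; they carry the thread function and its argument.
	 * They are stashed in bx/r12 below, where ret_from_fork is expected
	 * to pick them up and call the function instead of returning to
	 * user mode.
	 */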
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

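	/*
	 * If the parent uses an I/O permission bitmap, give the child its
	 * own copy; TIF_IO_BITMAP then tells the context-switch code to
	 * install it for this task.
	 */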
	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip		= new_ip;
	regs->sp		= new_sp;
	regs->cs		= _cs;
	regs->ss		= _ss;
	regs->flags		= X86_EFLAGS_IF;
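	/*
	 * force_iret() is presumably here to make the return to user mode go
	 * through the full IRET path, so that the CS/SS/flags values written
	 * above are actually reloaded rather than skipped by a fast return.
	 */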
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
	unsigned prev_fsindex, prev_gsindex;

	switch_fpu_prepare(prev_fpu, cpu);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, prev_fsindex);
	savesegment(gs, prev_gsindex);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.  This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before fpu__restore(), so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

	/* Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT.  The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * Switch FS and GS.
	 *
	 * These are even more complicated than DS and ES: they have
	 * 64-bit bases that are controlled by arch_prctl.  The bases
	 * don't necessarily match the selectors, as user code can do
	 * any number of things to cause them to be inconsistent.
	 *
	 * We don't promise to preserve the bases if the selectors are
	 * nonzero.  We also don't promise to preserve the base if the
	 * selector is zero and the base doesn't match whatever was
	 * most recently passed to ARCH_SET_FS/GS.  (If/when the
	 * FSGSBASE instructions are enabled, we'll need to offer
	 * stronger guarantees.)
	 *
	 * As an invariant,
	 * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
	 * impossible.
	 */
	if (next->fsindex) {
		/* Loading a nonzero value into FS sets the index and base. */
		loadsegment(fs, next->fsindex);
	} else {
		if (next->fsbase) {
			/* Next index is zero but next base is nonzero. */
			if (prev_fsindex)
				loadsegment(fs, 0);
			wrmsrl(MSR_FS_BASE, next->fsbase);
		} else {
			/* Next base and index are both zero. */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				/*
				 * We don't know the previous base and can't
				 * find out without RDMSR.  Forcibly clear it.
				 */
				loadsegment(fs, __USER_DS);
				loadsegment(fs, 0);
			} else {
				/*
				 * If the previous index is zero and ARCH_SET_FS
				 * didn't change the base, then the base is
				 * also zero and we don't need to do anything.
				 */
				if (prev->fsbase || prev_fsindex)
					loadsegment(fs, 0);
			}
		}
	}
	/*
	 * Save the old state and preserve the invariant.
	 * NB: if prev_fsindex == 0, then we can't reliably learn the base
	 * without RDMSR because Intel user code can zero it without telling
	 * us and AMD user code can program any 32-bit value without telling
	 * us.
	 */
	if (prev_fsindex)
		prev->fsbase = 0;
	prev->fsindex = prev_fsindex;

	if (next->gsindex) {
		/* Loading a nonzero value into GS sets the index and base. */
		load_gs_index(next->gsindex);
	} else {
		if (next->gsbase) {
			/* Next index is zero but next base is nonzero. */
			if (prev_gsindex)
				load_gs_index(0);
			wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
		} else {
			/* Next base and index are both zero. */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				/*
				 * We don't know the previous base and can't
				 * find out without RDMSR.  Forcibly clear it.
				 *
				 * This contains a pointless SWAPGS pair.
				 * Fixing it would involve an explicit check
				 * for Xen or a new pvop.
				 */
				load_gs_index(__USER_DS);
				load_gs_index(0);
			} else {
				/*
				 * If the previous index is zero and ARCH_SET_GS
				 * didn't change the base, then the base is
				 * also zero and we don't need to do anything.
				 */
				if (prev->gsbase || prev_gsindex)
					load_gs_index(0);
			}
		}
	}
	/*
	 * Save the old state and preserve the invariant.
	 * NB: if prev_gsindex == 0, then we can't reliably learn the base
	 * without RDMSR because Intel user code can zero it without telling
	 * us and AMD user code can program any 32-bit value without telling
	 * us.
	 */
	if (prev_gsindex)
		prev->gsbase = 0;
	prev->gsindex = prev_gsindex;

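	/*
	 * Counterpart of switch_fpu_prepare() above: finish switching the
	 * FPU context over to the incoming task.
	 */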
	switch_fpu_finish(next_fpu, cpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);

	/* Reload esp0 and ss1.  This changes current_thread_info(). */
	load_sp0(tss, next);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL.  We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor.  As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths.  Instead, we ensure that SS is never NULL in
		 * system call context.  We do this by replacing NULL SS
		 * selectors at every context switch.  SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt.  Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	/* Load the Intel cache allocation PQR MSR. */
	intel_rdt_sched_in();

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(bool x32)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	/* Mark the associated mm as containing 32-bit tasks. */
	if (x32) {
		clear_thread_flag(TIF_IA32);
		set_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_X32;
		current->personality &= ~READ_IMPLIES_EXEC;
		/* in_compat_syscall() uses the presence of the x32
		   syscall bit flag to determine compat status */
		current->thread.status &= ~TS_COMPAT;
	} else {
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_IA32;
		current->personality |= force_personality32;
		/* Prepare the first "return" to user space */
		current->thread.status |= TS_COMPAT;
	}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.gsindex = 0;
		task->thread.gsbase = addr;
		if (doit) {
			load_gs_index(0);
			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fsbase = addr;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
			ret = wrmsrl_safe(MSR_FS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

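	/*
	 * The ARCH_MAP_VDSO_* codes map the requested vDSO image at a
	 * caller-supplied address and, per prctl_map_vdso() above, return
	 * the image size on success.  Being under CONFIG_CHECKPOINT_RESTORE,
	 * they are presumably intended for checkpoint/restore tooling.
	 */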
#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, addr);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, addr);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, addr);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
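
/*
 * Illustrative sketch (not part of this file): how a 64-bit user program
 * might exercise this syscall directly, assuming <asm/prctl.h> and
 * <sys/syscall.h> provide ARCH_SET_GS/ARCH_GET_GS and SYS_arch_prctl:
 *
 *	unsigned long base;
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, some_base);	// set this thread's GS base
 *	syscall(SYS_arch_prctl, ARCH_GET_GS, &base);		// read it back
 */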

unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}