xref: /linux/arch/x86/kernel/process_64.c (revision 2decec48b0fd28ffdbf4cc684bd04e735f0839dd)
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/resctrl_sched.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif

#include "process.h"

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, es;

	show_iret_regs(regs);

	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	if (mode == SHOW_REGS_SHORT)
		return;

	if (mode == SHOW_REGS_USER) {
		rdmsrl(MSR_FS_BASE, fs);
		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
		printk(KERN_DEFAULT "FS:  %016lx GS:  %016lx\n",
		       fs, shadowgs);
		return;
	}

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04lx DS: %04x ES: %04x CR0: %016lx\n", regs->cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->nr_entries);
			BUG();
		}
#endif
	}
}

enum which_selector {
	FS,
	GS
};

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available.  The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero.  On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct.  This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs.  In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}

static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	save_base_legacy(task, task->thread.fsindex, FS);
	save_base_legacy(task, task->thread.gsindex, GS);
}

#if IS_ENABLED(CONFIG_KVM)
/*
 * While a process is running, current->thread.fsbase and
 * current->thread.gsbase may not match the corresponding CPU registers (see
 * save_base_legacy()).  KVM wants an efficient way to save and restore
 * FSBASE and GSBASE.
 * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE.
 */
void save_fsgs_for_kvm(void)
{
	save_fsgs(current);
}
EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
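
/*
 * As of this revision, the expected caller is KVM's host-state save path
 * (the VMX switch-to-guest code), which runs this shortly before VM-entry
 * so that the bases saved in current->thread can be trusted.
 */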
#endif

static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}

static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives.  This is the case on
				 * Intel-style CPUs.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment.  Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}
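
/*
 * Worked example of the fast path above: when a 64-bit task that has never
 * touched %fs switches to another such task, prev_index, next_index and
 * prev_base are all zero, so the (prev_index | next_index | prev_base)
 * test skips the segment load (and any MSR write) entirely.
 */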

static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
					      struct thread_struct *next)
{
	load_seg_legacy(prev->fsindex, prev->fsbase,
			next->fsindex, next->fsbase, FS);
	load_seg_legacy(prev->gsindex, prev->gsbase,
			next->gsindex, next->gsbase, GS);
}

static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
					    unsigned short selector)
{
	unsigned short idx = selector >> 3;
	unsigned long base;

	if (likely((selector & SEGMENT_TI_MASK) == 0)) {
		if (unlikely(idx >= GDT_ENTRIES))
			return 0;

		/*
		 * There are no user segments in the GDT with nonzero bases
		 * other than the TLS segments.
		 */
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			return 0;

		idx -= GDT_ENTRY_TLS_MIN;
		base = get_desc_base(&task->thread.tls_array[idx]);
	} else {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		struct ldt_struct *ldt;

		/*
		 * If performance here mattered, we could protect the LDT
		 * with RCU.  This is a slow path, though, so we can just
		 * take the mutex.
		 */
		mutex_lock(&task->mm->context.lock);
		ldt = task->mm->context.ldt;
		if (unlikely(!ldt || idx >= ldt->nr_entries))
			base = 0;
		else
			base = get_desc_base(ldt->entries + idx);
		mutex_unlock(&task->mm->context.lock);
#else
		base = 0;
#endif
	}

	return base;
}
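
/*
 * For reference: get_desc_base() (<asm/desc.h>) assembles the base that is
 * scattered across the descriptor as base0 | (base1 << 16) | (base2 << 24),
 * so a base read out of the GDT or LDT is at most 32 bits wide; only the
 * MSR-based FS/GS bases can exceed 4 GiB.
 */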

unsigned long x86_fsbase_read_task(struct task_struct *task)
{
	unsigned long fsbase;

	if (task == current)
		fsbase = x86_fsbase_read_cpu();
	else if (task->thread.fsindex == 0)
		fsbase = task->thread.fsbase;
	else
		fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);

	return fsbase;
}

unsigned long x86_gsbase_read_task(struct task_struct *task)
{
	unsigned long gsbase;

	if (task == current)
		gsbase = x86_gsbase_read_cpu_inactive();
	else if (task->thread.gsindex == 0)
		gsbase = task->thread.gsbase;
	else
		gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);

	return gsbase;
}
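
/*
 * Besides ARCH_GET_FS/ARCH_GET_GS below, these read helpers also back the
 * ptrace register view (fs_base and gs_base in struct user_regs_struct).
 */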

void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.fsbase = fsbase;
}

void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.gsbase = gsbase;
}

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
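
/*
 * Note on the CLONE_SETTLS path above: a 64-bit caller passes the new
 * thread's FS base directly in the tls argument (this is how
 * pthread_create() points %fs at the new thread's TCB), whereas a 32-bit
 * caller passes a struct user_desc describing a TLS GDT entry instead.
 */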

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip		= new_ip;
	regs->sp		= new_sp;
	regs->cs		= _cs;
	regs->ss		= _ss;
	regs->flags		= X86_EFLAGS_IF;
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif
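
/*
 * start_thread() runs at the tail of execve(), called by the binfmt loader
 * (e.g. load_elf_binary()) once the new mm is set up: it resets the data
 * segments and the user register frame, and force_iret() ensures the new
 * values take effect on the return to user mode.
 */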

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here.  Set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();

	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
		     this_cpu_read(irq_count) != -1);

	switch_fpu_prepare(prev_fpu, cpu);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.  This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must be done
	 * before fpu__restore(), so the TS bit is up to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT.  The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	x86_fsgsbase_load(prev, next);

	switch_fpu_finish(next_fpu, cpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);
	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

	/* Reload sp0. */
	update_task_stack(next_p);

	switch_to_extra(prev_p, next_p);

#ifdef CONFIG_XEN_PV
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL.  We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor.  As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths.  Instead, we ensure that SS is never NULL in
		 * system call context.  We do this by replacing NULL SS
		 * selectors at every context switch.  SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt.  Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	/* Load the Intel cache allocation PQR MSR. */
	resctrl_sched_in();

	return prev_p;
}
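
/*
 * Note that __switch_to() is not called directly: context_switch() goes
 * through the switch_to() macro, which enters __switch_to_asm (entry_64.S)
 * to swap the kernel stacks and callee-saved registers before tail-calling
 * this function for everything else.
 */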

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);
	/* Pretend that this comes from a 64bit execve */
	task_pt_regs(current)->orig_ax = __NR_execve;
	current_thread_info()->status &= ~TS_COMPAT;

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/*
	 * TBD: this overwrites the user's setup.  Should have two bits.
	 * But 64-bit processes have always behaved this way, so it's
	 * not too bad.  The main problem is just that 32-bit children
	 * are affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}

static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
	clear_thread_flag(TIF_IA32);
	set_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_X32;
	current->personality &= ~READ_IMPLIES_EXEC;
	/*
	 * in_32bit_syscall() uses the presence of the x32 syscall bit
	 * flag to determine compat status.  The x86 mmap() code relies on
	 * the syscall bitness, so set the x32 syscall bit right here to make
	 * in_32bit_syscall() work during exec().
	 *
	 * Pretend to come from an x32 execve.
	 */
	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
	current_thread_info()->status &= ~TS_COMPAT;
#endif
}

static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
	set_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_IA32;
	current->personality |= force_personality32;
	/* Prepare the first "return" to user space */
	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
	current_thread_info()->status |= TS_COMPAT;
#endif
}

void set_personality_ia32(bool x32)
{
	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	if (x32)
		__set_personality_x32();
	else
		__set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif
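
/*
 * The ARCH_MAP_VDSO_* options handled below exist for checkpoint/restore
 * (e.g. CRIU), which needs to map a vDSO image back at a known address in
 * the restored process; on success prctl_map_vdso() returns the image size.
 */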

long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
	int ret = 0;

	switch (option) {
	case ARCH_SET_GS: {
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * ARCH_SET_GS has always overwritten the index
		 * and the base. Zero is the most sensible value
		 * to put in the index, and is the only value that
		 * makes any sense if FSGSBASE is unavailable.
		 */
		if (task == current) {
			loadseg(GS, 0);
			x86_gsbase_write_cpu_inactive(arg2);

			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.gsbase.
			 */
			task->thread.gsbase = arg2;

		} else {
			task->thread.gsindex = 0;
			x86_gsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_SET_FS: {
		/*
		 * Not strictly needed for %fs, but do it for symmetry
		 * with %gs
		 */
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * Set the selector to 0 for the same reason
		 * as %gs above.
		 */
		if (task == current) {
			loadseg(FS, 0);
			x86_fsbase_write_cpu(arg2);

			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.fsbase.
			 */
			task->thread.fsbase = arg2;
		} else {
			task->thread.fsindex = 0;
			x86_fsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_GET_FS: {
		unsigned long base = x86_fsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base = x86_gsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, arg2);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, arg2);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
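
/*
 * A minimal userspace sketch of the interface above (illustrative only, not
 * part of this file; assumes glibc's syscall(2) wrapper and the ARCH_*
 * constants from <asm/prctl.h>):
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long fsbase, gsbase;
 *
 *		// ARCH_GET_FS stores the FS base via put_user()
 *		syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase);
 *		// ARCH_GET_GS reads the inactive user GS base
 *		syscall(SYS_arch_prctl, ARCH_GET_GS, &gsbase);
 *		printf("fs base: %#lx, gs base: %#lx\n", fsbase, gsbase);
 *		return 0;
 *	}
 */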

SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	long ret;

	ret = do_arch_prctl_64(current, option, arg2);
	if (ret == -EINVAL)
		ret = do_arch_prctl_common(current, option, arg2);

	return ret;
}
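
/*
 * do_arch_prctl_common() (implemented in process.c) handles the options
 * that are not 64-bit specific, e.g. ARCH_GET_CPUID/ARCH_SET_CPUID for
 * CPUID faulting, which is why it also serves as the whole compat entry
 * point below.
 */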

#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}
#endif

unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}