xref: /linux/arch/x86/include/asm/switch_to.h (revision 71dfa617ea9f18e4585fe78364217cd32b1fc382)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_X86_SWITCH_TO_H
3 #define _ASM_X86_SWITCH_TO_H
4 
5 #include <linux/sched/task_stack.h>
6 
struct task_struct; /* one of the stranger aspects of C forward declarations */

/*
 * Register-level task switch (assembly, per its name and the layout
 * contract on struct inactive_task_frame below).  Returns the task that
 * was running before the switch — see the switch_to() macro.
 */
struct task_struct *__switch_to_asm(struct task_struct *prev,
				    struct task_struct *next);

/* C portion of the context switch, called from the asm side. */
__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);

/*
 * First code executed by a newly forked task: asm stub plus its C body.
 * @fn/@fn_arg are the kernel-thread function and argument (NULL fn
 * presumably means a user fork — confirm against ret_from_fork()).
 */
asmlinkage void ret_from_fork_asm(void);
__visible void ret_from_fork(struct task_struct *prev, struct pt_regs *regs,
			     int (*fn)(void *), void *fn_arg);
18 
/*
 * This is the structure pointed to by thread.sp for an inactive task.  The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
	/* Callee-saved registers, saved/restored by __switch_to_asm(). */
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;	/* kthread_frame_init(): kthread fn argument */
#else
	unsigned long flags;
	unsigned long si;
	unsigned long di;	/* kthread_frame_init(): kthread fn argument */
#endif
	unsigned long bx;	/* kthread_frame_init(): kthread fn pointer */

	/*
	 * These two fields must be together.  They form a stack frame header,
	 * needed by get_frame_pointer().
	 */
	unsigned long bp;
	unsigned long ret_addr;
};
43 
/*
 * Stack layout of a freshly forked task: the switch frame consumed by
 * __switch_to_asm(), immediately followed by a full register set
 * (presumably the pt_regs handed to ret_from_fork() — confirm in
 * copy_thread()).
 */
struct fork_frame {
	struct inactive_task_frame frame;
	struct pt_regs regs;
};
48 
/*
 * Switch from task @prev to task @next.  @last receives __switch_to_asm()'s
 * return value: the task that was actually running before the current task
 * resumed (which, by the time we run again, need not be @prev).
 */
#define switch_to(prev, next, last)					\
do {									\
	((last) = __switch_to_asm((prev), (next)));			\
} while (0)
53 
#ifdef CONFIG_X86_32
/*
 * Propagate the task's SYSENTER CS to the per-cpu TSS cache and the
 * MSR, skipping the (expensive) MSR write when the cached value is
 * already current.
 */
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
	unsigned short new_cs = thread->sysenter_cs;

	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (likely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) != new_cs)) {
		this_cpu_write(cpu_tss_rw.x86_tss.ss1, new_cs);
		wrmsr(MSR_IA32_SYSENTER_CS, new_cs, 0);
	}
}
#endif
65 
/* This is used when switching tasks or entering/exiting vm86 mode. */
static inline void update_task_stack(struct task_struct *task)
{
	/* sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
	/* 32-bit caches the incoming task's sp0 in the per-cpu TSS sp1 slot. */
	this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
#else
	if (cpu_feature_enabled(X86_FEATURE_FRED)) {
		/* WRMSRNS is a baseline feature for FRED. */
		wrmsrns(MSR_IA32_FRED_RSP0, (unsigned long)task_stack_page(task) + THREAD_SIZE);
	} else if (cpu_feature_enabled(X86_FEATURE_XENPV)) {
		/* Xen PV enters the kernel on the thread stack. */
		load_sp0(task_top_of_stack(task));
	}
	/*
	 * Native 64-bit without FRED: nothing to update — sp0 stays on the
	 * constant entry trampoline stack (see comment above).
	 */
#endif
}
82 
83 static inline void kthread_frame_init(struct inactive_task_frame *frame,
84 				      int (*fun)(void *), void *arg)
85 {
86 	frame->bx = (unsigned long)fun;
87 #ifdef CONFIG_X86_32
88 	frame->di = (unsigned long)arg;
89 #else
90 	frame->r12 = (unsigned long)arg;
91 #endif
92 }
93 
94 #endif /* _ASM_X86_SWITCH_TO_H */
95