/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_SWITCH_TO_H
#define _ASM_RISCV_SWITCH_TO_H

#include <linux/jump_label.h>
#include <linux/sched/task_stack.h>
#include <linux/mm_types.h>
#include <asm/vector.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>

#ifdef CONFIG_FPU
extern void __fstate_save(struct task_struct *save_to);
extern void __fstate_restore(struct task_struct *restore_from);

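/*
 * Mark the FP context in the saved status register as "clean": the FP
 * register file and the in-memory copy agree, so no save is needed at
 * the next context switch unless the hardware dirties the state again.
 */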
static inline void __fstate_clean(struct pt_regs *regs)
{
	regs->status = (regs->status & ~SR_FS) | SR_FS_CLEAN;
}

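/*
 * Turn the FP unit off in @task's saved status; FP instructions will
 * trap until it is switched back on.
 */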
static inline void fstate_off(struct task_struct *task,
			      struct pt_regs *regs)
{
	regs->status = (regs->status & ~SR_FS) | SR_FS_OFF;
}

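/*
 * Save the FP register file into @task's thread struct, but only if the
 * hardware has marked the state dirty since the last save.
 */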
static inline void fstate_save(struct task_struct *task,
			       struct pt_regs *regs)
{
	if ((regs->status & SR_FS) == SR_FS_DIRTY) {
		__fstate_save(task);
		__fstate_clean(regs);
	}
}

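/*
 * Reload the FP register file from @task's thread struct, unless the FP
 * unit is off for this task.
 */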
static inline void fstate_restore(struct task_struct *task,
				  struct pt_regs *regs)
{
	if ((regs->status & SR_FS) != SR_FS_OFF) {
		__fstate_restore(task);
		__fstate_clean(regs);
	}
}

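/*
 * Switch the FP context on a task switch: write back @prev's live FP
 * state if it is dirty, then load @next's saved state.
 */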
static inline void __switch_to_fpu(struct task_struct *prev,
				   struct task_struct *next)
{
	struct pt_regs *regs;

	regs = task_pt_regs(prev);
	fstate_save(prev, regs);
	fstate_restore(next, task_pt_regs(next));
}

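/*
 * The FPU is usable iff the F (single-precision) or D (double-precision)
 * extension is present.
 */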
static __always_inline bool has_fpu(void)
{
	return riscv_has_extension_likely(RISCV_ISA_EXT_f) ||
	       riscv_has_extension_likely(RISCV_ISA_EXT_d);
}
#else
static __always_inline bool has_fpu(void) { return false; }
#define fstate_save(task, regs) do { } while (0)
#define fstate_restore(task, regs) do { } while (0)
#define __switch_to_fpu(__prev, __next) do { } while (0)
#endif

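/*
 * Update the bits selected by @mask in @task's cached envcfg value and,
 * if @task is currently running, write the result through to the CSR.
 */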
static inline void envcfg_update_bits(struct task_struct *task,
				      unsigned long mask, unsigned long val)
{
	unsigned long envcfg;

	envcfg = (task->thread.envcfg & ~mask) | val;
	task->thread.envcfg = envcfg;
	if (task == current)
		csr_write(CSR_ENVCFG, envcfg);
}

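/*
 * Install the incoming task's envcfg. The csrw is patched in by the
 * alternatives framework only on hardware that implements the *envcfg
 * CSRs (the Xlinuxenvcfg pseudo-extension); elsewhere it stays a nop.
 */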
static inline void __switch_to_envcfg(struct task_struct *next)
{
	asm volatile (ALTERNATIVE("nop", "csrw " __stringify(CSR_ENVCFG) ", %0",
				  0, RISCV_ISA_EXT_XLINUXENVCFG, 1)
			:: "r" (next->thread.envcfg) : "memory");
}

extern struct task_struct *__switch_to(struct task_struct *,
				       struct task_struct *);

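/*
 * A thread that migrated to this CPU may see a stale instruction cache
 * if its mm or the thread itself has icache flushing forced (e.g. via
 * the PR_RISCV_SET_ICACHE_FLUSH_CTX prctl), so flush on the way in.
 */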
static inline bool switch_to_should_flush_icache(struct task_struct *task)
{
#ifdef CONFIG_SMP
	bool stale_mm = task->mm && task->mm->context.force_icache_flush;
	bool stale_thread = task->thread.force_icache_flush;
	bool thread_migrated = smp_processor_id() != task->thread.prev_cpu;

	return thread_migrated && (stale_mm || stale_thread);
#else
	return false;
#endif
}

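/* Record the CPU a thread last ran on, for the migration check above. */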
#ifdef CONFIG_SMP
#define __set_prev_cpu(thread) ((thread).prev_cpu = smp_processor_id())
#else
#define __set_prev_cpu(thread)
#endif

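/*
 * The top-level context switch: save/restore the FP and vector state,
 * flush the icache if the incoming thread migrated with flushing forced,
 * install the incoming task's envcfg, then swap the integer context in
 * __switch_to(). @last receives the task we ultimately switched from.
 */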
#define switch_to(prev, next, last)			\
do {							\
	struct task_struct *__prev = (prev);		\
	struct task_struct *__next = (next);		\
	__set_prev_cpu(__prev->thread);			\
	if (has_fpu())					\
		__switch_to_fpu(__prev, __next);	\
	if (has_vector())				\
		__switch_to_vector(__prev, __next);	\
	if (switch_to_should_flush_icache(__next))	\
		local_flush_icache_all();		\
	__switch_to_envcfg(__next);			\
	((last) = __switch_to(__prev, __next));		\
} while (0)

#endif /* _ASM_RISCV_SWITCH_TO_H */