/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PROCESSOR_H
#define _ASM_RISCV_PROCESSOR_H

#include <linux/const.h>
#include <linux/cache.h>
#include <linux/prctl.h>

#include <vdso/processor.h>

#include <asm/ptrace.h>
#include <asm/insn-def.h>
#include <asm/alternative-macros.h>
#include <asm/hwcap.h>

#define arch_get_mmap_end(addr, len, flags)			\
({								\
	STACK_TOP_MAX;						\
})

#define arch_get_mmap_base(addr, base)				\
({								\
	base;							\
})
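
/*
 * Note: both hooks above ignore their arguments; the mmap() search
 * ceiling is always STACK_TOP_MAX, and the base is passed through
 * unchanged.
 */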

#ifdef CONFIG_64BIT
#define DEFAULT_MAP_WINDOW	(UL(1) << (MMAP_VA_BITS - 1))
#define STACK_TOP_MAX		TASK_SIZE_64
#else
#define DEFAULT_MAP_WINDOW	TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE
#endif
#define STACK_ALIGN		16

#define STACK_TOP		DEFAULT_MAP_WINDOW

#ifdef CONFIG_MMU
#define user_max_virt_addr() arch_get_mmap_end(ULONG_MAX, 0, 0)
#else
#define user_max_virt_addr() 0
#endif /* CONFIG_MMU */

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap().
 */
#ifdef CONFIG_64BIT
#define TASK_UNMAPPED_BASE	PAGE_ALIGN((UL(1) << MMAP_MIN_VA_BITS) / 3)
#else
#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)
#endif
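
/*
 * Illustrative arithmetic (the VA width here is an assumption, not a
 * value defined in this file): if MMAP_MIN_VA_BITS were 39, the 64-bit
 * case would give PAGE_ALIGN((1UL << 39) / 3), i.e. a base roughly
 * 170 GiB into the address space.
 */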

#ifndef __ASSEMBLY__

struct task_struct;
struct pt_regs;

/*
 * We use a flag to track in-kernel Vector context. Currently the flag has the
 * following meaning:
 *
 *  - bit 0: indicates whether the in-kernel Vector context is active. Setting
 *    this state disables preemption. On a non-RT kernel, it also disables bh.
 *  - bit 8: tracks preemptible kernel-mode Vector when RISCV_ISA_V_PREEMPTIVE
 *    is enabled. Calling kernel_vector_begin() does not disable preemption if
 *    the thread's kernel_vstate.datap is allocated. Instead, the kernel sets
 *    this bit, and the trap entry/exit code then knows whether we are
 *    entering/exiting the context that owns preempt_v.
 *     - 0: the task is not using preempt_v.
 *     - 1: the task is actively using preempt_v. Whether the task owns the
 *          preempt_v context is decided by the bits in RISCV_V_CTX_DEPTH_MASK.
 *  - bits 16-23: RISCV_V_CTX_DEPTH_MASK, used by the context-tracking routine
 *    once preempt_v starts:
 *     - 0: the task is actively using, and owns, the preempt_v context.
 *     - non-zero: the task was using preempt_v but then took a trap within it.
 *       The task therefore does not own preempt_v: any use of Vector must save
 *       preempt_v, if dirty, and fall back to non-preemptible kernel-mode
 *       Vector.
 *  - bit 29: the thread voluntarily called schedule() while holding an active
 *    preempt_v. All preempt_v context should be dropped in that case because
 *    V-regs are caller-saved; only sstatus.VS=ON persists across a schedule()
 *    call.
 *  - bit 30: the in-kernel preempt_v context has been saved and needs to be
 *    restored when returning to the context that owns preempt_v.
 *  - bit 31: the in-kernel preempt_v context is dirty, as signaled by the
 *    trap entry code. Any context switch out of the current task needs to
 *    save it to the task's in-kernel V context. Likewise, any trap nesting
 *    on top of preempt_v that requests to use V needs a save.
 */
#define RISCV_V_CTX_DEPTH_MASK		0x00ff0000

#define RISCV_V_CTX_UNIT_DEPTH		0x00010000
#define RISCV_KERNEL_MODE_V		0x00000001
#define RISCV_PREEMPT_V			0x00000100
#define RISCV_PREEMPT_V_DIRTY		0x80000000
#define RISCV_PREEMPT_V_NEED_RESTORE	0x40000000
#define RISCV_PREEMPT_V_IN_SCHEDULE	0x20000000

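/*
 * A minimal sketch (not part of the kernel API; the helper name is
 * hypothetical) of how the flag bits above combine: a task owns the
 * preempt_v context only while it is actively using preempt_v and the
 * trap-nesting depth in RISCV_V_CTX_DEPTH_MASK is zero.
 */
#if 0
static inline bool riscv_v_owns_preempt_v(u32 riscv_v_flags)
{
	return (riscv_v_flags & RISCV_PREEMPT_V) &&
	       !(riscv_v_flags & RISCV_V_CTX_DEPTH_MASK);
}
#endif
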
/* CPU-specific state of a task */
struct thread_struct {
	/* Callee-saved registers */
	unsigned long ra;
	unsigned long sp;	/* Kernel mode stack */
	unsigned long s[12];	/* s[0]: frame pointer */
	struct __riscv_d_ext_state fstate;
	unsigned long bad_cause;
	unsigned long envcfg;
	unsigned long sum;
	u32 riscv_v_flags;
	u32 vstate_ctrl;
	struct __riscv_v_ext_state vstate;
	unsigned long align_ctl;
	struct __riscv_v_ext_state kernel_vstate;
#ifdef CONFIG_SMP
	/* Flush the icache on migration */
	bool force_icache_flush;
	/* A forced icache flush is not needed if migrating to the previous cpu. */
	unsigned int prev_cpu;
#endif
};

/* Whitelist the fstate from the task_struct for hardened usercopy */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = offsetof(struct thread_struct, fstate);
	*size = sizeof_field(struct thread_struct, fstate);
}

#define INIT_THREAD {					\
	.sp = sizeof(init_stack) + (long)&init_stack,	\
	.align_ctl = PR_UNALIGN_NOPRINT,		\
}

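/*
 * task_pt_regs() below relies on the saved user register frame living
 * at the very top of the task's kernel stack, aligned to STACK_ALIGN.
 */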
#define task_pt_regs(tsk)						\
	((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE		\
			    - ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))

#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->epc)
#define KSTK_ESP(tsk)		(task_pt_regs(tsk)->sp)

#define PREFETCH_ASM(x)							\
	ALTERNATIVE(__nops(1), PREFETCH_R(x, 0), 0,			\
		    RISCV_ISA_EXT_ZICBOP, CONFIG_RISCV_ISA_ZICBOP)

#define PREFETCHW_ASM(x)						\
	ALTERNATIVE(__nops(1), PREFETCH_W(x, 0), 0,			\
		    RISCV_ISA_EXT_ZICBOP, CONFIG_RISCV_ISA_ZICBOP)

#ifdef CONFIG_RISCV_ISA_ZICBOP
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *x)
{
	__asm__ __volatile__(PREFETCH_ASM(%0) : : "r" (x) : "memory");
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *x)
{
	__asm__ __volatile__(PREFETCHW_ASM(%0) : : "r" (x) : "memory");
}
#endif /* CONFIG_RISCV_ISA_ZICBOP */
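
/*
 * Illustrative usage only (hypothetical caller): prefetch a line for
 * write before modifying it, e.g.
 *
 *	prefetchw(&node->next);
 *	node->next = new;
 *
 * On cores without Zicbop the ALTERNATIVE above leaves a nop in place,
 * so callers need no feature check of their own.
 */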

/* Do necessary setup to start up a newly executed thread. */
extern void start_thread(struct pt_regs *regs,
			unsigned long pc, unsigned long sp);

extern unsigned long __get_wchan(struct task_struct *p);

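/*
 * Per the RISC-V privileged spec, wfi is a hint: the hart may stall
 * until an interrupt might need servicing, or complete as a no-op.
 */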
static inline void wait_for_interrupt(void)
{
	__asm__ __volatile__ ("wfi");
}

extern phys_addr_t dma32_phys_limit;

struct device_node;
int riscv_of_processor_hartid(struct device_node *node, unsigned long *hartid);
int riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hartid);
int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid);

extern void riscv_fill_hwcap(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);

extern unsigned long signal_minsigstksz __ro_after_init;

#ifdef CONFIG_RISCV_ISA_V
/* Userspace interface for PR_RISCV_V_{SET,GET}_VS prctl()s: */
#define RISCV_V_SET_CONTROL(arg)	riscv_v_vstate_ctrl_set_current(arg)
#define RISCV_V_GET_CONTROL()		riscv_v_vstate_ctrl_get_current()
extern long riscv_v_vstate_ctrl_set_current(unsigned long arg);
extern long riscv_v_vstate_ctrl_get_current(void);
#endif /* CONFIG_RISCV_ISA_V */
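
/*
 * Illustrative userspace call (a sketch, not defined here; the control
 * values live in <linux/prctl.h>):
 *
 *	prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON);
 */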

extern int get_unalign_ctl(struct task_struct *tsk, unsigned long addr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);

#define GET_UNALIGN_CTL(tsk, addr)	get_unalign_ctl((tsk), (addr))
#define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))

#define RISCV_SET_ICACHE_FLUSH_CTX(arg1, arg2)	riscv_set_icache_flush_ctx(arg1, arg2)
extern int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long per_thread);

#ifdef CONFIG_RISCV_ISA_SUPM
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
long get_tagged_addr_ctrl(struct task_struct *task);
#define SET_TAGGED_ADDR_CTRL(arg)	set_tagged_addr_ctrl(current, arg)
#define GET_TAGGED_ADDR_CTRL()		get_tagged_addr_ctrl(current)
#endif
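
/*
 * Illustrative userspace call (a sketch; whether this exact control
 * word applies on RISC-V is an assumption, see <linux/prctl.h>):
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
 */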

#endif /* __ASSEMBLY__ */

#endif /* _ASM_RISCV_PROCESSOR_H */