1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * Copyright (C) 2020 SiFive
4 */
5
6 #ifndef __ASM_RISCV_VECTOR_H
7 #define __ASM_RISCV_VECTOR_H
8
9 #include <linux/types.h>
10 #include <uapi/asm-generic/errno.h>
11
12 #ifdef CONFIG_RISCV_ISA_V
13
14 #include <linux/stringify.h>
15 #include <linux/sched.h>
16 #include <linux/sched/task_stack.h>
17 #include <asm/ptrace.h>
18 #include <asm/cpufeature.h>
19 #include <asm/csr.h>
20 #include <asm/asm.h>
21 #include <asm/vendorid_list.h>
22 #include <asm/vendor_extensions.h>
23 #include <asm/vendor_extensions/thead.h>
24
/*
 * Return @_val with its vector-status (VS) field replaced by state TYPE
 * (OFF/INITIAL/CLEAN/DIRTY).  The T-Head pre-ratification vector keeps
 * the VS bits at a different position, so pick the matching mask.
 */
#define __riscv_v_vstate_or(_val, TYPE) ({				\
	typeof(_val) _res = _val;					\
	if (has_xtheadvector())						\
		_res = (_res & ~SR_VS_THEAD) | SR_VS_##TYPE##_THEAD;	\
	else								\
		_res = (_res & ~SR_VS) | SR_VS_##TYPE;			\
	_res;								\
})
33
/*
 * True iff the vector-status (VS) field of @_val equals state TYPE,
 * using the T-Head bit layout when the xtheadvector extension is active.
 */
#define __riscv_v_vstate_check(_val, TYPE) ({				\
	bool _res;							\
	if (has_xtheadvector())						\
		_res = ((_val) & SR_VS_THEAD) == SR_VS_##TYPE##_THEAD;	\
	else								\
		_res = ((_val) & SR_VS) == SR_VS_##TYPE;		\
	_res;								\
})
42
/*
 * Byte size of one task's saved vector register file; derived as
 * "32 registers x vlenb bytes" (see __vstate_csr_save and the comment
 * above riscv_vector_vlen()).
 */
extern unsigned long riscv_v_vsize;
int riscv_v_setup_vsize(void);
bool insn_is_vector(u32 insn_buf);
bool riscv_v_first_use_handler(struct pt_regs *regs);
void kernel_vector_begin(void);
void kernel_vector_end(void);
void get_cpu_vector_context(void);
void put_cpu_vector_context(void);
void riscv_v_thread_free(struct task_struct *tsk);
void __init riscv_v_setup_ctx_cache(void);
void riscv_v_thread_alloc(struct task_struct *tsk);
54
/* Lockless snapshot of current's riscv_v_flags (single READ_ONCE). */
static inline u32 riscv_v_flags(void)
{
	return READ_ONCE(current->thread.riscv_v_flags);
}
59
/* True when the hart supports at least the Zve32x vector subset. */
static __always_inline bool has_vector(void)
{
	return riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE32X);
}
64
has_xtheadvector_no_alternatives(void)65 static __always_inline bool has_xtheadvector_no_alternatives(void)
66 {
67 if (IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
68 return riscv_isa_vendor_extension_available(THEAD_VENDOR_ID, XTHEADVECTOR);
69 else
70 return false;
71 }
72
has_xtheadvector(void)73 static __always_inline bool has_xtheadvector(void)
74 {
75 if (IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
76 return riscv_has_vendor_extension_unlikely(THEAD_VENDOR_ID,
77 RISCV_ISA_VENDOR_EXT_XTHEADVECTOR);
78 else
79 return false;
80 }
81
/* Mark the vector state in @regs as CLEAN (registers match memory). */
static inline void __riscv_v_vstate_clean(struct pt_regs *regs)
{
	regs->status = __riscv_v_vstate_or(regs->status, CLEAN);
}
86
/* Mark the vector state in @regs as DIRTY (registers may have changed). */
static inline void __riscv_v_vstate_dirty(struct pt_regs *regs)
{
	regs->status = __riscv_v_vstate_or(regs->status, DIRTY);
}
91
/* Disable vector for the context described by @regs (VS = OFF). */
static inline void riscv_v_vstate_off(struct pt_regs *regs)
{
	regs->status = __riscv_v_vstate_or(regs->status, OFF);
}
96
/* Enable vector for the context described by @regs (VS = INITIAL). */
static inline void riscv_v_vstate_on(struct pt_regs *regs)
{
	regs->status = __riscv_v_vstate_or(regs->status, INITIAL);
}
101
/* True when the context in @regs has vector enabled (VS != OFF). */
static inline bool riscv_v_vstate_query(struct pt_regs *regs)
{
	return !__riscv_v_vstate_check(regs->status, OFF);
}
106
riscv_v_enable(void)107 static __always_inline void riscv_v_enable(void)
108 {
109 if (has_xtheadvector())
110 csr_set(CSR_SSTATUS, SR_VS_THEAD);
111 else
112 csr_set(CSR_SSTATUS, SR_VS);
113 }
114
riscv_v_disable(void)115 static __always_inline void riscv_v_disable(void)
116 {
117 if (has_xtheadvector())
118 csr_clear(CSR_SSTATUS, SR_VS_THEAD);
119 else
120 csr_clear(CSR_SSTATUS, SR_VS);
121 }
122
/*
 * Report whether sstatus currently grants vector access on this hart.
 *
 * NOTE(review): unlike riscv_v_enable()/riscv_v_disable() this tests
 * only the standard SR_VS field, not SR_VS_THEAD — confirm callers
 * never depend on it on xtheadvector systems.
 */
static __always_inline bool riscv_v_is_on(void)
{
	return !!(csr_read(CSR_SSTATUS) & SR_VS);
}
127
__vstate_csr_save(struct __riscv_v_ext_state * dest)128 static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest)
129 {
130 asm volatile (
131 "csrr %0, " __stringify(CSR_VSTART) "\n\t"
132 "csrr %1, " __stringify(CSR_VTYPE) "\n\t"
133 "csrr %2, " __stringify(CSR_VL) "\n\t"
134 : "=r" (dest->vstart), "=r" (dest->vtype), "=r" (dest->vl),
135 "=r" (dest->vcsr) : :);
136
137 if (has_xtheadvector()) {
138 unsigned long status;
139
140 /*
141 * CSR_VCSR is defined as
142 * [2:1] - vxrm[1:0]
143 * [0] - vxsat
144 * The earlier vector spec implemented by T-Head uses separate
145 * registers for the same bit-elements, so just combine those
146 * into the existing output field.
147 *
148 * Additionally T-Head cores need FS to be enabled when accessing
149 * the VXRM and VXSAT CSRs, otherwise ending in illegal instructions.
150 * Though the cores do not implement the VXRM and VXSAT fields in the
151 * FCSR CSR that vector-0.7.1 specifies.
152 */
153 status = csr_read_set(CSR_STATUS, SR_FS_DIRTY);
154 dest->vcsr = csr_read(CSR_VXSAT) | csr_read(CSR_VXRM) << CSR_VXRM_SHIFT;
155
156 dest->vlenb = riscv_v_vsize / 32;
157
158 if ((status & SR_FS) != SR_FS_DIRTY)
159 csr_write(CSR_STATUS, status);
160 } else {
161 dest->vcsr = csr_read(CSR_VCSR);
162 dest->vlenb = csr_read(CSR_VLENB);
163 }
164 }
165
__vstate_csr_restore(struct __riscv_v_ext_state * src)166 static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src)
167 {
168 asm volatile (
169 ".option push\n\t"
170 ".option arch, +zve32x\n\t"
171 "vsetvl x0, %2, %1\n\t"
172 ".option pop\n\t"
173 "csrw " __stringify(CSR_VSTART) ", %0\n\t"
174 : : "r" (src->vstart), "r" (src->vtype), "r" (src->vl));
175
176 if (has_xtheadvector()) {
177 unsigned long status = csr_read(CSR_SSTATUS);
178
179 /*
180 * Similar to __vstate_csr_save above, restore values for the
181 * separate VXRM and VXSAT CSRs from the vcsr variable.
182 */
183 status = csr_read_set(CSR_STATUS, SR_FS_DIRTY);
184
185 csr_write(CSR_VXRM, (src->vcsr >> CSR_VXRM_SHIFT) & CSR_VXRM_MASK);
186 csr_write(CSR_VXSAT, src->vcsr & CSR_VXSAT_MASK);
187
188 if ((status & SR_FS) != SR_FS_DIRTY)
189 csr_write(CSR_STATUS, status);
190 } else {
191 csr_write(CSR_VCSR, src->vcsr);
192 }
193 }
194
/*
 * Spill the whole vector register file into the buffer at @datap and
 * the CSR state into @save_to.  The 32 registers are stored as four
 * LMUL=8 groups (v0/v8/v16/v24) with e8 elements; vector access is
 * enabled only for the duration of the save.
 */
static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to,
					 void *datap)
{
	unsigned long vl;

	riscv_v_enable();
	__vstate_csr_save(save_to);
	if (has_xtheadvector()) {
		/* 0.7.1 encoding: t4 = group byte stride, t0 walks @datap. */
		asm volatile (
			"mv t0, %0\n\t"
			THEAD_VSETVLI_T4X0E8M8D1
			THEAD_VSB_V_V0T0
			"add t0, t0, t4\n\t"
			THEAD_VSB_V_V0T0
			"add t0, t0, t4\n\t"
			THEAD_VSB_V_V0T0
			"add t0, t0, t4\n\t"
			THEAD_VSB_V_V0T0
			: : "r" (datap) : "memory", "t0", "t4");
	} else {
		asm volatile (
			".option push\n\t"
			".option arch, +zve32x\n\t"
			"vsetvli %0, x0, e8, m8, ta, ma\n\t"
			"vse8.v v0, (%1)\n\t"
			"add %1, %1, %0\n\t"
			"vse8.v v8, (%1)\n\t"
			"add %1, %1, %0\n\t"
			"vse8.v v16, (%1)\n\t"
			"add %1, %1, %0\n\t"
			"vse8.v v24, (%1)\n\t"
			".option pop\n\t"
			: "=&r" (vl) : "r" (datap) : "memory");
	}
	riscv_v_disable();
}
231
/*
 * Reload the whole vector register file from the buffer at @datap
 * (four LMUL=8 e8 groups, mirroring __riscv_v_vstate_save), then
 * restore the CSR state from @restore_from.  Vector access is enabled
 * only for the duration of the restore.
 */
static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_from,
					    void *datap)
{
	unsigned long vl;

	riscv_v_enable();
	if (has_xtheadvector()) {
		/* 0.7.1 encoding: t4 = group byte stride, t0 walks @datap. */
		asm volatile (
			"mv t0, %0\n\t"
			THEAD_VSETVLI_T4X0E8M8D1
			THEAD_VLB_V_V0T0
			"add t0, t0, t4\n\t"
			THEAD_VLB_V_V0T0
			"add t0, t0, t4\n\t"
			THEAD_VLB_V_V0T0
			"add t0, t0, t4\n\t"
			THEAD_VLB_V_V0T0
			: : "r" (datap) : "memory", "t0", "t4");
	} else {
		asm volatile (
			".option push\n\t"
			".option arch, +zve32x\n\t"
			"vsetvli %0, x0, e8, m8, ta, ma\n\t"
			"vle8.v v0, (%1)\n\t"
			"add %1, %1, %0\n\t"
			"vle8.v v8, (%1)\n\t"
			"add %1, %1, %0\n\t"
			"vle8.v v16, (%1)\n\t"
			"add %1, %1, %0\n\t"
			"vle8.v v24, (%1)\n\t"
			".option pop\n\t"
			: "=&r" (vl) : "r" (datap) : "memory");
	}
	/* CSRs last: vsetvl here must not be clobbered by the loads above. */
	__vstate_csr_restore(restore_from);
	riscv_v_disable();
}
268
/*
 * Destroy the live vector register contents: splat -1 into all 32
 * registers (four LMUL=8 groups) and finish with a vsetvl using
 * vtype_inval (only the top bit set, i.e. an invalid vtype) so nothing
 * meaningful remains observable.  Vector is enabled only briefly.
 */
static inline void __riscv_v_vstate_discard(void)
{
	unsigned long vl, vtype_inval = 1UL << (BITS_PER_LONG - 1);

	riscv_v_enable();
	if (has_xtheadvector())
		asm volatile (THEAD_VSETVLI_T4X0E8M8D1 : : : "t4");
	else
		asm volatile (
			".option push\n\t"
			".option arch, +zve32x\n\t"
			"vsetvli %0, x0, e8, m8, ta, ma\n\t"
			".option pop\n\t": "=&r" (vl));

	asm volatile (
		".option push\n\t"
		".option arch, +zve32x\n\t"
		"vmv.v.i v0, -1\n\t"
		"vmv.v.i v8, -1\n\t"
		"vmv.v.i v16, -1\n\t"
		"vmv.v.i v24, -1\n\t"
		"vsetvl %0, x0, %1\n\t"
		".option pop\n\t"
		: "=&r" (vl) : "r" (vtype_inval));

	riscv_v_disable();
}
296
/*
 * Discard the live vector state for the context in @regs, if that
 * context has vector enabled, and mark the state dirty afterwards.
 */
static inline void riscv_v_vstate_discard(struct pt_regs *regs)
{
	if (!riscv_v_vstate_query(regs))
		return;

	__riscv_v_vstate_discard();
	__riscv_v_vstate_dirty(regs);
}
304
riscv_v_vstate_save(struct __riscv_v_ext_state * vstate,struct pt_regs * regs)305 static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate,
306 struct pt_regs *regs)
307 {
308 if (__riscv_v_vstate_check(regs->status, DIRTY)) {
309 __riscv_v_vstate_save(vstate, vstate->datap);
310 __riscv_v_vstate_clean(regs);
311 }
312 }
313
riscv_v_vstate_restore(struct __riscv_v_ext_state * vstate,struct pt_regs * regs)314 static inline void riscv_v_vstate_restore(struct __riscv_v_ext_state *vstate,
315 struct pt_regs *regs)
316 {
317 if (riscv_v_vstate_query(regs)) {
318 __riscv_v_vstate_restore(vstate, vstate->datap);
319 __riscv_v_vstate_clean(regs);
320 }
321 }
322
riscv_v_vstate_set_restore(struct task_struct * task,struct pt_regs * regs)323 static inline void riscv_v_vstate_set_restore(struct task_struct *task,
324 struct pt_regs *regs)
325 {
326 if (riscv_v_vstate_query(regs)) {
327 set_tsk_thread_flag(task, TIF_RISCV_V_DEFER_RESTORE);
328 riscv_v_vstate_on(regs);
329 }
330 }
331
332 #ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
riscv_preempt_v_dirty(struct task_struct * task)333 static inline bool riscv_preempt_v_dirty(struct task_struct *task)
334 {
335 return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_DIRTY);
336 }
337
riscv_preempt_v_restore(struct task_struct * task)338 static inline bool riscv_preempt_v_restore(struct task_struct *task)
339 {
340 return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_NEED_RESTORE);
341 }
342
/*
 * Clear the preempt-V dirty flag on @task.  The barrier() is a compiler
 * barrier: it keeps preceding accesses from being reordered after the
 * flag update.
 */
static inline void riscv_preempt_v_clear_dirty(struct task_struct *task)
{
	barrier();
	task->thread.riscv_v_flags &= ~RISCV_PREEMPT_V_DIRTY;
}
348
/*
 * Set the preempt-V need-restore flag on @task.  The barrier() is a
 * compiler barrier ordering preceding accesses before the flag update.
 */
static inline void riscv_preempt_v_set_restore(struct task_struct *task)
{
	barrier();
	task->thread.riscv_v_flags |= RISCV_PREEMPT_V_NEED_RESTORE;
}
354
riscv_preempt_v_started(struct task_struct * task)355 static inline bool riscv_preempt_v_started(struct task_struct *task)
356 {
357 return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V);
358 }
359
360 #else /* !CONFIG_RISCV_ISA_V_PREEMPTIVE */
/* Inert stand-ins when preemptible kernel-mode vector is compiled out. */
static inline bool riscv_preempt_v_dirty(struct task_struct *task) { return false; }
static inline bool riscv_preempt_v_restore(struct task_struct *task) { return false; }
static inline bool riscv_preempt_v_started(struct task_struct *task) { return false; }
#define riscv_preempt_v_clear_dirty(tsk)	do {} while (0)
#define riscv_preempt_v_set_restore(tsk)	do {} while (0)
366 #endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */
367
/*
 * Context-switch hook: save @prev's vector state and arrange for
 * @next's to be restored.
 *
 * A task inside a preemptible kernel-vector region (riscv_preempt_v_started)
 * keeps its kernel state in thread.kernel_vstate; any other task's user
 * state lives behind pt_regs/thread.vstate and is handled lazily.
 */
static inline void __switch_to_vector(struct task_struct *prev,
				      struct task_struct *next)
{
	struct pt_regs *regs;

	if (riscv_preempt_v_started(prev)) {
		/*
		 * Preempted mid-region with the unit live: remember that in
		 * RISCV_PREEMPT_V_IN_SCHEDULE and switch the unit off while
		 * @prev is scheduled out.  Context depth should be zero here.
		 */
		if (riscv_v_is_on()) {
			WARN_ON(prev->thread.riscv_v_flags & RISCV_V_CTX_DEPTH_MASK);
			riscv_v_disable();
			prev->thread.riscv_v_flags |= RISCV_PREEMPT_V_IN_SCHEDULE;
		}
		/* Spill kernel-mode vector state only if it is dirty. */
		if (riscv_preempt_v_dirty(prev)) {
			__riscv_v_vstate_save(&prev->thread.kernel_vstate,
					      prev->thread.kernel_vstate.datap);
			riscv_preempt_v_clear_dirty(prev);
		}
	} else {
		/* Ordinary task: save user vector state when marked dirty. */
		regs = task_pt_regs(prev);
		riscv_v_vstate_save(&prev->thread.vstate, regs);
	}

	if (riscv_preempt_v_started(next)) {
		/*
		 * Coming back into a preempted kernel-vector region: either
		 * re-enable the unit directly, or flag a needed restore.
		 */
		if (next->thread.riscv_v_flags & RISCV_PREEMPT_V_IN_SCHEDULE) {
			next->thread.riscv_v_flags &= ~RISCV_PREEMPT_V_IN_SCHEDULE;
			riscv_v_enable();
		} else {
			riscv_preempt_v_set_restore(next);
		}
	} else {
		/* Defer the user-state restore via TIF_RISCV_V_DEFER_RESTORE. */
		riscv_v_vstate_set_restore(next, task_pt_regs(next));
	}
}
400
401 void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
402 bool riscv_v_vstate_ctrl_user_allowed(void);
403
404 #else /* ! CONFIG_RISCV_ISA_V */
405
struct pt_regs;

/*
 * !CONFIG_RISCV_ISA_V: inert stand-ins so callers build without their
 * own #ifdefs; probes report "no vector", setup reports -EOPNOTSUPP.
 */
static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; }
static __always_inline bool has_vector(void) { return false; }
static __always_inline bool insn_is_vector(u32 insn_buf) { return false; }
static __always_inline bool has_xtheadvector_no_alternatives(void) { return false; }
static __always_inline bool has_xtheadvector(void) { return false; }
static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
#define riscv_v_vsize (0)
#define riscv_v_vstate_discard(regs) do {} while (0)
#define riscv_v_vstate_save(vstate, regs) do {} while (0)
#define riscv_v_vstate_restore(vstate, regs) do {} while (0)
#define __switch_to_vector(__prev, __next) do {} while (0)
#define riscv_v_vstate_off(regs) do {} while (0)
#define riscv_v_vstate_on(regs) do {} while (0)
#define riscv_v_thread_free(tsk) do {} while (0)
#define riscv_v_setup_ctx_cache() do {} while (0)
#define riscv_v_thread_alloc(tsk) do {} while (0)
426
427 #endif /* CONFIG_RISCV_ISA_V */
428
429 /*
430 * Return the implementation's vlen value.
431 *
432 * riscv_v_vsize contains the value of "32 vector registers with vlenb length"
433 * so rebuild the vlen value in bits from it.
434 */
riscv_vector_vlen(void)435 static inline int riscv_vector_vlen(void)
436 {
437 return riscv_v_vsize / 32 * 8;
438 }
439
440 #endif /* ! __ASM_RISCV_VECTOR_H */
441