/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2020 SiFive
 */

#ifndef __ASM_RISCV_VECTOR_H
#define __ASM_RISCV_VECTOR_H

#include <linux/types.h>
#include <uapi/asm-generic/errno.h>

#ifdef CONFIG_RISCV_ISA_V

#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/ptrace.h>
#include <asm/cpufeature.h>
#include <asm/csr.h>
#include <asm/asm.h>

extern unsigned long riscv_v_vsize;
int riscv_v_setup_vsize(void);
bool insn_is_vector(u32 insn_buf);
bool riscv_v_first_use_handler(struct pt_regs *regs);
void kernel_vector_begin(void);
void kernel_vector_end(void);
void get_cpu_vector_context(void);
void put_cpu_vector_context(void);
void riscv_v_thread_free(struct task_struct *tsk);
void __init riscv_v_setup_ctx_cache(void);
void riscv_v_thread_alloc(struct task_struct *tsk);

static inline u32 riscv_v_flags(void)
{
	return READ_ONCE(current->thread.riscv_v_flags);
}

static __always_inline bool has_vector(void)
{
	return riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE32X);
}

static inline void __riscv_v_vstate_clean(struct pt_regs *regs)
{
	regs->status = (regs->status & ~SR_VS) | SR_VS_CLEAN;
}

static inline void __riscv_v_vstate_dirty(struct pt_regs *regs)
{
	regs->status = (regs->status & ~SR_VS) | SR_VS_DIRTY;
}

static inline void riscv_v_vstate_off(struct pt_regs *regs)
{
	regs->status = (regs->status & ~SR_VS) | SR_VS_OFF;
}

static inline void riscv_v_vstate_on(struct pt_regs *regs)
{
	regs->status = (regs->status & ~SR_VS) | SR_VS_INITIAL;
}

static inline bool riscv_v_vstate_query(struct pt_regs *regs)
{
	return (regs->status & SR_VS) != 0;
}

static __always_inline void riscv_v_enable(void)
{
	csr_set(CSR_SSTATUS, SR_VS);
}

static __always_inline void riscv_v_disable(void)
{
	csr_clear(CSR_SSTATUS, SR_VS);
}

static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest)
{
	asm volatile (
		"csrr	%0, " __stringify(CSR_VSTART) "\n\t"
		"csrr	%1, " __stringify(CSR_VTYPE) "\n\t"
		"csrr	%2, " __stringify(CSR_VL) "\n\t"
		"csrr	%3, " __stringify(CSR_VCSR) "\n\t"
		"csrr	%4, " __stringify(CSR_VLENB) "\n\t"
		: "=r" (dest->vstart), "=r" (dest->vtype), "=r" (dest->vl),
		  "=r" (dest->vcsr), "=r" (dest->vlenb) : :);
}

static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src)
{
	asm volatile (
		".option push\n\t"
		".option arch, +zve32x\n\t"
		"vsetvl	x0, %2, %1\n\t"
		".option pop\n\t"
		"csrw	" __stringify(CSR_VSTART) ", %0\n\t"
		"csrw	" __stringify(CSR_VCSR) ", %3\n\t"
		: : "r" (src->vstart), "r" (src->vtype), "r" (src->vl),
		    "r" (src->vcsr) :);
}

static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to,
					 void *datap)
{
	unsigned long vl;

	riscv_v_enable();
	__vstate_csr_save(save_to);
	asm volatile (
		".option push\n\t"
		".option arch, +zve32x\n\t"
		"vsetvli	%0, x0, e8, m8, ta, ma\n\t"
		"vse8.v		v0, (%1)\n\t"
		"add		%1, %1, %0\n\t"
		"vse8.v		v8, (%1)\n\t"
		"add		%1, %1, %0\n\t"
		"vse8.v		v16, (%1)\n\t"
		"add		%1, %1, %0\n\t"
		"vse8.v		v24, (%1)\n\t"
		".option pop\n\t"
		: "=&r" (vl) : "r" (datap) : "memory");
	riscv_v_disable();
}

static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_from,
					    void *datap)
{
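	/*
	 * Reload the 32 vector registers from datap in four LMUL=8 groups of
	 * eight, then restore the V CSRs (vl/vtype via vsetvl, vstart, vcsr).
	 */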
	unsigned long vl;

	riscv_v_enable();
	asm volatile (
		".option push\n\t"
		".option arch, +zve32x\n\t"
		"vsetvli	%0, x0, e8, m8, ta, ma\n\t"
		"vle8.v		v0, (%1)\n\t"
		"add		%1, %1, %0\n\t"
		"vle8.v		v8, (%1)\n\t"
		"add		%1, %1, %0\n\t"
		"vle8.v		v16, (%1)\n\t"
		"add		%1, %1, %0\n\t"
		"vle8.v		v24, (%1)\n\t"
		".option pop\n\t"
		: "=&r" (vl) : "r" (datap) : "memory");
	__vstate_csr_restore(restore_from);
	riscv_v_disable();
}

static inline void __riscv_v_vstate_discard(void)
{
	unsigned long vl, vtype_inval = 1UL << (BITS_PER_LONG - 1);

	riscv_v_enable();
	asm volatile (
		".option push\n\t"
		".option arch, +zve32x\n\t"
		"vsetvli	%0, x0, e8, m8, ta, ma\n\t"
		"vmv.v.i	v0, -1\n\t"
		"vmv.v.i	v8, -1\n\t"
		"vmv.v.i	v16, -1\n\t"
		"vmv.v.i	v24, -1\n\t"
		"vsetvl		%0, x0, %1\n\t"
		".option pop\n\t"
		: "=&r" (vl) : "r" (vtype_inval) : "memory");
	riscv_v_disable();
}

static inline void riscv_v_vstate_discard(struct pt_regs *regs)
{
	if ((regs->status & SR_VS) == SR_VS_OFF)
		return;

	__riscv_v_vstate_discard();
	__riscv_v_vstate_dirty(regs);
}

static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate,
				       struct pt_regs *regs)
{
	if ((regs->status & SR_VS) == SR_VS_DIRTY) {
		__riscv_v_vstate_save(vstate, vstate->datap);
		__riscv_v_vstate_clean(regs);
	}
}

static inline void riscv_v_vstate_restore(struct __riscv_v_ext_state *vstate,
					  struct pt_regs *regs)
{
	if ((regs->status & SR_VS) != SR_VS_OFF) {
		__riscv_v_vstate_restore(vstate, vstate->datap);
		__riscv_v_vstate_clean(regs);
	}
}

static inline void riscv_v_vstate_set_restore(struct task_struct *task,
					      struct pt_regs *regs)
{
	if ((regs->status & SR_VS) != SR_VS_OFF) {
		set_tsk_thread_flag(task, TIF_RISCV_V_DEFER_RESTORE);
		riscv_v_vstate_on(regs);
	}
}

#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
static inline bool riscv_preempt_v_dirty(struct task_struct *task)
{
	return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_DIRTY);
}

static inline bool riscv_preempt_v_restore(struct task_struct *task)
{
	return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_NEED_RESTORE);
}

static inline void riscv_preempt_v_clear_dirty(struct task_struct *task)
{
	barrier();
	task->thread.riscv_v_flags &= ~RISCV_PREEMPT_V_DIRTY;
}

static inline void riscv_preempt_v_set_restore(struct task_struct *task)
{
	barrier();
	task->thread.riscv_v_flags |= RISCV_PREEMPT_V_NEED_RESTORE;
}

static inline bool riscv_preempt_v_started(struct task_struct *task)
{
	return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V);
}

#else /* !CONFIG_RISCV_ISA_V_PREEMPTIVE */
static inline bool riscv_preempt_v_dirty(struct task_struct *task) { return false; }
static inline bool riscv_preempt_v_restore(struct task_struct *task) { return false; }
static inline bool riscv_preempt_v_started(struct task_struct *task) { return false; }
#define riscv_preempt_v_clear_dirty(tsk)	do {} while (0)
#define riscv_preempt_v_set_restore(tsk)	do {} while (0)
#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */

static inline void __switch_to_vector(struct task_struct *prev,
				      struct task_struct *next)
{
	struct pt_regs *regs;

	if (riscv_preempt_v_started(prev)) {
		if (riscv_preempt_v_dirty(prev)) {
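			/*
			 * @prev was preempted while using Vector in kernel
			 * mode: save its live kernel-mode context before the
			 * switch so it can be restored when it runs again.
			 */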
			__riscv_v_vstate_save(&prev->thread.kernel_vstate,
					      prev->thread.kernel_vstate.datap);
			riscv_preempt_v_clear_dirty(prev);
		}
	} else {
		regs = task_pt_regs(prev);
		riscv_v_vstate_save(&prev->thread.vstate, regs);
	}

	if (riscv_preempt_v_started(next))
		riscv_preempt_v_set_restore(next);
	else
		riscv_v_vstate_set_restore(next, task_pt_regs(next));
}

void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
bool riscv_v_vstate_ctrl_user_allowed(void);

#else /* ! CONFIG_RISCV_ISA_V */

struct pt_regs;

static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; }
static __always_inline bool has_vector(void) { return false; }
static __always_inline bool insn_is_vector(u32 insn_buf) { return false; }
static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
#define riscv_v_vsize (0)
#define riscv_v_vstate_discard(regs)		do {} while (0)
#define riscv_v_vstate_save(vstate, regs)	do {} while (0)
#define riscv_v_vstate_restore(vstate, regs)	do {} while (0)
#define __switch_to_vector(__prev, __next)	do {} while (0)
#define riscv_v_vstate_off(regs)		do {} while (0)
#define riscv_v_vstate_on(regs)			do {} while (0)
#define riscv_v_thread_free(tsk)		do {} while (0)
#define riscv_v_setup_ctx_cache()		do {} while (0)
#define riscv_v_thread_alloc(tsk)		do {} while (0)

#endif /* CONFIG_RISCV_ISA_V */

/*
 * Return the implementation's vlen value.
 *
 * riscv_v_vsize contains the value of "32 vector registers with vlenb length"
 * so rebuild the vlen value in bits from it.
 */
static inline int riscv_vector_vlen(void)
{
	return riscv_v_vsize / 32 * 8;
}

#endif /* ! __ASM_RISCV_VECTOR_H */