// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */
9
10 #include <linux/bitfield.h>
11 #include <linux/cpu.h>
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
14 #include <linux/sched/debug.h>
15 #include <linux/sched/task_stack.h>
16 #include <linux/tick.h>
17 #include <linux/ptrace.h>
18 #include <linux/uaccess.h>
19 #include <linux/personality.h>
20 #include <linux/entry-common.h>
21
22 #include <asm/asm-prototypes.h>
23 #include <asm/unistd.h>
24 #include <asm/processor.h>
25 #include <asm/csr.h>
26 #include <asm/stacktrace.h>
27 #include <asm/string.h>
28 #include <asm/switch_to.h>
29 #include <asm/thread_info.h>
30 #include <asm/cpuidle.h>
31 #include <asm/vector.h>
32 #include <asm/cpufeature.h>
33 #include <asm/exec.h>
34
35 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
36 #include <linux/stackprotector.h>
37 unsigned long __stack_chk_guard __read_mostly;
38 EXPORT_SYMBOL(__stack_chk_guard);
39 #endif
40
41 extern asmlinkage void ret_from_fork_kernel_asm(void);
42 extern asmlinkage void ret_from_fork_user_asm(void);
43
arch_cpu_idle(void)44 void noinstr arch_cpu_idle(void)
45 {
46 cpu_do_idle();
47 }
48
set_unalign_ctl(struct task_struct * tsk,unsigned int val)49 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
50 {
51 if (!unaligned_ctl_available())
52 return -EINVAL;
53
54 tsk->thread.align_ctl = val;
55 return 0;
56 }
57
get_unalign_ctl(struct task_struct * tsk,unsigned long adr)58 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
59 {
60 if (!unaligned_ctl_available())
61 return -EINVAL;
62
63 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
64 }
65
__show_regs(struct pt_regs * regs)66 void __show_regs(struct pt_regs *regs)
67 {
68 show_regs_print_info(KERN_DEFAULT);
69
70 if (!user_mode(regs)) {
71 pr_cont("epc : %pS\n", (void *)regs->epc);
72 pr_cont(" ra : %pS\n", (void *)regs->ra);
73 }
74
75 pr_cont("epc : " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
76 regs->epc, regs->ra, regs->sp);
77 pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
78 regs->gp, regs->tp, regs->t0);
79 pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
80 regs->t1, regs->t2, regs->s0);
81 pr_cont(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
82 regs->s1, regs->a0, regs->a1);
83 pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
84 regs->a2, regs->a3, regs->a4);
85 pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
86 regs->a5, regs->a6, regs->a7);
87 pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
88 regs->s2, regs->s3, regs->s4);
89 pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
90 regs->s5, regs->s6, regs->s7);
91 pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
92 regs->s8, regs->s9, regs->s10);
93 pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
94 regs->s11, regs->t3, regs->t4);
95 pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
96 regs->t5, regs->t6);
97
98 pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n",
99 regs->status, regs->badaddr, regs->cause);
100 }
show_regs(struct pt_regs * regs)101 void show_regs(struct pt_regs *regs)
102 {
103 __show_regs(regs);
104 if (!user_mode(regs))
105 dump_backtrace(regs, NULL, KERN_DEFAULT);
106 }
107
arch_align_stack(unsigned long sp)108 unsigned long arch_align_stack(unsigned long sp)
109 {
110 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
111 sp -= get_random_u32_below(PAGE_SIZE);
112 return sp & ~0xf;
113 }
114
#ifdef CONFIG_COMPAT
static bool compat_mode_supported __read_mostly;

/* Accept 32-bit RISC-V ELF binaries only when the hart supports UXL=32. */
bool compat_elf_check_arch(Elf32_Ehdr *hdr)
{
	return compat_mode_supported &&
	       hdr->e_machine == EM_RISCV &&
	       hdr->e_ident[EI_CLASS] == ELFCLASS32;
}

/*
 * Probe whether the hart supports 32-bit user mode: the UXL field of
 * sstatus is WARL, so try writing UXL=32 and check whether it sticks.
 * The original status value is restored afterwards.
 */
static int __init compat_mode_detect(void)
{
	unsigned long tmp = csr_read(CSR_STATUS);

	csr_write(CSR_STATUS, (tmp & ~SR_UXL) | SR_UXL_32);
	compat_mode_supported =
			(csr_read(CSR_STATUS) & SR_UXL) == SR_UXL_32;

	csr_write(CSR_STATUS, tmp);

	/* Trailing newline keeps this from merging with the next log line. */
	pr_info("riscv: ELF compat mode %s\n",
			compat_mode_supported ? "supported" : "unsupported");

	return 0;
}
early_initcall(compat_mode_detect);
#endif
142
start_thread(struct pt_regs * regs,unsigned long pc,unsigned long sp)143 void start_thread(struct pt_regs *regs, unsigned long pc,
144 unsigned long sp)
145 {
146 regs->status = SR_PIE;
147 if (has_fpu()) {
148 regs->status |= SR_FS_INITIAL;
149 /*
150 * Restore the initial value to the FP register
151 * before starting the user program.
152 */
153 fstate_restore(current, regs);
154 }
155 regs->epc = pc;
156 regs->sp = sp;
157
158 #ifdef CONFIG_64BIT
159 regs->status &= ~SR_UXL;
160
161 if (is_compat_task())
162 regs->status |= SR_UXL_32;
163 else
164 regs->status |= SR_UXL_64;
165 #endif
166 }
167
/*
 * Reset the current task's architectural thread state for exec():
 * FP state, vector state, and (if Supm is present) pointer masking.
 *
 * Note: the original text had "&current" mojibake-corrupted to "¤t"
 * in both memset() calls; restored here.
 */
void flush_thread(void)
{
#ifdef CONFIG_FPU
	/*
	 * Reset FPU state and context
	 *	frm: round to nearest, ties to even (IEEE default)
	 *	fflags: accrued exceptions cleared
	 */
	fstate_off(current, task_pt_regs(current));
	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
#endif
#ifdef CONFIG_RISCV_ISA_V
	/* Reset vector state */
	riscv_v_vstate_ctrl_init(current);
	riscv_v_vstate_off(task_pt_regs(current));
	kfree(current->thread.vstate.datap);
	memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
	clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE);
#endif
#ifdef CONFIG_RISCV_ISA_SUPM
	/* Disable pointer masking for the fresh image. */
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
		envcfg_update_bits(current, ENVCFG_PMM, ENVCFG_PMM_PMLEN_0);
#endif
}
192
/* Final per-arch teardown of a task_struct before it is freed. */
void arch_release_task_struct(struct task_struct *tsk)
{
	/* Free the vector context of datap. */
	if (has_vector() || has_xtheadvector())
		riscv_v_thread_free(tsk);
}
199
arch_dup_task_struct(struct task_struct * dst,struct task_struct * src)200 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
201 {
202 fstate_save(src, task_pt_regs(src));
203 *dst = *src;
204 /* clear entire V context, including datap for a new task */
205 memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
206 memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state));
207 clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE);
208
209 return 0;
210 }
211
ret_from_fork_kernel(void * fn_arg,int (* fn)(void *),struct pt_regs * regs)212 asmlinkage void ret_from_fork_kernel(void *fn_arg, int (*fn)(void *), struct pt_regs *regs)
213 {
214 fn(fn_arg);
215
216 syscall_exit_to_user_mode(regs);
217 }
218
ret_from_fork_user(struct pt_regs * regs)219 asmlinkage void ret_from_fork_user(struct pt_regs *regs)
220 {
221 syscall_exit_to_user_mode(regs);
222 }
223
copy_thread(struct task_struct * p,const struct kernel_clone_args * args)224 int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
225 {
226 unsigned long clone_flags = args->flags;
227 unsigned long usp = args->stack;
228 unsigned long tls = args->tls;
229 struct pt_regs *childregs = task_pt_regs(p);
230
231 /* Ensure all threads in this mm have the same pointer masking mode. */
232 if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM) && p->mm && (clone_flags & CLONE_VM))
233 set_bit(MM_CONTEXT_LOCK_PMLEN, &p->mm->context.flags);
234
235 memset(&p->thread.s, 0, sizeof(p->thread.s));
236
237 /* p->thread holds context to be restored by __switch_to() */
238 if (unlikely(args->fn)) {
239 /* Kernel thread */
240 memset(childregs, 0, sizeof(struct pt_regs));
241 /* Supervisor/Machine, irqs on: */
242 childregs->status = SR_PP | SR_PIE;
243
244 p->thread.s[0] = (unsigned long)args->fn;
245 p->thread.s[1] = (unsigned long)args->fn_arg;
246 p->thread.ra = (unsigned long)ret_from_fork_kernel_asm;
247 } else {
248 *childregs = *(current_pt_regs());
249 /* Turn off status.VS */
250 riscv_v_vstate_off(childregs);
251 if (usp) /* User fork */
252 childregs->sp = usp;
253 if (clone_flags & CLONE_SETTLS)
254 childregs->tp = tls;
255 childregs->a0 = 0; /* Return value of fork() */
256 p->thread.ra = (unsigned long)ret_from_fork_user_asm;
257 }
258 p->thread.riscv_v_flags = 0;
259 if (has_vector() || has_xtheadvector())
260 riscv_v_thread_alloc(p);
261 p->thread.sp = (unsigned long)childregs; /* kernel sp */
262 return 0;
263 }
264
arch_task_cache_init(void)265 void __init arch_task_cache_init(void)
266 {
267 riscv_v_setup_ctx_cache();
268 }
269
270 #ifdef CONFIG_RISCV_ISA_SUPM
271 enum {
272 PMLEN_0 = 0,
273 PMLEN_7 = 7,
274 PMLEN_16 = 16,
275 };
276
277 static bool have_user_pmlen_7;
278 static bool have_user_pmlen_16;
279
280 /*
281 * Control the relaxed ABI allowing tagged user addresses into the kernel.
282 */
283 static unsigned int tagged_addr_disabled;
284
set_tagged_addr_ctrl(struct task_struct * task,unsigned long arg)285 long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
286 {
287 unsigned long valid_mask = PR_PMLEN_MASK | PR_TAGGED_ADDR_ENABLE;
288 struct thread_info *ti = task_thread_info(task);
289 struct mm_struct *mm = task->mm;
290 unsigned long pmm;
291 u8 pmlen;
292
293 if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
294 return -EINVAL;
295
296 if (is_compat_thread(ti))
297 return -EINVAL;
298
299 if (arg & ~valid_mask)
300 return -EINVAL;
301
302 /*
303 * Prefer the smallest PMLEN that satisfies the user's request,
304 * in case choosing a larger PMLEN has a performance impact.
305 */
306 pmlen = FIELD_GET(PR_PMLEN_MASK, arg);
307 if (pmlen == PMLEN_0) {
308 pmm = ENVCFG_PMM_PMLEN_0;
309 } else if (pmlen <= PMLEN_7 && have_user_pmlen_7) {
310 pmlen = PMLEN_7;
311 pmm = ENVCFG_PMM_PMLEN_7;
312 } else if (pmlen <= PMLEN_16 && have_user_pmlen_16) {
313 pmlen = PMLEN_16;
314 pmm = ENVCFG_PMM_PMLEN_16;
315 } else {
316 return -EINVAL;
317 }
318
319 /*
320 * Do not allow the enabling of the tagged address ABI if globally
321 * disabled via sysctl abi.tagged_addr_disabled, if pointer masking
322 * is disabled for userspace.
323 */
324 if (arg & PR_TAGGED_ADDR_ENABLE && (tagged_addr_disabled || !pmlen))
325 return -EINVAL;
326
327 if (!(arg & PR_TAGGED_ADDR_ENABLE))
328 pmlen = PMLEN_0;
329
330 if (mmap_write_lock_killable(mm))
331 return -EINTR;
332
333 if (test_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags) && mm->context.pmlen != pmlen) {
334 mmap_write_unlock(mm);
335 return -EBUSY;
336 }
337
338 envcfg_update_bits(task, ENVCFG_PMM, pmm);
339 mm->context.pmlen = pmlen;
340
341 mmap_write_unlock(mm);
342
343 return 0;
344 }
345
get_tagged_addr_ctrl(struct task_struct * task)346 long get_tagged_addr_ctrl(struct task_struct *task)
347 {
348 struct thread_info *ti = task_thread_info(task);
349 long ret = 0;
350
351 if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
352 return -EINVAL;
353
354 if (is_compat_thread(ti))
355 return -EINVAL;
356
357 /*
358 * The mm context's pmlen is set only when the tagged address ABI is
359 * enabled, so the effective PMLEN must be extracted from envcfg.PMM.
360 */
361 switch (task->thread.envcfg & ENVCFG_PMM) {
362 case ENVCFG_PMM_PMLEN_7:
363 ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_7);
364 break;
365 case ENVCFG_PMM_PMLEN_16:
366 ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_16);
367 break;
368 }
369
370 if (task->mm->context.pmlen)
371 ret |= PR_TAGGED_ADDR_ENABLE;
372
373 return ret;
374 }
375
try_to_set_pmm(unsigned long value)376 static bool try_to_set_pmm(unsigned long value)
377 {
378 csr_set(CSR_ENVCFG, value);
379 return (csr_read_clear(CSR_ENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value;
380 }
381
382 /*
383 * Global sysctl to disable the tagged user addresses support. This control
384 * only prevents the tagged address ABI enabling via prctl() and does not
385 * disable it for tasks that already opted in to the relaxed ABI.
386 */
387
388 static const struct ctl_table tagged_addr_sysctl_table[] = {
389 {
390 .procname = "tagged_addr_disabled",
391 .mode = 0644,
392 .data = &tagged_addr_disabled,
393 .maxlen = sizeof(int),
394 .proc_handler = proc_dointvec_minmax,
395 .extra1 = SYSCTL_ZERO,
396 .extra2 = SYSCTL_ONE,
397 },
398 };
399
tagged_addr_init(void)400 static int __init tagged_addr_init(void)
401 {
402 if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
403 return 0;
404
405 /*
406 * envcfg.PMM is a WARL field. Detect which values are supported.
407 * Assume the supported PMLEN values are the same on all harts.
408 */
409 csr_clear(CSR_ENVCFG, ENVCFG_PMM);
410 have_user_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7);
411 have_user_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16);
412
413 if (!register_sysctl("abi", tagged_addr_sysctl_table))
414 return -EINVAL;
415
416 return 0;
417 }
418 core_initcall(tagged_addr_init);
419 #endif /* CONFIG_RISCV_ISA_SUPM */
420