xref: /linux/arch/riscv/kernel/process.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/bitfield.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/personality.h>

#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/csr.h>
#include <asm/stacktrace.h>
#include <asm/string.h>
#include <asm/switch_to.h>
#include <asm/thread_info.h>
#include <asm/cpuidle.h>
#include <asm/vector.h>
#include <asm/cpufeature.h>
#include <asm/exec.h>

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

extern asmlinkage void ret_from_fork(void);

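/*
 * Idle the hart until the next interrupt. cpu_do_idle() (from asm/cpuidle.h)
 * is expected to order outstanding memory accesses and then wait for an
 * interrupt (WFI).
 */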
void noinstr arch_cpu_idle(void)
{
	cpu_do_idle();
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	if (!unaligned_ctl_available())
		return -EINVAL;

	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	if (!unaligned_ctl_available())
		return -EINVAL;

	return put_user(tsk->thread.align_ctl, (unsigned long __user *)adr);
}
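
/*
 * Usage sketch (illustrative only, not kernel code): the two helpers above
 * back the generic PR_SET_UNALIGN/PR_GET_UNALIGN prctl() interface, and only
 * succeed when the kernel reports misaligned-access handling as controllable
 * (unaligned_ctl_available()). From userspace, roughly:
 *
 *	#include <sys/prctl.h>
 *
 *	// Request SIGBUS instead of in-kernel fixups for misaligned accesses.
 *	if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0))
 *		perror("PR_SET_UNALIGN");
 *
 *	// Read the current control back; note the implementation above
 *	// stores an unsigned long through the user pointer.
 *	unsigned long ctl;
 *	if (!prctl(PR_GET_UNALIGN, &ctl, 0, 0, 0))
 *		printf("align_ctl = %#lx\n", ctl);
 */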

void __show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	if (!user_mode(regs)) {
		pr_cont("epc : %pS\n", (void *)regs->epc);
		pr_cont(" ra : %pS\n", (void *)regs->ra);
	}

	pr_cont("epc : " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
		regs->epc, regs->ra, regs->sp);
	pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
		regs->gp, regs->tp, regs->t0);
	pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
		regs->t1, regs->t2, regs->s0);
	pr_cont(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
		regs->s1, regs->a0, regs->a1);
	pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
		regs->a2, regs->a3, regs->a4);
	pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
		regs->a5, regs->a6, regs->a7);
	pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
		regs->s2, regs->s3, regs->s4);
	pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
		regs->s5, regs->s6, regs->s7);
	pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
		regs->s8, regs->s9, regs->s10);
	pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
		regs->s11, regs->t3, regs->t4);
	pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
		regs->t5, regs->t6);

	pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n",
		regs->status, regs->badaddr, regs->cause);
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	if (!user_mode(regs))
		dump_backtrace(regs, NULL, KERN_DEFAULT);
}

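/*
 * Apply stack randomization when setting up a new user stack: subtract a
 * random offset of less than one page, then round down to a 16-byte
 * boundary, matching the RISC-V psABI requirement for stack alignment.
 */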
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);
	return sp & ~0xf;
}

#ifdef CONFIG_COMPAT
static bool compat_mode_supported __read_mostly;

bool compat_elf_check_arch(Elf32_Ehdr *hdr)
{
	return compat_mode_supported &&
	       hdr->e_machine == EM_RISCV &&
	       hdr->e_ident[EI_CLASS] == ELFCLASS32;
}

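/*
 * sstatus.UXL is a WARL field: probe 32-bit user-mode support by writing
 * UXL=32, checking whether the value sticks, and then restoring the
 * original CSR contents.
 */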
static int __init compat_mode_detect(void)
{
	unsigned long tmp = csr_read(CSR_STATUS);

	csr_write(CSR_STATUS, (tmp & ~SR_UXL) | SR_UXL_32);
	compat_mode_supported =
			(csr_read(CSR_STATUS) & SR_UXL) == SR_UXL_32;

	csr_write(CSR_STATUS, tmp);

	pr_info("riscv: ELF compat mode %s\n",
			compat_mode_supported ? "supported" : "unsupported");

	return 0;
}
early_initcall(compat_mode_detect);
#endif

void start_thread(struct pt_regs *regs, unsigned long pc,
	unsigned long sp)
{
	regs->status = SR_PIE;
	if (has_fpu()) {
		regs->status |= SR_FS_INITIAL;
		/*
		 * Restore the initial values to the FP registers
		 * before starting the user program.
		 */
		fstate_restore(current, regs);
	}
	regs->epc = pc;
	regs->sp = sp;

#ifdef CONFIG_64BIT
	regs->status &= ~SR_UXL;

	if (is_compat_task())
		regs->status |= SR_UXL_32;
	else
		regs->status |= SR_UXL_64;
#endif
}

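/*
 * Called on exec(): reset the per-task FP, vector, and pointer masking
 * state so the new program image starts from a clean slate.
 */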
void flush_thread(void)
{
#ifdef CONFIG_FPU
	/*
	 * Reset FPU state and context
	 *	frm: round to nearest, ties to even (IEEE default)
	 *	fflags: accrued exceptions cleared
	 */
	fstate_off(current, task_pt_regs(current));
	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
#endif
#ifdef CONFIG_RISCV_ISA_V
	/* Reset vector state */
	riscv_v_vstate_ctrl_init(current);
	riscv_v_vstate_off(task_pt_regs(current));
	kfree(current->thread.vstate.datap);
	memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
	clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE);
#endif
#ifdef CONFIG_RISCV_ISA_SUPM
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
		envcfg_update_bits(current, ENVCFG_PMM, ENVCFG_PMM_PMLEN_0);
#endif
}

void arch_release_task_struct(struct task_struct *tsk)
{
	/* Free the datap backing the vector context. */
	if (has_vector())
		riscv_v_thread_free(tsk);
}

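/*
 * Called when duplicating a task at fork: snapshot the parent's live FP
 * state into thread.fstate before the struct copy below, and give the
 * child a clean vector context (datap is reallocated in copy_thread()).
 */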
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	fstate_save(src, task_pt_regs(src));
	*dst = *src;
	/* clear entire V context, including datap for a new task */
	memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
	memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state));
	clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE);

	return 0;
}

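/*
 * Set up the child's kernel context: both kernel threads and user-space
 * children first resume in ret_from_fork(). For kernel threads it invokes
 * args->fn(args->fn_arg) stashed in thread.s[]; otherwise it returns to
 * user space through the child's pt_regs, with a0 = 0 as fork()'s return
 * value in the child.
 */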
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs = task_pt_regs(p);

	/* Ensure all threads in this mm have the same pointer masking mode. */
	if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM) && p->mm && (clone_flags & CLONE_VM))
		set_bit(MM_CONTEXT_LOCK_PMLEN, &p->mm->context.flags);

	memset(&p->thread.s, 0, sizeof(p->thread.s));

	/* p->thread holds context to be restored by __switch_to() */
	if (unlikely(args->fn)) {
		/* Kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		/* Supervisor/Machine, irqs on: */
		childregs->status = SR_PP | SR_PIE;

		p->thread.s[0] = (unsigned long)args->fn;
		p->thread.s[1] = (unsigned long)args->fn_arg;
	} else {
		*childregs = *(current_pt_regs());
		/* Turn off status.VS */
		riscv_v_vstate_off(childregs);
		if (usp) /* User fork */
			childregs->sp = usp;
		if (clone_flags & CLONE_SETTLS)
			childregs->tp = tls;
		childregs->a0 = 0; /* Return value of fork() */
		p->thread.s[0] = 0;
	}
	p->thread.riscv_v_flags = 0;
	if (has_vector())
		riscv_v_thread_alloc(p);
	p->thread.ra = (unsigned long)ret_from_fork;
	p->thread.sp = (unsigned long)childregs; /* kernel sp */
	return 0;
}

void __init arch_task_cache_init(void)
{
	riscv_v_setup_ctx_cache();
}

#ifdef CONFIG_RISCV_ISA_SUPM
enum {
	PMLEN_0 = 0,
	PMLEN_7 = 7,
	PMLEN_16 = 16,
};

static bool have_user_pmlen_7;
static bool have_user_pmlen_16;

/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
	unsigned long valid_mask = PR_PMLEN_MASK | PR_TAGGED_ADDR_ENABLE;
	struct thread_info *ti = task_thread_info(task);
	struct mm_struct *mm = task->mm;
	unsigned long pmm;
	u8 pmlen;

	if (is_compat_thread(ti))
		return -EINVAL;

	if (arg & ~valid_mask)
		return -EINVAL;

	/*
	 * Prefer the smallest PMLEN that satisfies the user's request,
	 * in case choosing a larger PMLEN has a performance impact.
	 */
	pmlen = FIELD_GET(PR_PMLEN_MASK, arg);
	if (pmlen == PMLEN_0) {
		pmm = ENVCFG_PMM_PMLEN_0;
	} else if (pmlen <= PMLEN_7 && have_user_pmlen_7) {
		pmlen = PMLEN_7;
		pmm = ENVCFG_PMM_PMLEN_7;
	} else if (pmlen <= PMLEN_16 && have_user_pmlen_16) {
		pmlen = PMLEN_16;
		pmm = ENVCFG_PMM_PMLEN_16;
	} else {
		return -EINVAL;
	}

	/*
	 * Do not allow the tagged address ABI to be enabled if it is globally
	 * disabled via the sysctl abi.tagged_addr_disabled, or if pointer
	 * masking is disabled for userspace (PMLEN == 0).
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && (tagged_addr_disabled || !pmlen))
		return -EINVAL;

	if (!(arg & PR_TAGGED_ADDR_ENABLE))
		pmlen = PMLEN_0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (test_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags) && mm->context.pmlen != pmlen) {
		mmap_write_unlock(mm);
		return -EBUSY;
	}

	envcfg_update_bits(task, ENVCFG_PMM, pmm);
	mm->context.pmlen = pmlen;

	mmap_write_unlock(mm);

	return 0;
}

long get_tagged_addr_ctrl(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);
	long ret = 0;

	if (is_compat_thread(ti))
		return -EINVAL;

	/*
	 * The mm context's pmlen is set only when the tagged address ABI is
	 * enabled, so the effective PMLEN must be extracted from envcfg.PMM.
	 */
	switch (task->thread.envcfg & ENVCFG_PMM) {
	case ENVCFG_PMM_PMLEN_7:
		ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_7);
		break;
	case ENVCFG_PMM_PMLEN_16:
		ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_16);
		break;
	}

	if (task->mm->context.pmlen)
		ret |= PR_TAGGED_ADDR_ENABLE;

	return ret;
}
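
/*
 * Usage sketch (illustrative only, not kernel code): the pair of prctl()
 * handlers above is driven from userspace roughly as follows, assuming the
 * PR_PMLEN_SHIFT/PR_PMLEN_MASK and PR_*_TAGGED_ADDR_CTRL definitions are
 * available from <linux/prctl.h>:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	// Ask for at least 7 tag bits and enable the tagged address ABI;
 *	// the kernel may grant a larger PMLEN (e.g. 16) if 7 is unsupported.
 *	unsigned long ctl = PR_TAGGED_ADDR_ENABLE | (7UL << PR_PMLEN_SHIFT);
 *
 *	if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctl, 0, 0, 0))
 *		perror("PR_SET_TAGGED_ADDR_CTRL");
 *
 *	// Read back the granted PMLEN and enable bit.
 *	long cur = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
 */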

static bool try_to_set_pmm(unsigned long value)
{
	csr_set(CSR_ENVCFG, value);
	return (csr_read_clear(CSR_ENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value;
}

/*
 * Global sysctl to disable tagged user address support. This control only
 * prevents the tagged address ABI from being enabled via prctl() and does
 * not disable it for tasks that have already opted in to the relaxed ABI.
 */

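/* Exposed as /proc/sys/abi/tagged_addr_disabled (see tagged_addr_init()). */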
static struct ctl_table tagged_addr_sysctl_table[] = {
	{
		.procname	= "tagged_addr_disabled",
		.mode		= 0644,
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init tagged_addr_init(void)
{
	if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
		return 0;

	/*
	 * envcfg.PMM is a WARL field. Detect which values are supported.
	 * Assume the supported PMLEN values are the same on all harts.
	 */
	csr_clear(CSR_ENVCFG, ENVCFG_PMM);
	have_user_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7);
	have_user_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16);

	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;

	return 0;
}
core_initcall(tagged_addr_init);
#endif	/* CONFIG_RISCV_ISA_SUPM */
399