xref: /linux/arch/riscv/kernel/usercfi.c (revision c17ee635fd3a482b2ad2bf5e269755c2eae5f25e)
1c70772afSDeepak Gupta // SPDX-License-Identifier: GPL-2.0
2c70772afSDeepak Gupta /*
3c70772afSDeepak Gupta  * Copyright (C) 2024 Rivos, Inc.
4c70772afSDeepak Gupta  * Deepak Gupta <debug@rivosinc.com>
5c70772afSDeepak Gupta  */
6c70772afSDeepak Gupta 
7c70772afSDeepak Gupta #include <linux/sched.h>
8c70772afSDeepak Gupta #include <linux/bitops.h>
9c70772afSDeepak Gupta #include <linux/types.h>
10c70772afSDeepak Gupta #include <linux/mm.h>
11c70772afSDeepak Gupta #include <linux/mman.h>
12c70772afSDeepak Gupta #include <linux/uaccess.h>
13c70772afSDeepak Gupta #include <linux/sizes.h>
14c70772afSDeepak Gupta #include <linux/user.h>
15c70772afSDeepak Gupta #include <linux/syscalls.h>
16c70772afSDeepak Gupta #include <linux/prctl.h>
17c70772afSDeepak Gupta #include <asm/csr.h>
18c70772afSDeepak Gupta #include <asm/usercfi.h>
19c70772afSDeepak Gupta 
/*
 * Bitmask of user CFI features disabled via the "riscv_nousercfi=" kernel
 * command line parameter (parsed by setup_global_riscv_enable() in this
 * file). Consulted by is_user_shstk_enabled()/is_user_lpad_enabled().
 */
unsigned long riscv_nousercfi __read_mostly;

/* A shadow stack entry is one machine word (XLEN/8 bytes) wide */
#define SHSTK_ENTRY_SIZE sizeof(void *)
23c70772afSDeepak Gupta 
24fd44a4a8SDeepak Gupta bool is_shstk_enabled(struct task_struct *task)
25fd44a4a8SDeepak Gupta {
26fd44a4a8SDeepak Gupta 	return task->thread_info.user_cfi_state.ubcfi_en;
27fd44a4a8SDeepak Gupta }
28fd44a4a8SDeepak Gupta 
2961a02002SDeepak Gupta bool is_shstk_allocated(struct task_struct *task)
3061a02002SDeepak Gupta {
3161a02002SDeepak Gupta 	return task->thread_info.user_cfi_state.shdw_stk_base;
3261a02002SDeepak Gupta }
3361a02002SDeepak Gupta 
3461a02002SDeepak Gupta bool is_shstk_locked(struct task_struct *task)
3561a02002SDeepak Gupta {
3661a02002SDeepak Gupta 	return task->thread_info.user_cfi_state.ubcfi_locked;
3761a02002SDeepak Gupta }
3861a02002SDeepak Gupta 
39fd44a4a8SDeepak Gupta void set_shstk_base(struct task_struct *task, unsigned long shstk_addr, unsigned long size)
40fd44a4a8SDeepak Gupta {
41fd44a4a8SDeepak Gupta 	task->thread_info.user_cfi_state.shdw_stk_base = shstk_addr;
42fd44a4a8SDeepak Gupta 	task->thread_info.user_cfi_state.shdw_stk_size = size;
43fd44a4a8SDeepak Gupta }
44fd44a4a8SDeepak Gupta 
45fd44a4a8SDeepak Gupta unsigned long get_shstk_base(struct task_struct *task, unsigned long *size)
46fd44a4a8SDeepak Gupta {
47fd44a4a8SDeepak Gupta 	if (size)
48fd44a4a8SDeepak Gupta 		*size = task->thread_info.user_cfi_state.shdw_stk_size;
49fd44a4a8SDeepak Gupta 	return task->thread_info.user_cfi_state.shdw_stk_base;
50fd44a4a8SDeepak Gupta }
51fd44a4a8SDeepak Gupta 
52fd44a4a8SDeepak Gupta void set_active_shstk(struct task_struct *task, unsigned long shstk_addr)
53fd44a4a8SDeepak Gupta {
54fd44a4a8SDeepak Gupta 	task->thread_info.user_cfi_state.user_shdw_stk = shstk_addr;
55fd44a4a8SDeepak Gupta }
56fd44a4a8SDeepak Gupta 
5766c9c713SDeepak Gupta unsigned long get_active_shstk(struct task_struct *task)
5866c9c713SDeepak Gupta {
5966c9c713SDeepak Gupta 	return task->thread_info.user_cfi_state.user_shdw_stk;
6066c9c713SDeepak Gupta }
6166c9c713SDeepak Gupta 
6261a02002SDeepak Gupta void set_shstk_status(struct task_struct *task, bool enable)
6361a02002SDeepak Gupta {
64*c9b859c4SDeepak Gupta 	if (!is_user_shstk_enabled())
6561a02002SDeepak Gupta 		return;
6661a02002SDeepak Gupta 
6761a02002SDeepak Gupta 	task->thread_info.user_cfi_state.ubcfi_en = enable ? 1 : 0;
6861a02002SDeepak Gupta 
6961a02002SDeepak Gupta 	if (enable)
7061a02002SDeepak Gupta 		task->thread.envcfg |= ENVCFG_SSE;
7161a02002SDeepak Gupta 	else
7261a02002SDeepak Gupta 		task->thread.envcfg &= ~ENVCFG_SSE;
7361a02002SDeepak Gupta 
7461a02002SDeepak Gupta 	csr_write(CSR_ENVCFG, task->thread.envcfg);
7561a02002SDeepak Gupta }
7661a02002SDeepak Gupta 
7761a02002SDeepak Gupta void set_shstk_lock(struct task_struct *task)
7861a02002SDeepak Gupta {
7961a02002SDeepak Gupta 	task->thread_info.user_cfi_state.ubcfi_locked = 1;
8061a02002SDeepak Gupta }
8161a02002SDeepak Gupta 
828a9e22d2SDeepak Gupta bool is_indir_lp_enabled(struct task_struct *task)
838a9e22d2SDeepak Gupta {
848a9e22d2SDeepak Gupta 	return task->thread_info.user_cfi_state.ufcfi_en;
858a9e22d2SDeepak Gupta }
868a9e22d2SDeepak Gupta 
878a9e22d2SDeepak Gupta bool is_indir_lp_locked(struct task_struct *task)
888a9e22d2SDeepak Gupta {
898a9e22d2SDeepak Gupta 	return task->thread_info.user_cfi_state.ufcfi_locked;
908a9e22d2SDeepak Gupta }
918a9e22d2SDeepak Gupta 
928a9e22d2SDeepak Gupta void set_indir_lp_status(struct task_struct *task, bool enable)
938a9e22d2SDeepak Gupta {
94*c9b859c4SDeepak Gupta 	if (!is_user_lpad_enabled())
958a9e22d2SDeepak Gupta 		return;
968a9e22d2SDeepak Gupta 
978a9e22d2SDeepak Gupta 	task->thread_info.user_cfi_state.ufcfi_en = enable ? 1 : 0;
988a9e22d2SDeepak Gupta 
998a9e22d2SDeepak Gupta 	if (enable)
1008a9e22d2SDeepak Gupta 		task->thread.envcfg |= ENVCFG_LPE;
1018a9e22d2SDeepak Gupta 	else
1028a9e22d2SDeepak Gupta 		task->thread.envcfg &= ~ENVCFG_LPE;
1038a9e22d2SDeepak Gupta 
1048a9e22d2SDeepak Gupta 	csr_write(CSR_ENVCFG, task->thread.envcfg);
1058a9e22d2SDeepak Gupta }
1068a9e22d2SDeepak Gupta 
1078a9e22d2SDeepak Gupta void set_indir_lp_lock(struct task_struct *task)
1088a9e22d2SDeepak Gupta {
1098a9e22d2SDeepak Gupta 	task->thread_info.user_cfi_state.ufcfi_locked = 1;
1108a9e22d2SDeepak Gupta }
111fd44a4a8SDeepak Gupta /*
112fd44a4a8SDeepak Gupta  * If size is 0, then to be compatible with regular stack we want it to be as big as
113fd44a4a8SDeepak Gupta  * regular stack. Else PAGE_ALIGN it and return back
114fd44a4a8SDeepak Gupta  */
115fd44a4a8SDeepak Gupta static unsigned long calc_shstk_size(unsigned long size)
116fd44a4a8SDeepak Gupta {
117fd44a4a8SDeepak Gupta 	if (size)
118fd44a4a8SDeepak Gupta 		return PAGE_ALIGN(size);
119fd44a4a8SDeepak Gupta 
120fd44a4a8SDeepak Gupta 	return PAGE_ALIGN(min_t(unsigned long long, rlimit(RLIMIT_STACK), SZ_4G));
121fd44a4a8SDeepak Gupta }
122fd44a4a8SDeepak Gupta 
/*
 * Writes on shadow stack can either be `sspush` or `ssamoswap`. `sspush` can happen
 * implicitly on current shadow stack pointed to by CSR_SSP. `ssamoswap` takes pointer to
 * shadow stack. To keep it simple, we plan to use `ssamoswap` to perform writes on shadow
 * stack.
 *
 * Atomically swaps @val into the user shadow stack slot at @addr and returns
 * the previous value, or -1 (ULONG_MAX) on a faulting access. -1 doubles as a
 * fault sentinel because valid shadow stack entries are return addresses or
 * zero, never -1.
 */
static noinline unsigned long amo_user_shstk(unsigned long __user *addr, unsigned long val)
{
	/*
	 * Never expect -1 on shadow stack. Expect return addresses and zero
	 */
	unsigned long swap = -1;

	/* Open the user access window around the single user-memory AMO */
	__enable_user_access();
	asm goto(".option push\n"
		".option arch, +zicfiss\n"
		/* Zicfiss atomic swap on a shadow stack page */
		"1: ssamoswap.d %[swap], %[val], %[addr]\n"
		/* If the access at 1: faults, the extable redirects to 'fault' */
		_ASM_EXTABLE(1b, %l[fault])
		".option pop\n"
		 : [swap] "=r" (swap), [addr] "+A" (*(__force unsigned long *)addr)
		: [val] "r" (val)
		: "memory"
		: fault
		);
	__disable_user_access();
	return swap;
fault:
	/* Faulted: close the user access window and report failure */
	__disable_user_access();
	return -1;
}
153c70772afSDeepak Gupta 
154c70772afSDeepak Gupta /*
155c70772afSDeepak Gupta  * Create a restore token on the shadow stack.  A token is always XLEN wide
156c70772afSDeepak Gupta  * and aligned to XLEN.
157c70772afSDeepak Gupta  */
158c70772afSDeepak Gupta static int create_rstor_token(unsigned long ssp, unsigned long *token_addr)
159c70772afSDeepak Gupta {
160c70772afSDeepak Gupta 	unsigned long addr;
161c70772afSDeepak Gupta 
162c70772afSDeepak Gupta 	/* Token must be aligned */
163c70772afSDeepak Gupta 	if (!IS_ALIGNED(ssp, SHSTK_ENTRY_SIZE))
164c70772afSDeepak Gupta 		return -EINVAL;
165c70772afSDeepak Gupta 
166c70772afSDeepak Gupta 	/* On RISC-V we're constructing token to be function of address itself */
167c70772afSDeepak Gupta 	addr = ssp - SHSTK_ENTRY_SIZE;
168c70772afSDeepak Gupta 
169c70772afSDeepak Gupta 	if (amo_user_shstk((unsigned long __user *)addr, (unsigned long)ssp) == -1)
170c70772afSDeepak Gupta 		return -EFAULT;
171c70772afSDeepak Gupta 
172c70772afSDeepak Gupta 	if (token_addr)
173c70772afSDeepak Gupta 		*token_addr = addr;
174c70772afSDeepak Gupta 
175c70772afSDeepak Gupta 	return 0;
176c70772afSDeepak Gupta }
177c70772afSDeepak Gupta 
17866c9c713SDeepak Gupta /*
17966c9c713SDeepak Gupta  * Save user shadow stack pointer on the shadow stack itself and return a pointer to saved location.
18066c9c713SDeepak Gupta  * Returns -EFAULT if unsuccessful.
18166c9c713SDeepak Gupta  */
18266c9c713SDeepak Gupta int save_user_shstk(struct task_struct *tsk, unsigned long *saved_shstk_ptr)
18366c9c713SDeepak Gupta {
18466c9c713SDeepak Gupta 	unsigned long ss_ptr = 0;
18566c9c713SDeepak Gupta 	unsigned long token_loc = 0;
18666c9c713SDeepak Gupta 	int ret = 0;
18766c9c713SDeepak Gupta 
18866c9c713SDeepak Gupta 	if (!saved_shstk_ptr)
18966c9c713SDeepak Gupta 		return -EINVAL;
19066c9c713SDeepak Gupta 
19166c9c713SDeepak Gupta 	ss_ptr = get_active_shstk(tsk);
19266c9c713SDeepak Gupta 	ret = create_rstor_token(ss_ptr, &token_loc);
19366c9c713SDeepak Gupta 
19466c9c713SDeepak Gupta 	if (!ret) {
19566c9c713SDeepak Gupta 		*saved_shstk_ptr = token_loc;
19666c9c713SDeepak Gupta 		set_active_shstk(tsk, token_loc);
19766c9c713SDeepak Gupta 	}
19866c9c713SDeepak Gupta 
19966c9c713SDeepak Gupta 	return ret;
20066c9c713SDeepak Gupta }
20166c9c713SDeepak Gupta 
20266c9c713SDeepak Gupta /*
20366c9c713SDeepak Gupta  * Restores the user shadow stack pointer from the token on the shadow stack for task 'tsk'.
20466c9c713SDeepak Gupta  * Returns -EFAULT if unsuccessful.
20566c9c713SDeepak Gupta  */
20666c9c713SDeepak Gupta int restore_user_shstk(struct task_struct *tsk, unsigned long shstk_ptr)
20766c9c713SDeepak Gupta {
20866c9c713SDeepak Gupta 	unsigned long token = 0;
20966c9c713SDeepak Gupta 
21066c9c713SDeepak Gupta 	token = amo_user_shstk((unsigned long __user *)shstk_ptr, 0);
21166c9c713SDeepak Gupta 
21266c9c713SDeepak Gupta 	if (token == -1)
21366c9c713SDeepak Gupta 		return -EFAULT;
21466c9c713SDeepak Gupta 
21566c9c713SDeepak Gupta 	/* invalid token, return EINVAL */
21666c9c713SDeepak Gupta 	if ((token - shstk_ptr) != SHSTK_ENTRY_SIZE) {
21766c9c713SDeepak Gupta 		pr_info_ratelimited("%s[%d]: bad restore token in %s: pc=%p sp=%p, token=%p, shstk_ptr=%p\n",
21866c9c713SDeepak Gupta 				    tsk->comm, task_pid_nr(tsk), __func__,
21966c9c713SDeepak Gupta 				    (void *)(task_pt_regs(tsk)->epc),
22066c9c713SDeepak Gupta 				    (void *)(task_pt_regs(tsk)->sp),
22166c9c713SDeepak Gupta 				    (void *)token, (void *)shstk_ptr);
22266c9c713SDeepak Gupta 		return -EINVAL;
22366c9c713SDeepak Gupta 	}
22466c9c713SDeepak Gupta 
22566c9c713SDeepak Gupta 	/* all checks passed, set active shstk and return success */
22666c9c713SDeepak Gupta 	set_active_shstk(tsk, token);
22766c9c713SDeepak Gupta 	return 0;
22866c9c713SDeepak Gupta }
22966c9c713SDeepak Gupta 
230c70772afSDeepak Gupta static unsigned long allocate_shadow_stack(unsigned long addr, unsigned long size,
231c70772afSDeepak Gupta 					   unsigned long token_offset, bool set_tok)
232c70772afSDeepak Gupta {
233c70772afSDeepak Gupta 	int flags = MAP_ANONYMOUS | MAP_PRIVATE;
234c70772afSDeepak Gupta 	struct mm_struct *mm = current->mm;
235c70772afSDeepak Gupta 	unsigned long populate;
236c70772afSDeepak Gupta 
237c70772afSDeepak Gupta 	if (addr)
238c70772afSDeepak Gupta 		flags |= MAP_FIXED_NOREPLACE;
239c70772afSDeepak Gupta 
240c70772afSDeepak Gupta 	mmap_write_lock(mm);
241c70772afSDeepak Gupta 	addr = do_mmap(NULL, addr, size, PROT_READ, flags,
242c70772afSDeepak Gupta 		       VM_SHADOW_STACK | VM_WRITE, 0, &populate, NULL);
243c70772afSDeepak Gupta 	mmap_write_unlock(mm);
244c70772afSDeepak Gupta 
245c70772afSDeepak Gupta 	if (!set_tok || IS_ERR_VALUE(addr))
246c70772afSDeepak Gupta 		goto out;
247c70772afSDeepak Gupta 
248c70772afSDeepak Gupta 	if (create_rstor_token(addr + token_offset, NULL)) {
249c70772afSDeepak Gupta 		vm_munmap(addr, size);
250c70772afSDeepak Gupta 		return -EINVAL;
251c70772afSDeepak Gupta 	}
252c70772afSDeepak Gupta 
253c70772afSDeepak Gupta out:
254c70772afSDeepak Gupta 	return addr;
255c70772afSDeepak Gupta }
256c70772afSDeepak Gupta 
257c70772afSDeepak Gupta SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags)
258c70772afSDeepak Gupta {
259c70772afSDeepak Gupta 	bool set_tok = flags & SHADOW_STACK_SET_TOKEN;
260c70772afSDeepak Gupta 	unsigned long aligned_size = 0;
261c70772afSDeepak Gupta 
262*c9b859c4SDeepak Gupta 	if (!is_user_shstk_enabled())
263c70772afSDeepak Gupta 		return -EOPNOTSUPP;
264c70772afSDeepak Gupta 
265c70772afSDeepak Gupta 	/* Anything other than set token should result in invalid param */
266c70772afSDeepak Gupta 	if (flags & ~SHADOW_STACK_SET_TOKEN)
267c70772afSDeepak Gupta 		return -EINVAL;
268c70772afSDeepak Gupta 
269c70772afSDeepak Gupta 	/*
270c70772afSDeepak Gupta 	 * Unlike other architectures, on RISC-V, SSP pointer is held in CSR_SSP and is an available
271c70772afSDeepak Gupta 	 * CSR in all modes. CSR accesses are performed using 12bit index programmed in instruction
272c70772afSDeepak Gupta 	 * itself. This provides static property on register programming and writes to CSR can't
273c70772afSDeepak Gupta 	 * be unintentional from programmer's perspective. As long as programmer has guarded areas
274c70772afSDeepak Gupta 	 * which perform writes to CSR_SSP properly, shadow stack pivoting is not possible. Since
275c70772afSDeepak Gupta 	 * CSR_SSP is writable by user mode, it itself can setup a shadow stack token subsequent
276c70772afSDeepak Gupta 	 * to allocation. Although in order to provide portablity with other architectures (because
277c70772afSDeepak Gupta 	 * `map_shadow_stack` is arch agnostic syscall), RISC-V will follow expectation of a token
278c70772afSDeepak Gupta 	 * flag in flags and if provided in flags, will setup a token at the base.
279c70772afSDeepak Gupta 	 */
280c70772afSDeepak Gupta 
281c70772afSDeepak Gupta 	/* If there isn't space for a token */
282c70772afSDeepak Gupta 	if (set_tok && size < SHSTK_ENTRY_SIZE)
283c70772afSDeepak Gupta 		return -ENOSPC;
284c70772afSDeepak Gupta 
285c70772afSDeepak Gupta 	if (addr && (addr & (PAGE_SIZE - 1)))
286c70772afSDeepak Gupta 		return -EINVAL;
287c70772afSDeepak Gupta 
288c70772afSDeepak Gupta 	aligned_size = PAGE_ALIGN(size);
289c70772afSDeepak Gupta 	if (aligned_size < size)
290c70772afSDeepak Gupta 		return -EOVERFLOW;
291c70772afSDeepak Gupta 
292c70772afSDeepak Gupta 	return allocate_shadow_stack(addr, aligned_size, size, set_tok);
293c70772afSDeepak Gupta }
294fd44a4a8SDeepak Gupta 
295fd44a4a8SDeepak Gupta /*
296fd44a4a8SDeepak Gupta  * This gets called during clone/clone3/fork. And is needed to allocate a shadow stack for
297fd44a4a8SDeepak Gupta  * cases where CLONE_VM is specified and thus a different stack is specified by user. We
298fd44a4a8SDeepak Gupta  * thus need a separate shadow stack too. How a separate shadow stack is specified by
299fd44a4a8SDeepak Gupta  * user is still being debated. Once that's settled, remove this part of the comment.
300fd44a4a8SDeepak Gupta  * This function simply returns 0 if shadow stacks are not supported or if separate shadow
301fd44a4a8SDeepak Gupta  * stack allocation is not needed (like in case of !CLONE_VM)
302fd44a4a8SDeepak Gupta  */
303fd44a4a8SDeepak Gupta unsigned long shstk_alloc_thread_stack(struct task_struct *tsk,
304fd44a4a8SDeepak Gupta 				       const struct kernel_clone_args *args)
305fd44a4a8SDeepak Gupta {
306fd44a4a8SDeepak Gupta 	unsigned long addr, size;
307fd44a4a8SDeepak Gupta 
308fd44a4a8SDeepak Gupta 	/* If shadow stack is not supported, return 0 */
309*c9b859c4SDeepak Gupta 	if (!is_user_shstk_enabled())
310fd44a4a8SDeepak Gupta 		return 0;
311fd44a4a8SDeepak Gupta 
312fd44a4a8SDeepak Gupta 	/*
313fd44a4a8SDeepak Gupta 	 * If shadow stack is not enabled on the new thread, skip any
314fd44a4a8SDeepak Gupta 	 * switch to a new shadow stack.
315fd44a4a8SDeepak Gupta 	 */
316fd44a4a8SDeepak Gupta 	if (!is_shstk_enabled(tsk))
317fd44a4a8SDeepak Gupta 		return 0;
318fd44a4a8SDeepak Gupta 
319fd44a4a8SDeepak Gupta 	/*
320fd44a4a8SDeepak Gupta 	 * For CLONE_VFORK the child will share the parents shadow stack.
321fd44a4a8SDeepak Gupta 	 * Set base = 0 and size = 0, this is special means to track this state
322fd44a4a8SDeepak Gupta 	 * so the freeing logic run for child knows to leave it alone.
323fd44a4a8SDeepak Gupta 	 */
324fd44a4a8SDeepak Gupta 	if (args->flags & CLONE_VFORK) {
325fd44a4a8SDeepak Gupta 		set_shstk_base(tsk, 0, 0);
326fd44a4a8SDeepak Gupta 		return 0;
327fd44a4a8SDeepak Gupta 	}
328fd44a4a8SDeepak Gupta 
329fd44a4a8SDeepak Gupta 	/*
330fd44a4a8SDeepak Gupta 	 * For !CLONE_VM the child will use a copy of the parents shadow
331fd44a4a8SDeepak Gupta 	 * stack.
332fd44a4a8SDeepak Gupta 	 */
333fd44a4a8SDeepak Gupta 	if (!(args->flags & CLONE_VM))
334fd44a4a8SDeepak Gupta 		return 0;
335fd44a4a8SDeepak Gupta 
336fd44a4a8SDeepak Gupta 	/*
337fd44a4a8SDeepak Gupta 	 * reaching here means, CLONE_VM was specified and thus a separate shadow
338fd44a4a8SDeepak Gupta 	 * stack is needed for new cloned thread. Note: below allocation is happening
339fd44a4a8SDeepak Gupta 	 * using current mm.
340fd44a4a8SDeepak Gupta 	 */
341fd44a4a8SDeepak Gupta 	size = calc_shstk_size(args->stack_size);
342fd44a4a8SDeepak Gupta 	addr = allocate_shadow_stack(0, size, 0, false);
343fd44a4a8SDeepak Gupta 	if (IS_ERR_VALUE(addr))
344fd44a4a8SDeepak Gupta 		return addr;
345fd44a4a8SDeepak Gupta 
346fd44a4a8SDeepak Gupta 	set_shstk_base(tsk, addr, size);
347fd44a4a8SDeepak Gupta 
348fd44a4a8SDeepak Gupta 	return addr + size;
349fd44a4a8SDeepak Gupta }
350fd44a4a8SDeepak Gupta 
351fd44a4a8SDeepak Gupta void shstk_release(struct task_struct *tsk)
352fd44a4a8SDeepak Gupta {
353fd44a4a8SDeepak Gupta 	unsigned long base = 0, size = 0;
354fd44a4a8SDeepak Gupta 	/* If shadow stack is not supported or not enabled, nothing to release */
355*c9b859c4SDeepak Gupta 	if (!is_user_shstk_enabled() || !is_shstk_enabled(tsk))
356fd44a4a8SDeepak Gupta 		return;
357fd44a4a8SDeepak Gupta 
358fd44a4a8SDeepak Gupta 	/*
359fd44a4a8SDeepak Gupta 	 * When fork() with CLONE_VM fails, the child (tsk) already has a
360fd44a4a8SDeepak Gupta 	 * shadow stack allocated, and exit_thread() calls this function to
361fd44a4a8SDeepak Gupta 	 * free it.  In this case the parent (current) and the child share
362fd44a4a8SDeepak Gupta 	 * the same mm struct. Move forward only when they're same.
363fd44a4a8SDeepak Gupta 	 */
364fd44a4a8SDeepak Gupta 	if (!tsk->mm || tsk->mm != current->mm)
365fd44a4a8SDeepak Gupta 		return;
366fd44a4a8SDeepak Gupta 
367fd44a4a8SDeepak Gupta 	/*
368fd44a4a8SDeepak Gupta 	 * We know shadow stack is enabled but if base is NULL, then
369fd44a4a8SDeepak Gupta 	 * this task is not managing its own shadow stack (CLONE_VFORK). So
370fd44a4a8SDeepak Gupta 	 * skip freeing it.
371fd44a4a8SDeepak Gupta 	 */
372fd44a4a8SDeepak Gupta 	base = get_shstk_base(tsk, &size);
373fd44a4a8SDeepak Gupta 	if (!base)
374fd44a4a8SDeepak Gupta 		return;
375fd44a4a8SDeepak Gupta 
376fd44a4a8SDeepak Gupta 	vm_munmap(base, size);
377fd44a4a8SDeepak Gupta 	set_shstk_base(tsk, 0, 0);
378fd44a4a8SDeepak Gupta }
37961a02002SDeepak Gupta 
38061a02002SDeepak Gupta int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status)
38161a02002SDeepak Gupta {
38261a02002SDeepak Gupta 	unsigned long bcfi_status = 0;
38361a02002SDeepak Gupta 
384*c9b859c4SDeepak Gupta 	if (!is_user_shstk_enabled())
38561a02002SDeepak Gupta 		return -EINVAL;
38661a02002SDeepak Gupta 
38761a02002SDeepak Gupta 	/* this means shadow stack is enabled on the task */
38861a02002SDeepak Gupta 	bcfi_status |= (is_shstk_enabled(t) ? PR_SHADOW_STACK_ENABLE : 0);
38961a02002SDeepak Gupta 
39061a02002SDeepak Gupta 	return copy_to_user(status, &bcfi_status, sizeof(bcfi_status)) ? -EFAULT : 0;
39161a02002SDeepak Gupta }
39261a02002SDeepak Gupta 
/*
 * PR_SET_SHADOW_STACK_STATUS: enable or disable shadow stack for @t.
 * Enabling allocates and installs a fresh shadow stack; disabling releases
 * the task-owned one (with VFORK caveats, see below). Returns -EINVAL for
 * unsupported hardware, unknown flags, a locked state, or re-enable while a
 * stack is already allocated; -ENOMEM if allocation fails.
 */
int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status)
{
	unsigned long size = 0, addr = 0;
	bool enable_shstk = false;

	if (!is_user_shstk_enabled())
		return -EINVAL;

	/* Reject unknown flags */
	if (status & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	/* bcfi status is locked and further can't be modified by user */
	if (is_shstk_locked(t))
		return -EINVAL;

	enable_shstk = status & PR_SHADOW_STACK_ENABLE;
	/* Request is to enable shadow stack and shadow stack is not enabled already */
	if (enable_shstk && !is_shstk_enabled(t)) {
		/*
		 * Shadow stack was already allocated yet enable is requested
		 * again: no need to support such a usecase, return EINVAL.
		 */
		if (is_shstk_allocated(t))
			return -EINVAL;

		/* Size 0 = match regular stack size (see calc_shstk_size()) */
		size = calc_shstk_size(0);
		addr = allocate_shadow_stack(0, size, 0, false);
		if (IS_ERR_VALUE(addr))
			return -ENOMEM;
		set_shstk_base(t, addr, size);
		/* Stack grows down: active pointer starts at the top */
		set_active_shstk(t, addr + size);
	}

	/*
	 * If a request to disable shadow stack happens, let's go ahead and release it
	 * Although, if CLONE_VFORKed child did this, then in that case we will end up
	 * not releasing the shadow stack (because it might be needed in parent). Although
	 * we will disable it for VFORKed child. And if VFORKed child tries to enable again
	 * then in that case, it'll get entirely new shadow stack because following condition
	 * are true
	 *  - shadow stack was not enabled for vforked child
	 *  - shadow stack base was anyways pointing to 0
	 * This shouldn't be a big issue because we want parent to have availability of shadow
	 * stack whenever VFORKed child releases resources via exit or exec but at the same
	 * time we want VFORKed child to break away and establish new shadow stack if it desires
	 *
	 */
	if (!enable_shstk)
		shstk_release(t);

	set_shstk_status(t, enable_shstk);
	return 0;
}
44661a02002SDeepak Gupta 
44761a02002SDeepak Gupta int arch_lock_shadow_stack_status(struct task_struct *task,
44861a02002SDeepak Gupta 				  unsigned long arg)
44961a02002SDeepak Gupta {
45061a02002SDeepak Gupta 	/* If shtstk not supported or not enabled on task, nothing to lock here */
451*c9b859c4SDeepak Gupta 	if (!is_user_shstk_enabled() ||
45261a02002SDeepak Gupta 	    !is_shstk_enabled(task) || arg != 0)
45361a02002SDeepak Gupta 		return -EINVAL;
45461a02002SDeepak Gupta 
45561a02002SDeepak Gupta 	set_shstk_lock(task);
45661a02002SDeepak Gupta 
45761a02002SDeepak Gupta 	return 0;
45861a02002SDeepak Gupta }
4598a9e22d2SDeepak Gupta 
4608a9e22d2SDeepak Gupta int arch_get_indir_br_lp_status(struct task_struct *t, unsigned long __user *status)
4618a9e22d2SDeepak Gupta {
4628a9e22d2SDeepak Gupta 	unsigned long fcfi_status = 0;
4638a9e22d2SDeepak Gupta 
464*c9b859c4SDeepak Gupta 	if (!is_user_lpad_enabled())
4658a9e22d2SDeepak Gupta 		return -EINVAL;
4668a9e22d2SDeepak Gupta 
4678a9e22d2SDeepak Gupta 	/* indirect branch tracking is enabled on the task or not */
4688a9e22d2SDeepak Gupta 	fcfi_status |= (is_indir_lp_enabled(t) ? PR_INDIR_BR_LP_ENABLE : 0);
4698a9e22d2SDeepak Gupta 
4708a9e22d2SDeepak Gupta 	return copy_to_user(status, &fcfi_status, sizeof(fcfi_status)) ? -EFAULT : 0;
4718a9e22d2SDeepak Gupta }
4728a9e22d2SDeepak Gupta 
4738a9e22d2SDeepak Gupta int arch_set_indir_br_lp_status(struct task_struct *t, unsigned long status)
4748a9e22d2SDeepak Gupta {
4758a9e22d2SDeepak Gupta 	bool enable_indir_lp = false;
4768a9e22d2SDeepak Gupta 
477*c9b859c4SDeepak Gupta 	if (!is_user_lpad_enabled())
4788a9e22d2SDeepak Gupta 		return -EINVAL;
4798a9e22d2SDeepak Gupta 
4808a9e22d2SDeepak Gupta 	/* indirect branch tracking is locked and further can't be modified by user */
4818a9e22d2SDeepak Gupta 	if (is_indir_lp_locked(t))
4828a9e22d2SDeepak Gupta 		return -EINVAL;
4838a9e22d2SDeepak Gupta 
4848a9e22d2SDeepak Gupta 	/* Reject unknown flags */
4858a9e22d2SDeepak Gupta 	if (status & ~PR_INDIR_BR_LP_ENABLE)
4868a9e22d2SDeepak Gupta 		return -EINVAL;
4878a9e22d2SDeepak Gupta 
4888a9e22d2SDeepak Gupta 	enable_indir_lp = (status & PR_INDIR_BR_LP_ENABLE);
4898a9e22d2SDeepak Gupta 	set_indir_lp_status(t, enable_indir_lp);
4908a9e22d2SDeepak Gupta 
4918a9e22d2SDeepak Gupta 	return 0;
4928a9e22d2SDeepak Gupta }
4938a9e22d2SDeepak Gupta 
4948a9e22d2SDeepak Gupta int arch_lock_indir_br_lp_status(struct task_struct *task,
4958a9e22d2SDeepak Gupta 				 unsigned long arg)
4968a9e22d2SDeepak Gupta {
4978a9e22d2SDeepak Gupta 	/*
4988a9e22d2SDeepak Gupta 	 * If indirect branch tracking is not supported or not enabled on task,
4998a9e22d2SDeepak Gupta 	 * nothing to lock here
5008a9e22d2SDeepak Gupta 	 */
501*c9b859c4SDeepak Gupta 	if (!is_user_lpad_enabled() ||
5028a9e22d2SDeepak Gupta 	    !is_indir_lp_enabled(task) || arg != 0)
5038a9e22d2SDeepak Gupta 		return -EINVAL;
5048a9e22d2SDeepak Gupta 
5058a9e22d2SDeepak Gupta 	set_indir_lp_lock(task);
5068a9e22d2SDeepak Gupta 
5078a9e22d2SDeepak Gupta 	return 0;
5088a9e22d2SDeepak Gupta }
509*c9b859c4SDeepak Gupta 
510*c9b859c4SDeepak Gupta bool is_user_shstk_enabled(void)
511*c9b859c4SDeepak Gupta {
512*c9b859c4SDeepak Gupta 	return (cpu_supports_shadow_stack() &&
513*c9b859c4SDeepak Gupta 		!(riscv_nousercfi & CMDLINE_DISABLE_RISCV_USERCFI_BCFI));
514*c9b859c4SDeepak Gupta }
515*c9b859c4SDeepak Gupta 
516*c9b859c4SDeepak Gupta bool is_user_lpad_enabled(void)
517*c9b859c4SDeepak Gupta {
518*c9b859c4SDeepak Gupta 	return (cpu_supports_indirect_br_lp_instr() &&
519*c9b859c4SDeepak Gupta 		!(riscv_nousercfi & CMDLINE_DISABLE_RISCV_USERCFI_FCFI));
520*c9b859c4SDeepak Gupta }
521*c9b859c4SDeepak Gupta 
522*c9b859c4SDeepak Gupta static int __init setup_global_riscv_enable(char *str)
523*c9b859c4SDeepak Gupta {
524*c9b859c4SDeepak Gupta 	if (strcmp(str, "all") == 0)
525*c9b859c4SDeepak Gupta 		riscv_nousercfi = CMDLINE_DISABLE_RISCV_USERCFI;
526*c9b859c4SDeepak Gupta 
527*c9b859c4SDeepak Gupta 	if (strcmp(str, "fcfi") == 0)
528*c9b859c4SDeepak Gupta 		riscv_nousercfi |= CMDLINE_DISABLE_RISCV_USERCFI_FCFI;
529*c9b859c4SDeepak Gupta 
530*c9b859c4SDeepak Gupta 	if (strcmp(str, "bcfi") == 0)
531*c9b859c4SDeepak Gupta 		riscv_nousercfi |= CMDLINE_DISABLE_RISCV_USERCFI_BCFI;
532*c9b859c4SDeepak Gupta 
533*c9b859c4SDeepak Gupta 	if (riscv_nousercfi)
534*c9b859c4SDeepak Gupta 		pr_info("RISC-V user CFI disabled via cmdline - shadow stack status : %s, landing pad status : %s\n",
535*c9b859c4SDeepak Gupta 			(riscv_nousercfi & CMDLINE_DISABLE_RISCV_USERCFI_BCFI) ? "disabled" :
536*c9b859c4SDeepak Gupta 			"enabled", (riscv_nousercfi & CMDLINE_DISABLE_RISCV_USERCFI_FCFI) ?
537*c9b859c4SDeepak Gupta 			"disabled" : "enabled");
538*c9b859c4SDeepak Gupta 
539*c9b859c4SDeepak Gupta 	return 1;
540*c9b859c4SDeepak Gupta }
541*c9b859c4SDeepak Gupta 
542*c9b859c4SDeepak Gupta __setup("riscv_nousercfi=", setup_global_riscv_enable);
543