xref: /linux/arch/arm64/mm/gcs.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 #include <linux/mm.h>
4 #include <linux/mman.h>
5 #include <linux/syscalls.h>
6 #include <linux/types.h>
7 
8 #include <asm/cmpxchg.h>
9 #include <asm/cpufeature.h>
10 #include <asm/gcs.h>
11 #include <asm/page.h>
12 
/* Map a new shadow stack region for userspace at addr (or kernel-chosen). */
static unsigned long alloc_gcs(unsigned long addr, unsigned long size)
{
	unsigned long flags = 0;

	return vm_mmap_shadow_stack(addr, size, flags);
}
17 
18 static unsigned long gcs_size(unsigned long size)
19 {
20 	if (size)
21 		return PAGE_ALIGN(size);
22 
23 	/* Allocate RLIMIT_STACK/2 with limits of PAGE_SIZE..2G */
24 	size = PAGE_ALIGN(min_t(unsigned long long,
25 				rlimit(RLIMIT_STACK) / 2, SZ_2G));
26 	return max(PAGE_SIZE, size);
27 }
28 
29 unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
30 				     const struct kernel_clone_args *args)
31 {
32 	unsigned long addr, size;
33 
34 	if (!system_supports_gcs())
35 		return 0;
36 
37 	if (!task_gcs_el0_enabled(tsk))
38 		return 0;
39 
40 	if ((args->flags & (CLONE_VFORK | CLONE_VM)) != CLONE_VM) {
41 		tsk->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
42 		return 0;
43 	}
44 
45 	size = args->stack_size / 2;
46 
47 	size = gcs_size(size);
48 	addr = alloc_gcs(0, size);
49 	if (IS_ERR_VALUE(addr))
50 		return addr;
51 
52 	tsk->thread.gcs_base = addr;
53 	tsk->thread.gcs_size = size;
54 	tsk->thread.gcspr_el0 = addr + size - sizeof(u64);
55 
56 	return addr;
57 }
58 
/*
 * map_shadow_stack() - allocate a new Guarded Control Stack for userspace.
 *
 * @addr:  page aligned hint/fixed address for the mapping (0 lets the
 *         kernel choose).
 * @size:  requested size in bytes, must be a multiple of the 8 byte GCS
 *         entry size.
 * @flags: SHADOW_STACK_SET_TOKEN and/or SHADOW_STACK_SET_MARKER.
 *
 * Returns the address of the new stack or a negative error code.
 */
SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags)
{
	unsigned long alloc_size;
	unsigned long __user *cap_ptr;
	unsigned long cap_val;
	int ret = 0;
	int cap_offset;

	if (!system_supports_gcs())
		return -EOPNOTSUPP;

	if (flags & ~(SHADOW_STACK_SET_TOKEN | SHADOW_STACK_SET_MARKER))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr))
		return -EINVAL;

	/*
	 * GCS entries are 8 bytes, so require 8 byte alignment; a single
	 * entry (size == 8) is rejected as too small to be usable.
	 */
	if (size == 8 || !IS_ALIGNED(size, 8))
		return -EINVAL;

	/*
	 * An overflow would result in attempting to write the restore token
	 * to the wrong location. Not catastrophic, but just return the right
	 * error code and block it.
	 */
	alloc_size = PAGE_ALIGN(size);
	if (alloc_size < size)
		return -EOVERFLOW;

	addr = alloc_gcs(addr, alloc_size);
	if (IS_ERR_VALUE(addr))
		return addr;

	/*
	 * Put a cap token at the end of the allocated region so it
	 * can be switched to.
	 */
	if (flags & SHADOW_STACK_SET_TOKEN) {
		/* Leave an extra empty frame as a top of stack marker? */
		if (flags & SHADOW_STACK_SET_MARKER)
			cap_offset = 2;
		else
			cap_offset = 1;

		/* Token goes in the last (or second to last) entry of size. */
		cap_ptr = (unsigned long __user *)(addr + size -
						   (cap_offset * sizeof(unsigned long)));
		cap_val = GCS_CAP(cap_ptr);

		/* Write via the GCS access primitive; ret reports faults. */
		put_user_gcs(cap_val, cap_ptr, &ret);
		if (ret != 0) {
			/* Undo the mapping rather than hand back a stack
			 * missing its promised token. */
			vm_munmap(addr, size);
			return -EFAULT;
		}

		/*
		 * Ensure the new cap is ordered before standard
		 * memory accesses to the same location.
		 */
		gcsb_dsync();
	}

	return addr;
}
122 
123 /*
124  * Apply the GCS mode configured for the specified task to the
125  * hardware.
126  */
127 void gcs_set_el0_mode(struct task_struct *task)
128 {
129 	u64 gcscre0_el1 = GCSCRE0_EL1_nTR;
130 
131 	if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE)
132 		gcscre0_el1 |= GCSCRE0_EL1_RVCHKEN | GCSCRE0_EL1_PCRSEL;
133 
134 	if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_WRITE)
135 		gcscre0_el1 |= GCSCRE0_EL1_STREn;
136 
137 	if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_PUSH)
138 		gcscre0_el1 |= GCSCRE0_EL1_PUSHMEn;
139 
140 	write_sysreg_s(gcscre0_el1, SYS_GCSCRE0_EL1);
141 }
142 
143 void gcs_free(struct task_struct *task)
144 {
145 	if (!system_supports_gcs())
146 		return;
147 
148 	if (!task->mm || task->mm != current->mm)
149 		return;
150 
151 	if (task->thread.gcs_base)
152 		vm_munmap(task->thread.gcs_base, task->thread.gcs_size);
153 
154 	task->thread.gcspr_el0 = 0;
155 	task->thread.gcs_base = 0;
156 	task->thread.gcs_size = 0;
157 }
158 
159 int arch_set_shadow_stack_status(struct task_struct *task, unsigned long arg)
160 {
161 	unsigned long gcs, size;
162 	int ret;
163 
164 	if (!system_supports_gcs())
165 		return -EINVAL;
166 
167 	if (is_compat_thread(task_thread_info(task)))
168 		return -EINVAL;
169 
170 	/* Reject unknown flags */
171 	if (arg & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
172 		return -EINVAL;
173 
174 	ret = gcs_check_locked(task, arg);
175 	if (ret != 0)
176 		return ret;
177 
178 	/* If we are enabling GCS then make sure we have a stack */
179 	if (arg & PR_SHADOW_STACK_ENABLE &&
180 	    !task_gcs_el0_enabled(task)) {
181 		/* Do not allow GCS to be reenabled */
182 		if (task->thread.gcs_base || task->thread.gcspr_el0)
183 			return -EINVAL;
184 
185 		if (task != current)
186 			return -EBUSY;
187 
188 		size = gcs_size(0);
189 		gcs = alloc_gcs(0, size);
190 		if (IS_ERR_VALUE(gcs))
191 			return gcs;
192 
193 		task->thread.gcspr_el0 = gcs + size - sizeof(u64);
194 		task->thread.gcs_base = gcs;
195 		task->thread.gcs_size = size;
196 		if (task == current)
197 			write_sysreg_s(task->thread.gcspr_el0,
198 				       SYS_GCSPR_EL0);
199 	}
200 
201 	task->thread.gcs_el0_mode = arg;
202 	if (task == current)
203 		gcs_set_el0_mode(task);
204 
205 	return 0;
206 }
207 
208 int arch_get_shadow_stack_status(struct task_struct *task,
209 				 unsigned long __user *arg)
210 {
211 	if (!system_supports_gcs())
212 		return -EINVAL;
213 
214 	if (is_compat_thread(task_thread_info(task)))
215 		return -EINVAL;
216 
217 	return put_user(task->thread.gcs_el0_mode, arg);
218 }
219 
220 int arch_lock_shadow_stack_status(struct task_struct *task,
221 				  unsigned long arg)
222 {
223 	if (!system_supports_gcs())
224 		return -EINVAL;
225 
226 	if (is_compat_thread(task_thread_info(task)))
227 		return -EINVAL;
228 
229 	/*
230 	 * We support locking unknown bits so applications can prevent
231 	 * any changes in a future proof manner.
232 	 */
233 	task->thread.gcs_el0_locked |= arg;
234 
235 	return 0;
236 }
237