xref: /linux/arch/um/kernel/skas/uaccess.c (revision 3ca3af7d1f230d1f93ba4cd8cd9d054870f2406f)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/page.h>
#include <kern_util.h>
#include <asm/futex.h>
#include <os.h>

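/*
 * Walk the page tables of @mm and return the PTE mapping @addr, or NULL
 * if any intermediate level (pgd, p4d, pud, pmd) is not present.
 */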
pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == NULL)
		return NULL;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);
}

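/*
 * Return the PTE mapping @virt in the current process, faulting the page
 * in (writably, if @is_write) when it is absent or lacks the required
 * permission.  Returns NULL if the page cannot be made accessible.
 */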
static pte_t *maybe_map(unsigned long virt, int is_write)
{
	pte_t *pte = virt_to_pte(current->mm, virt);
	int err, dummy_code;

	if ((pte == NULL) || !pte_present(*pte) ||
	    (is_write && !pte_write(*pte))) {
		err = handle_page_fault(virt, 0, is_write, 1, &dummy_code);
		if (err)
			return NULL;
		pte = virt_to_pte(current->mm, virt);
	}
	/* Re-check: the mapping may have vanished again after the fault. */
	if ((pte == NULL) || !pte_present(*pte))
		pte = NULL;

	return pte;
}

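/*
 * Map the page containing @addr and run @op on up to @len bytes of it.
 * On 64-bit UML all of physical memory is in the kernel mapping, so
 * page_address() is enough; 32-bit builds need a temporary mapping via
 * kmap_atomic(), which disables page faults itself.
 */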
static int do_op_one_page(unsigned long addr, int len, int is_write,
		 int (*op)(unsigned long addr, int len, void *arg), void *arg)
{
	struct page *page;
	pte_t *pte;
	int n;

	pte = maybe_map(addr, is_write);
	if (pte == NULL)
		return -1;

	page = pte_page(*pte);
#ifdef CONFIG_64BIT
	pagefault_disable();
	addr = (unsigned long) page_address(page) +
		(addr & ~PAGE_MASK);
#else
	addr = (unsigned long) kmap_atomic(page) +
		(addr & ~PAGE_MASK);
#endif
	n = (*op)(addr, len, arg);

#ifdef CONFIG_64BIT
	pagefault_enable();
#else
	kunmap_atomic((void *)addr);
#endif

	return n;
}

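/*
 * Apply @op to [addr, addr + len) one page at a time: first the partial
 * chunk up to the next page boundary, then whole pages, then the tail.
 * With 4 KiB pages, for example, a 10000-byte buffer starting at page
 * offset 0xff0 is split into chunks of 16, 4096, 4096 and 1792 bytes.
 * Returns 0 on success (or when @op requests an early stop by returning
 * a positive value) and the number of unprocessed bytes on failure.
 */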
static long buffer_op(unsigned long addr, int len, int is_write,
		      int (*op)(unsigned long, int, void *), void *arg)
{
	long size, remain, n;

	size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len);
	remain = len;

	n = do_op_one_page(addr, size, is_write, op, arg);
	if (n != 0) {
		remain = (n < 0 ? remain : 0);
		goto out;
	}

	addr += size;
	remain -= size;
	if (remain == 0)
		goto out;

	while (addr < ((addr + remain) & PAGE_MASK)) {
		n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
		if (n != 0) {
			remain = (n < 0 ? remain : 0);
			goto out;
		}

		addr += PAGE_SIZE;
		remain -= PAGE_SIZE;
	}
	if (remain == 0)
		goto out;

	n = do_op_one_page(addr, remain, is_write, op, arg);
	if (n != 0) {
		remain = (n < 0 ? remain : 0);
		goto out;
	}

	return 0;
 out:
	return remain;
}

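/*
 * Chunk callbacks for buffer_op().  @arg carries the kernel-side cursor,
 * which each invocation advances by the number of bytes it handled.
 */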
static int copy_chunk_from_user(unsigned long from, int len, void *arg)
{
	unsigned long *to_ptr = arg, to = *to_ptr;

	memcpy((void *) to, (void *) from, len);
	*to_ptr += len;
	return 0;
}

unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (uaccess_kernel()) {
		memcpy(to, (__force void *) from, n);
		return 0;
	}

	return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to);
}
EXPORT_SYMBOL(raw_copy_from_user);

static int copy_chunk_to_user(unsigned long to, int len, void *arg)
{
	unsigned long *from_ptr = arg, from = *from_ptr;

	memcpy((void *) to, (void *) from, len);
	*from_ptr += len;
	return 0;
}

unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (uaccess_kernel()) {
		memcpy((__force void *) to, from, n);
		return 0;
	}

	return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from);
}
EXPORT_SYMBOL(raw_copy_to_user);

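/*
 * Copy one chunk of a user string.  A positive return value tells
 * buffer_op() to stop the walk early; here it means a NUL terminator
 * was found within this chunk.
 */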
static int strncpy_chunk_from_user(unsigned long from, int len, void *arg)
{
	char **to_ptr = arg, *to = *to_ptr;
	int n;

	strncpy(to, (void *) from, len);
	n = strnlen(to, len);
	*to_ptr += n;

	if (n < len)
		return 1;
	return 0;
}

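/*
 * Copy a NUL-terminated string from user space into @dst, copying at
 * most @count bytes.  Returns the length of the copied string excluding
 * the terminator (@count if it was truncated), or -EFAULT on a fault.
 */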
long strncpy_from_user(char *dst, const char __user *src, long count)
{
	long n;
	char *ptr = dst;

	if (!access_ok(src, 1))
		return -EFAULT;

	if (uaccess_kernel()) {
		strncpy(dst, (__force void *) src, count);
		return strnlen(dst, count);
	}

	n = buffer_op((unsigned long) src, count, 0, strncpy_chunk_from_user,
		      &ptr);
	if (n != 0)
		return -EFAULT;
	return strnlen(dst, count);
}
EXPORT_SYMBOL(strncpy_from_user);

static int clear_chunk(unsigned long addr, int len, void *unused)
{
	memset((void *) addr, 0, len);
	return 0;
}

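/*
 * Zero @len bytes of user memory at @mem.  Returns the number of bytes
 * that could not be cleared, i.e. 0 on success.
 */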
unsigned long __clear_user(void __user *mem, unsigned long len)
{
	if (uaccess_kernel()) {
		memset((__force void *) mem, 0, len);
		return 0;
	}

	return buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL);
}
EXPORT_SYMBOL(__clear_user);

static int strnlen_chunk(unsigned long str, int len, void *arg)
{
	int *len_ptr = arg, n;

	n = strnlen((void *) str, len);
	*len_ptr += n;

	if (n < len)
		return 1;
	return 0;
}

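/*
 * Return the length of a user string *including* the trailing NUL
 * (@len + 1 if no NUL was found within @len bytes), 0 if a fault
 * occurred while scanning, or -EFAULT if @str is not accessible at all.
 */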
long strnlen_user(const char __user *str, long len)
{
	int count = 0, n;

	if (!access_ok(str, 1))
		return -EFAULT;

	if (uaccess_kernel())
		return strnlen((__force char *) str, len) + 1;

	n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count);
	if (n == 0)
		return count + 1;
	return 0;
}
EXPORT_SYMBOL(strnlen_user);

/**
 * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
 *			  argument on a user space futex, returning the
 *			  previous futex value through @oval.
 *
 * @op:	operation to execute
 * @oparg:	argument to the operation
 * @oval:	pointer at which to store the previous futex value
 * @uaddr:	pointer to user space address
 *
 * Return:
 * 0 - On success
 * -EFAULT - User access resulted in a page fault
 * -EAGAIN - Atomic operation was unable to complete due to contention
 * -ENOSYS - Operation not supported
 */
int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
	int oldval, ret;
	struct page *page;
	unsigned long addr = (unsigned long) uaddr;
	pte_t *pte;

	ret = -EFAULT;
	if (!access_ok(uaddr, sizeof(*uaddr)))
		return -EFAULT;
	preempt_disable();
	pte = maybe_map(addr, 1);
	if (pte == NULL)
		goto out_inuser;

	page = pte_page(*pte);
#ifdef CONFIG_64BIT
	pagefault_disable();
	addr = (unsigned long) page_address(page) +
			(((unsigned long) addr) & ~PAGE_MASK);
#else
	addr = (unsigned long) kmap_atomic(page) +
		((unsigned long) addr & ~PAGE_MASK);
#endif
	uaddr = (u32 *) addr;
	oldval = *uaddr;

	ret = 0;

	switch (op) {
	case FUTEX_OP_SET:
		*uaddr = oparg;
		break;
	case FUTEX_OP_ADD:
		*uaddr += oparg;
		break;
	case FUTEX_OP_OR:
		*uaddr |= oparg;
		break;
	case FUTEX_OP_ANDN:
		*uaddr &= ~oparg;
		break;
	case FUTEX_OP_XOR:
		*uaddr ^= oparg;
		break;
	default:
		ret = -ENOSYS;
	}
#ifdef CONFIG_64BIT
	pagefault_enable();
#else
	kunmap_atomic((void *)addr);
#endif

out_inuser:
	preempt_enable();

	if (ret == 0)
		*oval = oldval;

	return ret;
}
EXPORT_SYMBOL(arch_futex_atomic_op_inuser);

/**
 * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
 *				uaddr with newval if the current value is
 *				oldval.
 * @uval:	pointer to store content of @uaddr
 * @uaddr:	pointer to user space address
 * @oldval:	old value
 * @newval:	new value to store to @uaddr
 *
 * Return:
 * 0 - On success
 * -EFAULT - User access resulted in a page fault
 * -EAGAIN - Atomic operation was unable to complete due to contention
 * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
 */
int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
			      u32 oldval, u32 newval)
{
	struct page *page;
	pte_t *pte;
	int ret = -EFAULT;

	if (!access_ok(uaddr, sizeof(*uaddr)))
		return -EFAULT;

	preempt_disable();
	pte = maybe_map((unsigned long) uaddr, 1);
	if (pte == NULL)
		goto out_inatomic;

	page = pte_page(*pte);
#ifdef CONFIG_64BIT
	pagefault_disable();
	uaddr = page_address(page) + (((unsigned long) uaddr) & ~PAGE_MASK);
#else
	uaddr = kmap_atomic(page) + ((unsigned long) uaddr & ~PAGE_MASK);
#endif

	/*
	 * The cmpxchg() return value is deliberately discarded: the caller
	 * detects whether the exchange happened by comparing *uval against
	 * the value it expected.
	 */
	*uval = *uaddr;
	cmpxchg(uaddr, oldval, newval);
	ret = 0;

#ifdef CONFIG_64BIT
	pagefault_enable();
#else
	kunmap_atomic(uaddr);
#endif

out_inatomic:
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(futex_atomic_cmpxchg_inatomic);