xref: /linux/include/linux/uaccess.h (revision 509d3f45847627f4c5cdce004c3ec79262b5239c)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __LINUX_UACCESS_H__
3 #define __LINUX_UACCESS_H__
4 
5 #include <linux/cleanup.h>
6 #include <linux/fault-inject-usercopy.h>
7 #include <linux/instrumented.h>
8 #include <linux/minmax.h>
9 #include <linux/nospec.h>
10 #include <linux/sched.h>
11 #include <linux/ucopysize.h>
12 
13 #include <asm/uaccess.h>
14 
15 /*
16  * Architectures that support memory tagging (assigning tags to memory regions,
17  * embedding these tags into addresses that point to these memory regions, and
18  * checking that the memory and the pointer tags match on memory accesses)
19  * redefine this macro to strip tags from pointers.
20  *
21  * Passing down mm_struct allows untagging rules to be defined on a
22  * per-process basis.
23  *
24  * It's defined as a no-op for architectures that don't support memory tagging.
25  */
26 #ifndef untagged_addr
27 #define untagged_addr(addr) (addr)
28 #endif
29 
30 #ifndef untagged_addr_remote
31 #define untagged_addr_remote(mm, addr)	({		\
32 	mmap_assert_locked(mm);				\
33 	untagged_addr(addr);				\
34 })
35 #endif
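
/*
 * Illustrative sketch (not part of this header; 'uptr' is a made-up name):
 * code that compares a user pointer against VMA boundaries strips any
 * architecture tag first, e.g. with the mmap lock held:
 *
 *	unsigned long addr = untagged_addr((unsigned long)uptr);
 *	struct vm_area_struct *vma = find_vma(current->mm, addr);
 *
 * untagged_addr_remote() is the variant for another process' mm, which is
 * why it asserts that the mmap lock is held.
 */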
36 
37 #ifdef masked_user_access_begin
38  #define can_do_masked_user_access() 1
39 # ifndef masked_user_write_access_begin
40 #  define masked_user_write_access_begin masked_user_access_begin
41 # endif
42 # ifndef masked_user_read_access_begin
43 #  define masked_user_read_access_begin masked_user_access_begin
44 # endif
45 #else
46  #define can_do_masked_user_access() 0
47  #define masked_user_access_begin(src) NULL
48  #define masked_user_read_access_begin(src) NULL
49  #define masked_user_write_access_begin(src) NULL
50  #define mask_user_address(src) (src)
51 #endif
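
/*
 * Illustrative sketch of the intended usage pattern (mirrored by
 * _inline_copy_from_user() below): with masked access the pointer is
 * sanitized instead of being range-checked, otherwise access_ok() plus a
 * speculation barrier is used:
 *
 *	if (can_do_masked_user_access())
 *		from = mask_user_address(from);
 *	else {
 *		if (!access_ok(from, n))
 *			return -EFAULT;
 *		barrier_nospec();
 *	}
 */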
52 
53 /*
54  * Architectures should provide two primitives (raw_copy_{to,from}_user())
55  * and get rid of their private instances of copy_{to,from}_user() and
56  * __copy_{to,from}_user{,_inatomic}().
57  *
58  * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
59  * return the amount left to copy.  They should assume that access_ok() has
60  * already been checked (and succeeded); they should *not* zero-pad anything.
61  * No KASAN or object size checks either - those belong here.
62  *
63  * Both of these functions should attempt to copy size bytes starting at from
64  * into the area starting at to.  They must not fetch or store anything
65  * outside of those areas.  Return value must be between 0 (everything
66  * copied successfully) and size (nothing copied).
67  *
68  * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
69  * at to must become equal to the bytes fetched from the corresponding area
70  * starting at from.  All data past to + size - N must be left unmodified.
71  *
72  * If copying succeeds, the return value must be 0.  If some data cannot be
73  * fetched, it is permitted to store less than had been fetched; the only
74  * hard requirement is that not storing anything at all (i.e. returning size)
75  * should happen only when nothing could be copied.  In other words, you don't
76  * have to squeeze as much as possible - it is allowed, but not necessary.
77  *
78  * For raw_copy_from_user(), to always points to kernel memory and no faults
79  * on store should happen.  Interpretation of from is affected by set_fs().
80  * For raw_copy_to_user() it's the other way round.
81  *
82  * Both can be inlined - it's up to each architecture whether it wants to bother
83  * with that.  They should not be used directly; they are used to implement
84  * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
85  * that are used instead.  Out of those, the __... ones are inlined.  Plain
86  * copy_{to,from}_user() might or might not be inlined.  If you want them
87  * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
88  *
89  * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
90  * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
91  * at all; their callers absolutely must check the return value.
92  *
93  * Biarch ones should also provide raw_copy_in_user() - similar to the above,
94  * but both source and destination are __user pointers (affected by set_fs()
95  * as usual) and both source and destination can trigger faults.
96  */
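
/*
 * Illustrative sketch of how a caller interprets the contract above
 * (variable names are made up): if the copy returns N, exactly size - N
 * leading bytes of the destination are valid:
 *
 *	unsigned long left = __copy_from_user(kbuf, ubuf, size);
 *	size_t copied = size - left;
 *
 *	if (left) {
 *		// short copy: kbuf[0..copied-1] is valid, the rest of
 *		// kbuf is unmodified (no zero-padding here)
 *	}
 */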
97 
98 static __always_inline __must_check unsigned long
99 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
100 {
101 	unsigned long res;
102 
103 	instrument_copy_from_user_before(to, from, n);
104 	check_object_size(to, n, false);
105 	res = raw_copy_from_user(to, from, n);
106 	instrument_copy_from_user_after(to, from, n, res);
107 	return res;
108 }
109 
110 static __always_inline __must_check unsigned long
111 __copy_from_user(void *to, const void __user *from, unsigned long n)
112 {
113 	unsigned long res;
114 
115 	might_fault();
116 	instrument_copy_from_user_before(to, from, n);
117 	if (should_fail_usercopy())
118 		return n;
119 	check_object_size(to, n, false);
120 	res = raw_copy_from_user(to, from, n);
121 	instrument_copy_from_user_after(to, from, n, res);
122 	return res;
123 }
124 
125 /**
126  * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
127  * @to:   Destination address, in user space.
128  * @from: Source address, in kernel space.
129  * @n:    Number of bytes to copy.
130  *
131  * Context: User context only.
132  *
133  * Copy data from kernel space to user space.  Caller must check
134  * the specified block with access_ok() before calling this function.
135  * The caller should also make sure the user space address is pinned
136  * so that we don't take a page fault and sleep.
137  */
138 static __always_inline __must_check unsigned long
139 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
140 {
141 	if (should_fail_usercopy())
142 		return n;
143 	instrument_copy_to_user(to, from, n);
144 	check_object_size(from, n, true);
145 	return raw_copy_to_user(to, from, n);
146 }
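
/*
 * Illustrative sketch (names are made up): the caller validates the range
 * up front and disables page faults around the atomic copy, handling a
 * short copy itself:
 *
 *	if (!access_ok(uaddr, len))
 *		return -EFAULT;
 *	pagefault_disable();
 *	left = __copy_to_user_inatomic(uaddr, kbuf, len);
 *	pagefault_enable();
 *	if (left) {
 *		// fall back to a sleeping copy_to_user() or bail out
 *	}
 */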
147 
148 static __always_inline __must_check unsigned long
149 __copy_to_user(void __user *to, const void *from, unsigned long n)
150 {
151 	might_fault();
152 	if (should_fail_usercopy())
153 		return n;
154 	instrument_copy_to_user(to, from, n);
155 	check_object_size(from, n, true);
156 	return raw_copy_to_user(to, from, n);
157 }
158 
159 /*
160  * Architectures that #define INLINE_COPY_TO_USER use this function
161  * directly in the normal copy_to/from_user(); the other ones go
162  * through an extern _copy_to/from_user(), which expands the same code
163  * here.
164  */
165 static inline __must_check unsigned long
166 _inline_copy_from_user(void *to, const void __user *from, unsigned long n)
167 {
168 	unsigned long res = n;
169 	might_fault();
170 	if (should_fail_usercopy())
171 		goto fail;
172 	if (can_do_masked_user_access())
173 		from = mask_user_address(from);
174 	else {
175 		if (!access_ok(from, n))
176 			goto fail;
177 		/*
178 		 * Ensure that bad access_ok() speculation will not
179 		 * lead to nasty side effects *after* the copy is
180 		 * finished:
181 		 */
182 		barrier_nospec();
183 	}
184 	instrument_copy_from_user_before(to, from, n);
185 	res = raw_copy_from_user(to, from, n);
186 	instrument_copy_from_user_after(to, from, n, res);
187 	if (likely(!res))
188 		return 0;
189 fail:
190 	memset(to + (n - res), 0, res);
191 	return res;
192 }
193 #ifndef INLINE_COPY_FROM_USER
194 extern __must_check unsigned long
195 _copy_from_user(void *, const void __user *, unsigned long);
196 #endif
197 
198 static inline __must_check unsigned long
199 _inline_copy_to_user(void __user *to, const void *from, unsigned long n)
200 {
201 	might_fault();
202 	if (should_fail_usercopy())
203 		return n;
204 	if (access_ok(to, n)) {
205 		instrument_copy_to_user(to, from, n);
206 		n = raw_copy_to_user(to, from, n);
207 	}
208 	return n;
209 }
210 #ifndef INLINE_COPY_TO_USER
211 extern __must_check unsigned long
212 _copy_to_user(void __user *, const void *, unsigned long);
213 #endif
214 
215 static __always_inline unsigned long __must_check
216 copy_from_user(void *to, const void __user *from, unsigned long n)
217 {
218 	if (!check_copy_size(to, n, false))
219 		return n;
220 #ifdef INLINE_COPY_FROM_USER
221 	return _inline_copy_from_user(to, from, n);
222 #else
223 	return _copy_from_user(to, from, n);
224 #endif
225 }
226 
227 static __always_inline unsigned long __must_check
228 copy_to_user(void __user *to, const void *from, unsigned long n)
229 {
230 	if (!check_copy_size(from, n, true))
231 		return n;
232 
233 #ifdef INLINE_COPY_TO_USER
234 	return _inline_copy_to_user(to, from, n);
235 #else
236 	return _copy_to_user(to, from, n);
237 #endif
238 }
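
/*
 * Typical usage (illustrative; 'struct foo', 'karg' and 'uarg' are made-up
 * names): both helpers return the number of bytes that could *not* be
 * copied, so any non-zero result is treated as a fault:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	// ... operate on karg ...
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */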
239 
240 #ifndef copy_mc_to_kernel
241 /*
242  * Without arch opt-in this generic copy_mc_to_kernel() will not handle
243  * #MC (or arch equivalent) during source read.
244  */
245 static inline unsigned long __must_check
246 copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
247 {
248 	memcpy(dst, src, cnt);
249 	return 0;
250 }
251 #endif
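
/*
 * Illustrative sketch: callers treat the return value as the number of
 * bytes not copied and truncate the transfer accordingly when the
 * architecture provides a machine-check aware implementation:
 *
 *	rem = copy_mc_to_kernel(dst, src, len);
 *	copied = len - rem;
 */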
252 
253 static __always_inline void pagefault_disabled_inc(void)
254 {
255 	current->pagefault_disabled++;
256 }
257 
258 static __always_inline void pagefault_disabled_dec(void)
259 {
260 	current->pagefault_disabled--;
261 }
262 
263 /*
264  * These routines enable/disable the pagefault handler. If disabled, it will
265  * not take any locks and go straight to the fixup table.
266  *
267  * User access methods will not sleep when called from a pagefault_disabled()
268  * environment.
269  */
270 static inline void pagefault_disable(void)
271 {
272 	pagefault_disabled_inc();
273 	/*
274 	 * make sure to have issued the store before a pagefault
275 	 * can hit.
276 	 */
277 	barrier();
278 }
279 
280 static inline void pagefault_enable(void)
281 {
282 	/*
283 	 * make sure to issue those last loads/stores before enabling
284 	 * the pagefault handler again.
285 	 */
286 	barrier();
287 	pagefault_disabled_dec();
288 }
289 
290 /*
291  * Is the pagefault handler disabled? If so, user access methods will not sleep.
292  */
293 static inline bool pagefault_disabled(void)
294 {
295 	return current->pagefault_disabled != 0;
296 }
297 
298 /*
299  * The pagefault handler is in general disabled by pagefault_disable() or
300  * when in irq context (via in_atomic()).
301  *
302  * This function should only be used by the fault handlers. Other users should
303  * stick to pagefault_disabled().
304  * Please NEVER use preempt_disable() to disable the fault handler. With
305  * !CONFIG_PREEMPT_COUNT, it is a NOP, so the handler won't actually be disabled,
306  * and in_atomic() will report different values depending on CONFIG_PREEMPT_COUNT.
307  */
308 #define faulthandler_disabled() (pagefault_disabled() || in_atomic())
309 
310 DEFINE_LOCK_GUARD_0(pagefault, pagefault_disable(), pagefault_enable())
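
/*
 * The guard defined above enables scope-based pagefault disabling via
 * linux/cleanup.h. A minimal sketch (names are made up):
 *
 *	scoped_guard(pagefault) {
 *		left = __copy_from_user_inatomic(kbuf, uaddr, len);
 *	}
 *	// pagefault_enable() has run here, including on early returns
 *	// taken inside the scope.
 */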
311 
312 #ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
313 
314 /**
315  * probe_subpage_writeable: probe the user range for write faults at sub-page
316  *			    granularity (e.g. arm64 MTE)
317  * @uaddr: start of address range
318  * @size: size of address range
319  *
320  * Returns 0 on success, or the number of bytes not probed on fault.
321  *
322  * It is expected that the caller checked for the write permission of each
323  * page in the range either by put_user() or GUP. The architecture port can
324  * implement a more efficient get_user() probing if the same sub-page faults
325  * are triggered by either a read or a write.
326  */
327 static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
328 {
329 	return 0;
330 }
331 
332 #endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
333 
334 #ifndef ARCH_HAS_NOCACHE_UACCESS
335 
336 static inline __must_check unsigned long
337 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
338 				  unsigned long n)
339 {
340 	return __copy_from_user_inatomic(to, from, n);
341 }
342 
343 #endif		/* ARCH_HAS_NOCACHE_UACCESS */
344 
345 extern __must_check int check_zeroed_user(const void __user *from, size_t size);
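
/*
 * check_zeroed_user() returns 1 if the range is entirely zeroed, 0 if a
 * non-zero byte was found and a negative error on fault. Illustrative
 * sketch, roughly as copy_struct_from_user() below uses it to reject
 * unknown trailing data:
 *
 *	ret = check_zeroed_user(src + ksize, usize - ksize);
 *	if (ret <= 0)
 *		return ret ?: -E2BIG;
 */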
346 
347 /**
348  * copy_struct_from_user: copy a struct from userspace
349  * @dst:   Destination address, in kernel space. This buffer must be @ksize
350  *         bytes long.
351  * @ksize: Size of @dst struct.
352  * @src:   Source address, in userspace.
353  * @usize: (Alleged) size of @src struct.
354  *
355  * Copies a struct from userspace to kernel space, in a way that guarantees
356  * backwards-compatibility for struct syscall arguments (as long as future
357  * struct extensions are made such that all new fields are *appended* to the
358  * old struct, and zeroed-out new fields have the same meaning as the old
359  * struct).
360  *
361  * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
362  * The recommended usage is something like the following:
363  *
364  *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
365  *   {
366  *      int err;
367  *      struct foo karg = {};
368  *
369  *      if (usize > PAGE_SIZE)
370  *        return -E2BIG;
371  *      if (usize < FOO_SIZE_VER0)
372  *        return -EINVAL;
373  *
374  *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
375  *      if (err)
376  *        return err;
377  *
378  *      // ...
379  *   }
380  *
381  * There are three cases to consider:
382  *  * If @usize == @ksize, then it's copied verbatim.
383  *  * If @usize < @ksize, then the userspace has passed an old struct to a
384  *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
385  *    are to be zero-filled.
386  *  * If @usize > @ksize, then the userspace has passed a new struct to an
387  *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
388  *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
389  *
390  * Returns (in all cases, some data may have been copied):
391  *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
392  *  * -EFAULT: access to userspace failed.
393  */
394 static __always_inline __must_check int
395 copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
396 		      size_t usize)
397 {
398 	size_t size = min(ksize, usize);
399 	size_t rest = max(ksize, usize) - size;
400 
401 	/* Double check if ksize is larger than a known object size. */
402 	if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
403 		return -E2BIG;
404 
405 	/* Deal with trailing bytes. */
406 	if (usize < ksize) {
407 		memset(dst + size, 0, rest);
408 	} else if (usize > ksize) {
409 		int ret = check_zeroed_user(src + size, rest);
410 		if (ret <= 0)
411 			return ret ?: -E2BIG;
412 	}
413 	/* Copy the interoperable parts of the struct. */
414 	if (copy_from_user(dst, src, size))
415 		return -EFAULT;
416 	return 0;
417 }
418 
419 /**
420  * copy_struct_to_user: copy a struct to userspace
421  * @dst:   Destination address, in userspace. This buffer must be @ksize
422  *         bytes long.
423  * @usize: (Alleged) size of @dst struct.
424  * @src:   Source address, in kernel space.
425  * @ksize: Size of @src struct.
426  * @ignored_trailing: Set to %true if there was a non-zero byte in @src that
427  * userspace cannot see because it is using a smaller struct.
428  *
429  * Copies a struct from kernel space to userspace, in a way that guarantees
430  * backwards-compatibility for struct syscall arguments (as long as future
431  * struct extensions are made such that all new fields are *appended* to the
432  * old struct, and zeroed-out new fields have the same meaning as the old
433  * struct).
434  *
435  * Some syscalls may wish to make sure that userspace knows about everything in
436  * the struct, and if there is a non-zero value that userspace doesn't know
437  * about, they want to return an error (such as -EMSGSIZE) or have some other
438  * fallback (such as adding a "you're missing some information" flag). If
439  * @ignored_trailing is non-%NULL, it will be set to %true if there was a
440  * non-zero byte that could not be copied to userspace (ie. was past @usize).
441  *
442  * While unconditionally returning an error in this case is the simplest
443  * solution, for maximum backward compatibility you should try to only return
444  * -EMSGSIZE if the user explicitly requested the data that couldn't be copied.
445  * Note that structure sizes can change due to header changes and simple
446  * recompilations without code changes(!), so if you care about
447  * @ignored_trailing you probably want to make sure that any new field data is
448  * associated with a flag. Otherwise you might assume that a program knows
449  * about data it does not.
450  *
451  * @ksize is just sizeof(*src), and @usize should've been passed by userspace.
452  * The recommended usage is something like the following:
453  *
454  *   SYSCALL_DEFINE2(foobar, struct foo __user *, uarg, size_t, usize)
455  *   {
456  *      int err;
457  *      bool ignored_trailing;
458  *      struct foo karg = {};
459  *
460  *      if (usize > PAGE_SIZE)
461  *		return -E2BIG;
462  *      if (usize < FOO_SIZE_VER0)
463  *		return -EINVAL;
464  *
465  *      // ... modify karg somehow ...
466  *
467  *      err = copy_struct_to_user(uarg, usize, &karg, sizeof(karg),
468  *				  &ignored_trailing);
469  *      if (err)
470  *		return err;
471  *      if (ignored_trailing)
472  *		return -EMSGSIZE;
473  *
474  *      // ...
475  *   }
476  *
477  * There are three cases to consider:
478  *  * If @usize == @ksize, then it's copied verbatim.
479  *  * If @usize < @ksize, then the kernel is trying to pass userspace a newer
480  *    struct than it supports. Thus we only copy the interoperable portions
481  *    (@usize) and ignore the rest (but @ignored_trailing is set to %true if
482  *    any of the trailing (@ksize - @usize) bytes are non-zero).
483  *  * If @usize > @ksize, then the kernel is trying to pass userspace an older
484  *    struct than userspace supports. In order to make sure the
485  *    unknown-to-the-kernel fields don't contain garbage values, we zero the
486  *    trailing (@usize - @ksize) bytes.
487  *
488  * Returns (in all cases, some data may have been copied):
489  *  * -EFAULT: access to userspace failed.
490  */
491 static __always_inline __must_check int
492 copy_struct_to_user(void __user *dst, size_t usize, const void *src,
493 		    size_t ksize, bool *ignored_trailing)
494 {
495 	size_t size = min(ksize, usize);
496 	size_t rest = max(ksize, usize) - size;
497 
498 	/* Double check if ksize is larger than a known object size. */
499 	if (WARN_ON_ONCE(ksize > __builtin_object_size(src, 1)))
500 		return -E2BIG;
501 
502 	/* Deal with trailing bytes. */
503 	if (usize > ksize) {
504 		if (clear_user(dst + size, rest))
505 			return -EFAULT;
506 	}
507 	if (ignored_trailing)
508 		*ignored_trailing = ksize < usize &&
509 			memchr_inv(src + size, 0, rest) != NULL;
510 	/* Copy the interoperable parts of the struct. */
511 	if (copy_to_user(dst, src, size))
512 		return -EFAULT;
513 	return 0;
514 }
515 
516 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);
517 
518 long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
519 long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);
520 
521 long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
522 long notrace copy_to_user_nofault(void __user *dst, const void *src,
523 		size_t size);
524 
525 long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
526 		long count);
527 
528 long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
529 		long count);
530 long strnlen_user_nofault(const void __user *unsafe_addr, long count);
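
/*
 * Illustrative sketch: the *_nofault() helpers are for contexts that must
 * not take page faults (e.g. under pagefault_disable() or from tracing
 * code); they return 0 or a negative error instead of a byte count:
 *
 *	if (copy_from_user_nofault(buf, uaddr, len))
 *		return -EFAULT;
 */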
531 
532 #ifdef arch_get_kernel_nofault
533 /*
534  * Wrap the architecture implementation so that @label can be outside of a
535  * cleanup() scope. A regular C goto works correctly, but ASM goto does
536  * not. Clang rejects such an attempt, but GCC silently emits buggy code.
537  */
538 #define __get_kernel_nofault(dst, src, type, label)		\
539 do {								\
540 	__label__ local_label;					\
541 	arch_get_kernel_nofault(dst, src, type, local_label);	\
542 	if (0) {						\
543 	local_label:						\
544 		goto label;					\
545 	}							\
546 } while (0)
547 
548 #define __put_kernel_nofault(dst, src, type, label)		\
549 do {								\
550 	__label__ local_label;					\
551 	arch_put_kernel_nofault(dst, src, type, local_label);	\
552 	if (0) {						\
553 	local_label:						\
554 		goto label;					\
555 	}							\
556 } while (0)
557 
558 #elif !defined(__get_kernel_nofault) /* arch_get_kernel_nofault */
559 
560 #define __get_kernel_nofault(dst, src, type, label)	\
561 do {							\
562 	type __user *p = (type __force __user *)(src);	\
563 	type data;					\
564 	if (__get_user(data, p))			\
565 		goto label;				\
566 	*(type *)dst = data;				\
567 } while (0)
568 
569 #define __put_kernel_nofault(dst, src, type, label)	\
570 do {							\
571 	type __user *p = (type __force __user *)(dst);	\
572 	type data = *(type *)src;			\
573 	if (__put_user(data, p))			\
574 		goto label;				\
575 } while (0)
576 
577 #endif  /* !__get_kernel_nofault */
578 
579 /**
580  * get_kernel_nofault(): safely attempt to read from a location
581  * @val: read into this variable
582  * @ptr: address to read from
583  *
584  * Returns 0 on success, or -EFAULT.
585  */
586 #define get_kernel_nofault(val, ptr) ({				\
587 	const typeof(val) *__gk_ptr = (ptr);			\
588 	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
589 })
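
/*
 * Illustrative sketch ('kptr' is a made-up name): read a kernel value that
 * may be unmapped, e.g. from a debugging or crash path:
 *
 *	unsigned long word;
 *
 *	if (get_kernel_nofault(word, kptr))
 *		return -EFAULT;
 */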
590 
591 #ifdef user_access_begin
592 
593 #ifdef arch_unsafe_get_user
594 /*
595  * Wrap the architecture implementation so that @label can be outside of a
596  * cleanup() scope. A regular C goto works correctly, but ASM goto does
597  * not. Clang rejects such an attempt, but GCC silently emits buggy code.
598  *
599  * Some architectures use internal local labels already, but this extra
600  * indirection here is harmless because the compiler optimizes it out
601  * completely in any case. This construct just ensures that the ASM GOTO
602  * target is always in the local scope. The C goto 'label' works correctly
603  * when leaving a cleanup() scope.
604  */
605 #define unsafe_get_user(x, ptr, label)			\
606 do {							\
607 	__label__ local_label;				\
608 	arch_unsafe_get_user(x, ptr, local_label);	\
609 	if (0) {					\
610 	local_label:					\
611 		goto label;				\
612 	}						\
613 } while (0)
614 
615 #define unsafe_put_user(x, ptr, label)			\
616 do {							\
617 	__label__ local_label;				\
618 	arch_unsafe_put_user(x, ptr, local_label);	\
619 	if (0) {					\
620 	local_label:					\
621 		goto label;				\
622 	}						\
623 } while (0)
624 #endif /* arch_unsafe_get_user */
625 
626 #else /* user_access_begin */
627 #define user_access_begin(ptr,len) access_ok(ptr, len)
628 #define user_access_end() do { } while (0)
629 #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
630 #define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
631 #define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
632 #define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
633 #define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
634 static inline unsigned long user_access_save(void) { return 0UL; }
635 static inline void user_access_restore(unsigned long flags) { }
636 #endif /* !user_access_begin */
637 
638 #ifndef user_write_access_begin
639 #define user_write_access_begin user_access_begin
640 #define user_write_access_end user_access_end
641 #endif
642 #ifndef user_read_access_begin
643 #define user_read_access_begin user_access_begin
644 #define user_read_access_end user_access_end
645 #endif
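
/*
 * Illustrative sketch of the begin/end pattern these primitives implement
 * ('uarg', 'field' and the label are made-up names); every exit path from
 * the region must call the matching *_access_end():
 *
 *	if (!user_read_access_begin(uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	unsafe_get_user(val, &uarg->field, Efault);
 *	user_read_access_end();
 *	return 0;
 * Efault:
 *	user_read_access_end();
 *	return -EFAULT;
 */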
646 
647 /* Define RW variant so the below _mode macro expansion works */
648 #define masked_user_rw_access_begin(u)	masked_user_access_begin(u)
649 #define user_rw_access_begin(u, s)	user_access_begin(u, s)
650 #define user_rw_access_end()		user_access_end()
651 
652 /* Scoped user access */
653 #define USER_ACCESS_GUARD(_mode)				\
654 static __always_inline void __user *				\
655 class_user_##_mode##_begin(void __user *ptr)			\
656 {								\
657 	return ptr;						\
658 }								\
659 								\
660 static __always_inline void					\
661 class_user_##_mode##_end(void __user *ptr)			\
662 {								\
663 	user_##_mode##_access_end();				\
664 }								\
665 								\
666 DEFINE_CLASS(user_ ##_mode## _access, void __user *,		\
667 	     class_user_##_mode##_end(_T),			\
668 	     class_user_##_mode##_begin(ptr), void __user *ptr)	\
669 								\
670 static __always_inline class_user_##_mode##_access_t		\
671 class_user_##_mode##_access_ptr(void __user *scope)		\
672 {								\
673 	return scope;						\
674 }
675 
676 USER_ACCESS_GUARD(read)
677 USER_ACCESS_GUARD(write)
678 USER_ACCESS_GUARD(rw)
679 #undef USER_ACCESS_GUARD
680 
681 /**
682  * __scoped_user_access_begin - Start a scoped user access
683  * @mode:	The mode of the access class (read, write, rw)
684  * @uptr:	The pointer to access user space memory
685  * @size:	Size of the access
686  * @elbl:	Error label to goto when the access region is rejected
687  *
688  * Internal helper for __scoped_user_access(). Don't use directly.
689  */
690 #define __scoped_user_access_begin(mode, uptr, size, elbl)		\
691 ({									\
692 	typeof(uptr) __retptr;						\
693 									\
694 	if (can_do_masked_user_access()) {				\
695 		__retptr = masked_user_##mode##_access_begin(uptr);	\
696 	} else {							\
697 		__retptr = uptr;					\
698 		if (!user_##mode##_access_begin(uptr, size))		\
699 			goto elbl;					\
700 	}								\
701 	__retptr;							\
702 })
703 
704 /**
705  * __scoped_user_access - Open a scope for user access
706  * @mode:	The mode of the access class (read, write, rw)
707  * @uptr:	The pointer to access user space memory
708  * @size:	Size of the access
709  * @elbl:	Error label to goto when the access region is rejected. It
710  *		must be placed outside the scope
711  *
712  * If the user access function inside the scope requires a fault label, it
713  * can use @elbl or a different label outside the scope, which requires
714  * that user access which is implemented with ASM GOTO has been properly
715  * wrapped. See unsafe_get_user() for reference.
716  *
717  *	scoped_user_rw_access(ptr, efault) {
718  *		unsafe_get_user(rval, &ptr->rval, efault);
719  *		unsafe_put_user(wval, &ptr->wval, efault);
720  *	}
721  *	return 0;
722  *  efault:
723  *	return -EFAULT;
724  *
725  * The scope is internally implemented as an auto-terminating nested for()
726  * loop, which can be left with 'return', 'break' and 'goto' at any
727  * point.
728  *
729  * When the scope is left, user_##@mode##_access_end() is automatically
730  * invoked.
731  *
732  * When the architecture supports masked user access and the access region
733  * which is determined by @uptr and @size is not a valid user space
734  * address (i.e. not below TASK_SIZE), the scope sets the pointer to a faulting
735  * user space address and does not terminate early. This optimizes for the good
736  * case and lets the performance-uncritical bad case go through the fault.
737  *
738  * The eventual modification of the pointer is limited to the scope.
739  * Outside of the scope the original pointer value is unmodified, so that
740  * the original pointer value is available for diagnostic purposes in an
741  * out of scope fault path.
742  *
743  * Nesting scoped user access into a user access scope is invalid and fails
744  * the build. Nesting into other guards, e.g. the pagefault guard, is safe.
745  *
746  * The masked variant does not check the size of the access and relies on a
747  * mapping hole (e.g. a guard page) to catch an out-of-range pointer, so the
748  * first access to user memory inside the scope has to be within
749  * @uptr ... @uptr + PAGE_SIZE - 1.
750  *
751  * Don't use directly. Use scoped_user_$MODE_access() instead.
752  */
753 #define __scoped_user_access(mode, uptr, size, elbl)					\
754 for (bool done = false; !done; done = true)						\
755 	for (void __user *_tmpptr = __scoped_user_access_begin(mode, uptr, size, elbl); \
756 	     !done; done = true)							\
757 		for (CLASS(user_##mode##_access, scope)(_tmpptr); !done; done = true)	\
758 			/* Force modified pointer usage within the scope */		\
759 			for (const typeof(uptr) uptr = _tmpptr; !done; done = true)
760 
761 /**
762  * scoped_user_read_access_size - Start a scoped user read access with given size
763  * @usrc:	Pointer to the user space address to read from
764  * @size:	Size of the access starting from @usrc
765  * @elbl:	Error label to goto when the access region is rejected
766  *
767  * For further information see __scoped_user_access() above.
768  */
769 #define scoped_user_read_access_size(usrc, size, elbl)		\
770 	__scoped_user_access(read, usrc, size, elbl)
771 
772 /**
773  * scoped_user_read_access - Start a scoped user read access
774  * @usrc:	Pointer to the user space address to read from
775  * @elbl:	Error label to goto when the access region is rejected
776  *
777  * The size of the access starting from @usrc is determined via sizeof(*@usrc).
778  *
779  * For further information see __scoped_user_access() above.
780  */
781 #define scoped_user_read_access(usrc, elbl)				\
782 	scoped_user_read_access_size(usrc, sizeof(*(usrc)), elbl)
783 
784 /**
785  * scoped_user_write_access_size - Start a scoped user write access with given size
786  * @udst:	Pointer to the user space address to write to
787  * @size:	Size of the access starting from @udst
788  * @elbl:	Error label to goto when the access region is rejected
789  *
790  * For further information see __scoped_user_access() above.
791  */
792 #define scoped_user_write_access_size(udst, size, elbl)			\
793 	__scoped_user_access(write, udst, size, elbl)
794 
795 /**
796  * scoped_user_write_access - Start a scoped user write access
797  * @udst:	Pointer to the user space address to write to
798  * @elbl:	Error label to goto when the access region is rejected
799  *
800  * The size of the access starting from @udst is determined via sizeof(*@udst).
801  *
802  * For further information see __scoped_user_access() above.
803  */
804 #define scoped_user_write_access(udst, elbl)				\
805 	scoped_user_write_access_size(udst, sizeof(*(udst)), elbl)
806 
807 /**
808  * scoped_user_rw_access_size - Start a scoped user read/write access with given size
809  * @uptr:	Pointer to the user space address to read from and write to
810  * @size:	Size of the access starting from @uptr
811  * @elbl:	Error label to goto when the access region is rejected
812  *
813  * For further information see __scoped_user_access() above.
814  */
815 #define scoped_user_rw_access_size(uptr, size, elbl)			\
816 	__scoped_user_access(rw, uptr, size, elbl)
817 
818 /**
819  * scoped_user_rw_access - Start a scoped user read/write access
820  * @uptr:	Pointer to the user space address to read from and write to
821  * @elbl:	Error label to goto when the access region is rejected
822  *
823  * The size of the access starting from @uptr is determined via sizeof(*@uptr).
824  *
825  * For further information see __scoped_user_access() above.
826  */
827 #define scoped_user_rw_access(uptr, elbl)				\
828 	scoped_user_rw_access_size(uptr, sizeof(*(uptr)), elbl)
829 
830 /**
831  * get_user_inline - Read user data inlined
832  * @val:	The variable to store the value read from user memory
833  * @usrc:	Pointer to the user space memory to read from
834  *
835  * Return: 0 if successful, -EFAULT when faulted
836  *
837  * Inlined variant of get_user(). Only use when there is a demonstrable
838  * performance reason.
839  */
840 #define get_user_inline(val, usrc)				\
841 ({								\
842 	__label__ efault;					\
843 	typeof(usrc) _tmpsrc = usrc;				\
844 	int _ret = 0;						\
845 								\
846 	scoped_user_read_access(_tmpsrc, efault)		\
847 		unsafe_get_user(val, _tmpsrc, efault);		\
848 	if (0) {						\
849 	efault:							\
850 		_ret = -EFAULT;					\
851 	}							\
852 	_ret;							\
853 })
854 
855 /**
856  * put_user_inline - Write to user memory inlined
857  * @val:	The value to write
858  * @udst:	Pointer to the user space memory to write to
859  *
860  * Return: 0 if successful, -EFAULT when faulted
861  *
862  * Inlined variant of put_user(). Only use when there is a demonstrable
863  * performance reason.
864  */
865 #define put_user_inline(val, udst)				\
866 ({								\
867 	__label__ efault;					\
868 	typeof(udst) _tmpdst = udst;				\
869 	int _ret = 0;						\
870 								\
871 	scoped_user_write_access(_tmpdst, efault)		\
872 		unsafe_put_user(val, _tmpdst, efault);		\
873 	if (0) {						\
874 	efault:							\
875 		_ret = -EFAULT;					\
876 	}							\
877 	_ret;							\
878 })
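
/*
 * Illustrative sketch ('uarg' and 'counter' are made-up names): both
 * helpers behave like get_user()/put_user() and are meant for
 * demonstrably hot paths only:
 *
 *	if (get_user_inline(val, &uarg->counter))
 *		return -EFAULT;
 *	if (put_user_inline(val + 1, &uarg->counter))
 *		return -EFAULT;
 */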
879 
880 #ifdef CONFIG_HARDENED_USERCOPY
881 void __noreturn usercopy_abort(const char *name, const char *detail,
882 			       bool to_user, unsigned long offset,
883 			       unsigned long len);
884 #endif
885 
886 #endif		/* __LINUX_UACCESS_H__ */
887