/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 François Tigeot
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_LINUXKPI_LINUX_UACCESS_H_
#define	_LINUXKPI_LINUX_UACCESS_H_

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <linux/compiler.h>

#define	VERIFY_READ	VM_PROT_READ
#define	VERIFY_WRITE	VM_PROT_WRITE

#define	__get_user(_x, _p) ({					\
	int __err;						\
	__typeof(*(_p)) __x;					\
	__err = linux_copyin((_p), &(__x), sizeof(*(_p)));	\
	(_x) = __x;						\
	__err;							\
})

#define	__put_user(_x, _p) ({				\
	__typeof(*(_p)) __x = (_x);			\
	linux_copyout(&(__x), (_p), sizeof(*(_p)));	\
})
#define	get_user(_x, _p)	linux_copyin((_p), &(_x), sizeof(*(_p)))
#define	put_user(_x, _p)	__put_user(_x, _p)
#define	clear_user(...)		linux_clear_user(__VA_ARGS__)

#define	access_ok(a, b)		linux_access_ok(a, b)

extern int linux_copyin(const void *uaddr, void *kaddr, size_t len);
extern int linux_copyout(const void *kaddr, void *uaddr, size_t len);
extern size_t linux_clear_user(void *uaddr, size_t len);
extern int linux_access_ok(const void *uaddr, size_t len);
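
/*
 * Example usage (illustrative sketch; "uptr" and "value" are
 * placeholder names).  get_user() and put_user() return zero on
 * success and a non-zero errno-style value on fault, while
 * clear_user() returns the number of bytes left unzeroed, as in
 * Linux:
 *
 *	int value, error;
 *
 *	if (!access_ok(uptr, sizeof(value)))
 *		return (-EFAULT);
 *	error = get_user(value, uptr);
 *	if (error == 0)
 *		error = put_user(value + 1, uptr);
 */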

/*
 * NOTE: Each pagefault_disable() call must have a corresponding
 * pagefault_enable() call in the same scope. The former creates a new
 * block and defines a temporary variable, and the latter uses the
 * temporary variable and closes the block. Failure to balance the
 * calls will result in a compile-time error.
 */
#define	pagefault_disable(void) do {		\
	int __saved_pflags =			\
	    vm_fault_disable_pagefaults()

#define	pagefault_enable(void)				\
	vm_fault_enable_pagefaults(__saved_pflags);	\
} while (0)
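
/*
 * Example of a balanced pair (illustrative sketch; "dst", "usrc" and
 * "len" are placeholder names):
 *
 *	pagefault_disable();
 *	error = __copy_from_user_inatomic(dst, usrc, len);
 *	pagefault_enable();
 */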

static inline bool
pagefault_disabled(void)
{
	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

static inline int
__copy_to_user_inatomic(void __user *to, const void *from, unsigned n)
{

	return (copyout_nofault(from, to, n) != 0 ? n : 0);
}
#define	__copy_to_user_inatomic_nocache(to, from, n)	\
	__copy_to_user_inatomic((to), (from), (n))
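
/*
 * Example (illustrative sketch; "udst", "src" and "len" are
 * placeholder names).  Mirroring the Linux convention, the inatomic
 * copies return the number of bytes not copied; here the copy is
 * all-or-nothing, so the result is 0 on success and n on fault:
 *
 *	pagefault_disable();
 *	if (__copy_to_user_inatomic(udst, src, len) != 0)
 *		error = -EFAULT;
 *	pagefault_enable();
 */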

static inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from,
    unsigned long n)
{
	/*
	 * XXXKIB.  The equivalent Linux function is implemented using
	 * MOVNTI for aligned moves, with a normal move for the
	 * unaligned head and tail.  As such, using a normal copyin
	 * here is not incorrect, only somewhat slower.  All uses
	 * except shmem_pwrite_fast() have the destination mapped WC.
	 */
	return (copyin_nofault(__DECONST(void *, from), to, n) != 0 ? n : 0);
}
#define	__copy_from_user_inatomic_nocache(to, from, n)	\
	__copy_from_user_inatomic((to), (from), (n))

#endif					/* _LINUXKPI_LINUX_UACCESS_H_ */