1 /*-
2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
6 * Copyright (c) 2015 François Tigeot
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice unmodified, this list of conditions, and the following
14 * disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #ifndef _LINUXKPI_LINUX_UACCESS_H_
32 #define _LINUXKPI_LINUX_UACCESS_H_
33
34 #include <sys/param.h>
35 #include <sys/lock.h>
36 #include <sys/proc.h>
37
38 #include <vm/vm.h>
39 #include <vm/vm_extern.h>
40
41 #include <linux/compiler.h>
42
/*
 * Legacy Linux access_ok() "type" arguments, mapped onto the VM
 * protection bits -- NOTE(review): not consumed by the two-argument
 * access_ok() below; presumably kept for older driver code.
 */
#define	VERIFY_READ	VM_PROT_READ
#define	VERIFY_WRITE	VM_PROT_WRITE
45
/*
 * Fetch a value of the type pointed to by "_p" from user space into
 * "_x".  Evaluates to 0 on success or the non-zero result of
 * linux_copyin() on fault.
 *
 * The temporary is zero-initialized so that "_x" never receives an
 * indeterminate value when the copy faults: the previous code copied
 * an uninitialized automatic on error, which is undefined behavior,
 * and Linux's get_user() contract is to zero the destination on fault.
 * (Brace-enclosed "{ 0 }" is valid for scalar and aggregate types.)
 */
#define	__get_user(_x, _p) ({					\
	int __err;						\
	__typeof(*(_p)) __x = { 0 };				\
	__err = linux_copyin((_p), &(__x), sizeof(*(_p)));	\
	(_x) = __x;						\
	__err;							\
})
53
/*
 * Store "_x" to the user-space location "_p".  Expands to the value
 * returned by linux_copyout(): 0 on success, non-zero on fault.  The
 * temporary gives "_x" the destination's type before the copy.
 */
#define	__put_user(_x, _p) ({					\
	__typeof(*(_p)) __tmp = (_x);				\
	linux_copyout(&(__tmp), (_p), sizeof(__tmp));		\
})
/*
 * get_user() copies directly into "_x" with no temporary.
 * NOTE(review): unlike Linux, nothing here guarantees "_x" is zeroed
 * when the copy faults -- confirm callers check the return value
 * before using the result.
 */
#define	get_user(_x, _p)	linux_copyin((_p), &(_x), sizeof(*(_p)))
#define	put_user(_x, _p)	__put_user(_x, _p)
/* Zero "len" bytes of user memory; see linux_clear_user() below. */
#define	clear_user(...)		linux_clear_user(__VA_ARGS__)

/* Validate a user address range; see linux_access_ok() below. */
#define	access_ok(a,b)		linux_access_ok(a,b)
63
/*
 * Backing primitives implemented elsewhere in the linuxkpi module.
 * NOTE(review): return conventions inferred from the Linux API these
 * mirror -- confirm against the definitions: the copyin/copyout
 * helpers return 0 on success and non-zero on fault;
 * linux_clear_user() presumably returns the number of bytes NOT
 * cleared; linux_access_ok() is non-zero when the range is valid.
 */
extern int linux_copyin(const void *uaddr, void *kaddr, size_t len);
extern int linux_copyout(const void *kaddr, void *uaddr, size_t len);
extern size_t linux_clear_user(void *uaddr, size_t len);
extern int linux_access_ok(const void *uaddr, size_t len);
68
/*
 * NOTE: Each pagefault_disable() call must have a corresponding
 * pagefault_enable() call in the same scope. The former creates a new
 * block and defines a temporary variable, and the latter uses the
 * temporary variable and closes the block. Failure to balance the
 * calls will result in a compile-time error.
 *
 * "(void)" below is a macro parameter named "void", not a prototype:
 * it lets each macro be invoked as e.g. pagefault_disable() while the
 * expansion carries one half of an unbalanced "do { ... } while (0)"
 * pair, which is what enforces the balancing at compile time.
 */
#define	pagefault_disable(void) do {		\
	int __saved_pflags =			\
	    vm_fault_disable_pagefaults()

#define	pagefault_enable(void)				\
	vm_fault_enable_pagefaults(__saved_pflags);	\
} while (0)
83
/*
 * Return true when page faults are currently disabled for the calling
 * thread, i.e. TDP_NOFAULTING is set (as done by
 * vm_fault_disable_pagefaults() via pagefault_disable() above).
 */
static inline bool
pagefault_disabled(void)
{
	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}
89
90 static inline int
__copy_to_user_inatomic(void __user * to,const void * from,unsigned n)91 __copy_to_user_inatomic(void __user *to, const void *from, unsigned n)
92 {
93
94 return (copyout_nofault(from, to, n) != 0 ? n : 0);
95 }
/* No non-temporal-store variant here; alias the plain nofault copy. */
#define	__copy_to_user_inatomic_nocache(to, from, n)	\
	__copy_to_user_inatomic((to), (from), (n))
98
99 static inline unsigned long
__copy_from_user_inatomic(void * to,const void __user * from,unsigned long n)100 __copy_from_user_inatomic(void *to, const void __user *from,
101 unsigned long n)
102 {
103 /*
104 * XXXKIB. Equivalent Linux function is implemented using
105 * MOVNTI for aligned moves. For unaligned head and tail,
106 * normal move is performed. As such, it is not incorrect, if
107 * only somewhat slower, to use normal copyin. All uses
108 * except shmem_pwrite_fast() have the destination mapped WC.
109 */
110 return ((copyin_nofault(__DECONST(void *, from), to, n) != 0 ? n : 0));
111 }
/* No non-temporal-load variant here; alias the plain nofault copy. */
#define	__copy_from_user_inatomic_nocache(to, from, n)	\
	__copy_from_user_inatomic((to), (from), (n))
114
115 #endif /* _LINUXKPI_LINUX_UACCESS_H_ */
116