/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <linux/thread_info.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

#define KERNEL_DS	(-1UL)
#define get_ds()	(KERNEL_DS)

#define USER_DS		TASK_SIZE_64
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}

#define segment_eq(a, b)	((a) == (b))

/*
 * Return 1 if addr < current->addr_limit, 0 otherwise.
 */
#define __addr_ok(addr)							\
({									\
	unsigned long flag;						\
	asm("cmp %1, %0; cset %0, lo"					\
		: "=&r" (flag)						\
		: "r" (addr), "0" (current_thread_info()->addr_limit)	\
		: "cc");						\
	flag;								\
})

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= current->addr_limit
 *
 * This needs 65-bit arithmetic.
 */
#define __range_ok(addr, size)						\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
		: "=&r" (flag), "=&r" (roksum)				\
		: "1" (addr), "Ir" (size),				\
		  "r" (current_thread_info()->addr_limit)		\
		: "cc");						\
	flag;								\
})

#define access_ok(type, addr, size)	__range_ok(addr, size)
#define user_addr_max			get_fs

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - that must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an
 * error occurs, and leave it unchanged on success.
 */
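/*
 * Usage sketch (illustrative only; the helper name below is hypothetical,
 * not part of this header): a typical caller validates the whole range
 * once with access_ok() and then uses the unchecked accessors, so the
 * limit check is not repeated for every access.  __get_user() returns 0
 * on success and -EFAULT on a fault.
 *
 *	static int example_read_u32(const u32 __user *uaddr, u32 *out)
 *	{
 *		u32 val;
 *
 *		if (!access_ok(VERIFY_READ, uaddr, sizeof(*uaddr)))
 *			return -EFAULT;
 *		if (__get_user(val, uaddr))
 *			return -EFAULT;
 *		*out = val;
 *		return 0;
 *	}
 */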
#define __get_user_asm(instr, reg, x, addr, err)			\
	asm volatile(							\
	"1:	" instr "	" reg "1, [%2]\n"			\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	"	.section __ex_table,\"a\"\n"				\
	"	.align	3\n"						\
	"	.quad	1b, 3b\n"					\
	"	.previous"						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
			CONFIG_ARM64_PAN));				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "%", __gu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
			CONFIG_ARM64_PAN));				\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user_unaligned __get_user

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?			\
		__get_user((x), __p) :					\
		((x) = 0, -EFAULT);					\
})

#define __put_user_asm(instr, reg, x, addr, err)			\
	asm volatile(							\
	"1:	" instr "	" reg "1, [%2]\n"			\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	"	.section __ex_table,\"a\"\n"				\
	"	.align	3\n"						\
	"	.quad	1b, 3b\n"					\
	"	.previous"						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
			CONFIG_ARM64_PAN));				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__put_user_asm("strh", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__put_user_asm("str", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__put_user_asm("str", "%", __pu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
			CONFIG_ARM64_PAN));				\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user_unaligned __put_user

#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?			\
		__put_user((x), __p) :					\
		-EFAULT;						\
})
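/*
 * Usage sketch (illustrative only; the frame layout and function below
 * are hypothetical): the "__xxx_error" variants only ever set the error
 * argument to -EFAULT and never clear it, so several accesses can be
 * batched and checked with a single test at the end, as signal-frame
 * setup code commonly does.  On return, err is 0 if every store
 * succeeded and -EFAULT if any of them faulted.  The destination range
 * must already have been validated with access_ok().
 *
 *	struct example_frame {
 *		unsigned long lr;
 *		unsigned long sp;
 *	};
 *
 *	static int example_fill_frame(struct example_frame __user *frame,
 *				      unsigned long lr, unsigned long sp)
 *	{
 *		int err = 0;
 *
 *		__put_user_error(lr, &frame->lr, err);
 *		__put_user_error(sp, &frame->sp, err);
 *		return err;
 *	}
 */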
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else /* security hole - plug it */
		memset(to, 0, n);
	return n;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}

static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
		n = __copy_in_user(to, from, n);
	return n;
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* __ASM_UACCESS_H */
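/*
 * Usage sketch (illustrative only; the function and argument struct are
 * hypothetical): copy_from_user() returns the number of bytes that could
 * NOT be copied, so 0 means success.  When the range fails access_ok(),
 * the kernel-side buffer is zeroed before returning, so callers that
 * ignore the return value never act on uninitialised stack data.
 *
 *	static int example_fetch_args(const void __user *uptr,
 *				      struct example_args *args)
 *	{
 *		if (copy_from_user(args, uptr, sizeof(*args)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */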