/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/compiler.h>

/*
 * The exception table consists of pairs of relative offsets: the first
 * is the relative offset to an instruction that is allowed to fault,
 * and the second is the relative offset at which the program should
 * continue. No registers are modified, so it is entirely up to the
 * continuation code to figure out what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	int insn, fixup;	/* both are offsets relative to the entry itself */
};

#define ARCH_HAS_RELATIVE_EXTABLE

extern int fixup_exception(struct pt_regs *regs);

/*
 * KERNEL_DS is all-ones so that the __range_ok() limit check passes for
 * any kernel address; USER_DS caps accesses at the top of the user VA
 * range. The current limit lives in thread_info::addr_limit.
 */
#define KERNEL_DS	(-1UL)
#define get_ds()	(KERNEL_DS)

#define USER_DS		TASK_SIZE_64
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}

#define segment_eq(a, b)	((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= current->addr_limit
 *
 * This needs 65-bit arithmetic.
 *
 * Implementation notes on the asm below:
 *  - "adds" computes addr + size and sets the carry flag on unsigned
 *    64-bit overflow (the "65th bit");
 *  - "ccmp ..., #2, cc" compares the sum against addr_limit only when
 *    carry is clear (no overflow); on overflow it instead forces
 *    NZCV = #2 (C set, Z clear), i.e. the "hi" condition, so the final
 *    test fails;
 *  - "cset %0, ls" yields 1 only when the sum is <= addr_limit and no
 *    overflow occurred.
 */
#define __range_ok(addr, size)						\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
		: "=&r" (flag), "=&r" (roksum)				\
		: "1" (addr), "Ir" (size),				\
		  "r" (current_thread_info()->addr_limit)		\
		: "cc");						\
	flag;								\
})

/*
 * When dealing with data aborts or instruction traps we may end up with
 * a tagged userland pointer. Clear the tag to get a sane pointer to pass
 * on to access_ok(), for instance.
 *
 * Sign-extending from bit 55 strips a TBI tag from a user address while
 * leaving kernel addresses (bit 55 set) all-ones in the top byte.
 */
#define untagged_addr(addr)		sign_extend64(addr, 55)

/* The 'type' argument (VERIFY_READ/VERIFY_WRITE) is ignored on arm64. */
#define access_ok(type, addr, size)	__range_ok(addr, size)
#define user_addr_max			get_fs

/*
 * Emit one relative exception-table entry: the offset of the faulting
 * instruction and the offset of its fixup, both relative to the entry.
 */
#define _ASM_EXTABLE(from, to)						\
	" .pushsection __ex_table, \"a\"\n"				\
	" .align 3\n"							\
	" .long (" #from " - .), (" #to " - .)\n"			\
	" .popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
 * Software PAN emulation: point TTBR0_EL1 at the reserved (empty) page
 * table so that any userspace access faults until re-enabled.
 */
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long ttbr;

	/* reserved_ttbr0 placed at the end of swapper_pg_dir */
	ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
	write_sysreg(ttbr, ttbr0_el1);
	isb();
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

/* Returns whether SW PAN actually took effect (false if not in use). */
static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

/*
 * If SW TTBR0 PAN handled it, nothing more to do; otherwise toggle the
 * hardware PSTATE.PAN bit via the runtime-patched alternative.
 */
#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

static inline void uaccess_disable(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}

/*
 * These functions are no-ops when UAO is present.
 * (The ARM64_ALT_PAN_NOT_UAO alternative patches out the PAN toggle on
 * CPUs where the unprivileged ldtr/sttr instructions are used instead.)
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
/*
 * Single load from userspace. With UAO the alternative substitutes the
 * unprivileged alt_instr (ldtr*). On a fault the out-of-line fixup sets
 * err = -EFAULT and zeroes the destination so no stale data leaks.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	" .section .fixup, \"ax\"\n"					\
	" .align 2\n"							\
	"3: mov %w0, %3\n"						\
	" mov %1, #0\n"							\
	" b 2b\n"							\
	" .previous\n"							\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user_unaligned	__get_user

/* Checked variant: validates the range first; zeroes x on failure. */
#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?			\
		__get_user((x), __p) :					\
		((x) = 0, -EFAULT);					\
})

/*
 * Single store to userspace; mirror image of __get_user_asm, except the
 * fixup only sets err (there is no destination register to zero).
 */
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	" .section .fixup,\"ax\"\n"					\
	" .align 2\n"							\
	"3: mov %w0, %3\n"						\
	" b 2b\n"							\
	" .previous\n"							\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user_unaligned	__put_user

/* Checked variant: validates the range first. */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?			\
		__put_user((x), __p) :					\
		-EFAULT;						\
})

/* Bulk copy routines implemented in assembly; return bytes NOT copied. */
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return __arch_copy_from_user(to, from, n);
}

static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return __arch_copy_to_user(to, from, n);
}

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	kasan_check_write(to, n);
	check_object_size(to, n, false);

	if (access_ok(VERIFY_READ, from, n)) {
		res = __arch_copy_from_user(to, from, n);
	}
	/*
	 * Zero the uncopied tail of the kernel buffer so callers that
	 * ignore the return value cannot observe uninitialized data.
	 */
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);

	if (access_ok(VERIFY_WRITE, to, n)) {
		n = __arch_copy_to_user(to, from, n);
	}
	return n;
}

static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
		n = __copy_in_user(to, from, n);
	return n;
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* __ASM_UACCESS_H */