/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

#ifndef __ASSEMBLY__

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The exception table consists of pairs of relative offsets: the first
 * is the relative offset to an instruction that is allowed to fault,
 * and the second is the relative offset at which the program should
 * continue. No registers are modified, so it is entirely up to the
 * continuation code to figure out what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	int insn, fixup;
};

#define ARCH_HAS_RELATIVE_EXTABLE

extern int fixup_exception(struct pt_regs *regs);

#define KERNEL_DS	(-1UL)
#define get_ds()	(KERNEL_DS)

#define USER_DS		TASK_SIZE_64
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}

#define segment_eq(a, b)	((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= current->addr_limit
 *
 * This needs 65-bit arithmetic.
 */
#define __range_ok(addr, size)						\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
		: "=&r" (flag), "=&r" (roksum)				\
		: "1" (addr), "Ir" (size),				\
		  "r" (current_thread_info()->addr_limit)		\
		: "cc");						\
	flag;								\
})
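
/*
 * An illustrative sketch only: the asm check above is equivalent to the
 * plain C below, which sidesteps the 65-bit overflow by testing the size
 * against the limit first ('__range_ok_c' is a made-up name, not used
 * anywhere in the kernel).
 */
#if 0
static inline unsigned long __range_ok_c(unsigned long addr, unsigned long size)
{
	unsigned long limit = current_thread_info()->addr_limit;

	/* if size > limit, addr + size overflows the limit for any addr */
	return size <= limit && addr <= limit - size;
}
#endif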

/*
 * When dealing with data aborts or instruction traps we may end up with
 * a tagged userland pointer. Clear the tag to get a sane pointer to pass
 * on to access_ok(), for instance.
 */
#define untagged_addr(addr)		sign_extend64(addr, 55)

#define access_ok(type, addr, size)	__range_ok(addr, size)
#define user_addr_max			get_fs

#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long ttbr;

	/* reserved_ttbr0 placed at the end of swapper_pg_dir */
	ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
	write_sysreg(ttbr, ttbr0_el1);
	isb();
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

static inline void uaccess_disable(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
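
/*
 * For instance, a caller that has already checked the whole user buffer
 * with access_ok() can use the unchecked accessors for the individual
 * fields. This is an illustrative sketch only; 'example_req' and
 * 'read_example_req' are made-up names, not kernel interfaces.
 */
#if 0
struct example_req {
	u32 cmd;
	u32 arg;
};

static int read_example_req(struct example_req *req,
			    const struct example_req __user *ureq)
{
	/* one range check up front ... */
	if (!access_ok(VERIFY_READ, ureq, sizeof(*ureq)))
		return -EFAULT;

	/* ... then unchecked per-field accesses */
	if (__get_user(req->cmd, &ureq->cmd))
		return -EFAULT;

	return __get_user(req->arg, &ureq->arg);
}
#endif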

#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user_unaligned __get_user

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?			\
		__get_user((x), __p) :					\
		((x) = 0, -EFAULT);					\
})

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user_unaligned __put_user

#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?			\
		__put_user((x), __p) :					\
		-EFAULT;						\
})
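
/*
 * A purely illustrative sketch of how the relative offsets emitted by
 * _ASM_EXTABLE() above are turned back into absolute addresses when a
 * fault is fixed up ('example_extable_fixup_addr' is a made-up helper,
 * not the kernel's fixup_exception() implementation):
 */
#if 0
static unsigned long
example_extable_fixup_addr(const struct exception_table_entry *ex)
{
	/* each offset is relative to the address of the field holding it */
	return (unsigned long)&ex->fixup + ex->fixup;
}
#endif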

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return __arch_copy_from_user(to, from, n);
}

static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return __arch_copy_to_user(to, from, n);
}

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	kasan_check_write(to, n);

	if (access_ok(VERIFY_READ, from, n)) {
		check_object_size(to, n, false);
		res = __arch_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);

	if (access_ok(VERIFY_WRITE, to, n)) {
		check_object_size(from, n, true);
		n = __arch_copy_to_user(to, from, n);
	}
	return n;
}

static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
		n = __copy_in_user(to, from, n);
	return n;
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
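
/*
 * Note that the copy routines return the number of bytes that could NOT
 * be copied, and copy_from_user() additionally zero-fills the uncopied
 * tail of the destination, so callers usually treat any non-zero return
 * as -EFAULT. An illustrative sketch ('example_fetch' is a made-up
 * helper, not part of this header):
 */
#if 0
static int example_fetch(void *dst, const void __user *src, unsigned long len)
{
	if (copy_from_user(dst, src, len))
		return -EFAULT;

	return 0;
}
#endif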

#else	/* __ASSEMBLY__ */

#include <asm/assembler.h>

/*
 * User access enabling/disabling macros.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	.macro	__uaccess_ttbr0_disable, tmp1
	mrs	\tmp1, ttbr1_el1		// swapper_pg_dir
	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE	// reserved_ttbr0 at the end of swapper_pg_dir
	msr	ttbr0_el1, \tmp1		// set reserved TTBR0_EL1
	isb
	.endm

	.macro	__uaccess_ttbr0_enable, tmp1
	get_thread_info \tmp1
	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
	isb
	.endm

	.macro	uaccess_ttbr0_disable, tmp1
alternative_if_not ARM64_HAS_PAN
	__uaccess_ttbr0_disable \tmp1
alternative_else_nop_endif
	.endm

	.macro	uaccess_ttbr0_enable, tmp1, tmp2
alternative_if_not ARM64_HAS_PAN
	save_and_disable_irq \tmp2		// avoid preemption
	__uaccess_ttbr0_enable \tmp1
	restore_irq \tmp2
alternative_else_nop_endif
	.endm
#else
	.macro	uaccess_ttbr0_disable, tmp1
	.endm

	.macro	uaccess_ttbr0_enable, tmp1, tmp2
	.endm
#endif

/*
 * These macros are no-ops when UAO is present.
 */
	.macro	uaccess_disable_not_uao, tmp1
	uaccess_ttbr0_disable \tmp1
alternative_if ARM64_ALT_PAN_NOT_UAO
	SET_PSTATE_PAN(1)
alternative_else_nop_endif
	.endm

	.macro	uaccess_enable_not_uao, tmp1, tmp2
	uaccess_ttbr0_enable \tmp1, \tmp2
alternative_if ARM64_ALT_PAN_NOT_UAO
	SET_PSTATE_PAN(0)
alternative_else_nop_endif
	.endm

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_UACCESS_H */