1 /* 2 * Based on arch/arm/include/asm/uaccess.h 3 * 4 * Copyright (C) 2012 ARM Ltd. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 */ 18 #ifndef __ASM_UACCESS_H 19 #define __ASM_UACCESS_H 20 21 #include <asm/alternative.h> 22 #include <asm/kernel-pgtable.h> 23 #include <asm/sysreg.h> 24 25 /* 26 * User space memory access functions 27 */ 28 #include <linux/bitops.h> 29 #include <linux/kasan-checks.h> 30 #include <linux/string.h> 31 32 #include <asm/cpufeature.h> 33 #include <asm/ptrace.h> 34 #include <asm/memory.h> 35 #include <asm/extable.h> 36 37 #define get_ds() (KERNEL_DS) 38 #define get_fs() (current_thread_info()->addr_limit) 39 40 static inline void set_fs(mm_segment_t fs) 41 { 42 current_thread_info()->addr_limit = fs; 43 44 /* 45 * Prevent a mispredicted conditional call to set_fs from forwarding 46 * the wrong address limit to access_ok under speculation. 47 */ 48 dsb(nsh); 49 isb(); 50 51 /* On user-mode return, check fs is correct */ 52 set_thread_flag(TIF_FSCHECK); 53 54 /* 55 * Enable/disable UAO so that copy_to_user() etc can access 56 * kernel memory with the unprivileged instructions. 57 */ 58 if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS) 59 asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO)); 60 else 61 asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO, 62 CONFIG_ARM64_UAO)); 63 } 64 65 #define segment_eq(a, b) ((a) == (b)) 66 67 /* 68 * Test whether a block of memory is a valid user space address. 69 * Returns 1 if the range is valid, 0 otherwise. 70 * 71 * This is equivalent to the following test: 72 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1 73 */ 74 static inline unsigned long __range_ok(const void __user *addr, unsigned long size) 75 { 76 unsigned long ret, limit = current_thread_info()->addr_limit; 77 78 __chk_user_ptr(addr); 79 asm volatile( 80 // A + B <= C + 1 for all A,B,C, in four easy steps: 81 // 1: X = A + B; X' = X % 2^64 82 " adds %0, %3, %2\n" 83 // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4 84 " csel %1, xzr, %1, hi\n" 85 // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X' 86 // to compensate for the carry flag being set in step 4. For 87 // X > 2^64, X' merely has to remain nonzero, which it does. 88 " csinv %0, %0, xzr, cc\n" 89 // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1 90 // comes from the carry in being clear. Otherwise, we are 91 // testing X' - C == 0, subject to the previous adjustments. 92 " sbcs xzr, %0, %1\n" 93 " cset %0, ls\n" 94 : "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc"); 95 96 return ret; 97 } 98 99 /* 100 * When dealing with data aborts, watchpoints, or instruction traps we may end 101 * up with a tagged userland pointer. Clear the tag to get a sane pointer to 102 * pass on to access_ok(), for instance. 103 */ 104 #define untagged_addr(addr) sign_extend64(addr, 55) 105 106 #define access_ok(type, addr, size) __range_ok(addr, size) 107 #define user_addr_max get_fs 108 109 #define _ASM_EXTABLE(from, to) \ 110 " .pushsection __ex_table, \"a\"\n" \ 111 " .align 3\n" \ 112 " .long (" #from " - .), (" #to " - .)\n" \ 113 " .popsection\n" 114 115 /* 116 * User access enabling/disabling. 117 */ 118 #ifdef CONFIG_ARM64_SW_TTBR0_PAN 119 static inline void __uaccess_ttbr0_disable(void) 120 { 121 unsigned long flags, ttbr; 122 123 local_irq_save(flags); 124 ttbr = read_sysreg(ttbr1_el1); 125 ttbr &= ~TTBR_ASID_MASK; 126 /* reserved_ttbr0 placed before swapper_pg_dir */ 127 write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1); 128 isb(); 129 /* Set reserved ASID */ 130 write_sysreg(ttbr, ttbr1_el1); 131 isb(); 132 local_irq_restore(flags); 133 } 134 135 static inline void __uaccess_ttbr0_enable(void) 136 { 137 unsigned long flags, ttbr0, ttbr1; 138 139 /* 140 * Disable interrupts to avoid preemption between reading the 'ttbr0' 141 * variable and the MSR. A context switch could trigger an ASID 142 * roll-over and an update of 'ttbr0'. 143 */ 144 local_irq_save(flags); 145 ttbr0 = READ_ONCE(current_thread_info()->ttbr0); 146 147 /* Restore active ASID */ 148 ttbr1 = read_sysreg(ttbr1_el1); 149 ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */ 150 ttbr1 |= ttbr0 & TTBR_ASID_MASK; 151 write_sysreg(ttbr1, ttbr1_el1); 152 isb(); 153 154 /* Restore user page table */ 155 write_sysreg(ttbr0, ttbr0_el1); 156 isb(); 157 local_irq_restore(flags); 158 } 159 160 static inline bool uaccess_ttbr0_disable(void) 161 { 162 if (!system_uses_ttbr0_pan()) 163 return false; 164 __uaccess_ttbr0_disable(); 165 return true; 166 } 167 168 static inline bool uaccess_ttbr0_enable(void) 169 { 170 if (!system_uses_ttbr0_pan()) 171 return false; 172 __uaccess_ttbr0_enable(); 173 return true; 174 } 175 #else 176 static inline bool uaccess_ttbr0_disable(void) 177 { 178 return false; 179 } 180 181 static inline bool uaccess_ttbr0_enable(void) 182 { 183 return false; 184 } 185 #endif 186 187 static inline void __uaccess_disable_hw_pan(void) 188 { 189 asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, 190 CONFIG_ARM64_PAN)); 191 } 192 193 static inline void __uaccess_enable_hw_pan(void) 194 { 195 asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, 196 CONFIG_ARM64_PAN)); 197 } 198 199 #define __uaccess_disable(alt) \ 200 do { \ 201 if (!uaccess_ttbr0_disable()) \ 202 asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \ 203 CONFIG_ARM64_PAN)); \ 204 } while (0) 205 206 #define __uaccess_enable(alt) \ 207 do { \ 208 if (!uaccess_ttbr0_enable()) \ 209 asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \ 210 CONFIG_ARM64_PAN)); \ 211 } while (0) 212 213 static inline void uaccess_disable(void) 214 { 215 __uaccess_disable(ARM64_HAS_PAN); 216 } 217 218 static inline void uaccess_enable(void) 219 { 220 __uaccess_enable(ARM64_HAS_PAN); 221 } 222 223 /* 224 * These functions are no-ops when UAO is present. 225 */ 226 static inline void uaccess_disable_not_uao(void) 227 { 228 __uaccess_disable(ARM64_ALT_PAN_NOT_UAO); 229 } 230 231 static inline void uaccess_enable_not_uao(void) 232 { 233 __uaccess_enable(ARM64_ALT_PAN_NOT_UAO); 234 } 235 236 /* 237 * Sanitise a uaccess pointer such that it becomes NULL if above the 238 * current addr_limit. 239 */ 240 #define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr) 241 static inline void __user *__uaccess_mask_ptr(const void __user *ptr) 242 { 243 void __user *safe_ptr; 244 245 asm volatile( 246 " bics xzr, %1, %2\n" 247 " csel %0, %1, xzr, eq\n" 248 : "=&r" (safe_ptr) 249 : "r" (ptr), "r" (current_thread_info()->addr_limit) 250 : "cc"); 251 252 csdb(); 253 return safe_ptr; 254 } 255 256 /* 257 * The "__xxx" versions of the user access functions do not verify the address 258 * space - it must have been done previously with a separate "access_ok()" 259 * call. 260 * 261 * The "__xxx_error" versions set the third argument to -EFAULT if an error 262 * occurs, and leave it unchanged on success. 263 */ 264 #define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ 265 asm volatile( \ 266 "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ 267 alt_instr " " reg "1, [%2]\n", feature) \ 268 "2:\n" \ 269 " .section .fixup, \"ax\"\n" \ 270 " .align 2\n" \ 271 "3: mov %w0, %3\n" \ 272 " mov %1, #0\n" \ 273 " b 2b\n" \ 274 " .previous\n" \ 275 _ASM_EXTABLE(1b, 3b) \ 276 : "+r" (err), "=&r" (x) \ 277 : "r" (addr), "i" (-EFAULT)) 278 279 #define __get_user_err(x, ptr, err) \ 280 do { \ 281 unsigned long __gu_val; \ 282 __chk_user_ptr(ptr); \ 283 uaccess_enable_not_uao(); \ 284 switch (sizeof(*(ptr))) { \ 285 case 1: \ 286 __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ 287 (err), ARM64_HAS_UAO); \ 288 break; \ 289 case 2: \ 290 __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ 291 (err), ARM64_HAS_UAO); \ 292 break; \ 293 case 4: \ 294 __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ 295 (err), ARM64_HAS_UAO); \ 296 break; \ 297 case 8: \ 298 __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ 299 (err), ARM64_HAS_UAO); \ 300 break; \ 301 default: \ 302 BUILD_BUG(); \ 303 } \ 304 uaccess_disable_not_uao(); \ 305 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 306 } while (0) 307 308 #define __get_user_check(x, ptr, err) \ 309 ({ \ 310 __typeof__(*(ptr)) __user *__p = (ptr); \ 311 might_fault(); \ 312 if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ 313 __p = uaccess_mask_ptr(__p); \ 314 __get_user_err((x), __p, (err)); \ 315 } else { \ 316 (x) = 0; (err) = -EFAULT; \ 317 } \ 318 }) 319 320 #define __get_user_error(x, ptr, err) \ 321 ({ \ 322 __get_user_check((x), (ptr), (err)); \ 323 (void)0; \ 324 }) 325 326 #define __get_user(x, ptr) \ 327 ({ \ 328 int __gu_err = 0; \ 329 __get_user_check((x), (ptr), __gu_err); \ 330 __gu_err; \ 331 }) 332 333 #define get_user __get_user 334 335 #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ 336 asm volatile( \ 337 "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ 338 alt_instr " " reg "1, [%2]\n", feature) \ 339 "2:\n" \ 340 " .section .fixup,\"ax\"\n" \ 341 " .align 2\n" \ 342 "3: mov %w0, %3\n" \ 343 " b 2b\n" \ 344 " .previous\n" \ 345 _ASM_EXTABLE(1b, 3b) \ 346 : "+r" (err) \ 347 : "r" (x), "r" (addr), "i" (-EFAULT)) 348 349 #define __put_user_err(x, ptr, err) \ 350 do { \ 351 __typeof__(*(ptr)) __pu_val = (x); \ 352 __chk_user_ptr(ptr); \ 353 uaccess_enable_not_uao(); \ 354 switch (sizeof(*(ptr))) { \ 355 case 1: \ 356 __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ 357 (err), ARM64_HAS_UAO); \ 358 break; \ 359 case 2: \ 360 __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \ 361 (err), ARM64_HAS_UAO); \ 362 break; \ 363 case 4: \ 364 __put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \ 365 (err), ARM64_HAS_UAO); \ 366 break; \ 367 case 8: \ 368 __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \ 369 (err), ARM64_HAS_UAO); \ 370 break; \ 371 default: \ 372 BUILD_BUG(); \ 373 } \ 374 uaccess_disable_not_uao(); \ 375 } while (0) 376 377 #define __put_user_check(x, ptr, err) \ 378 ({ \ 379 __typeof__(*(ptr)) __user *__p = (ptr); \ 380 might_fault(); \ 381 if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ 382 __p = uaccess_mask_ptr(__p); \ 383 __put_user_err((x), __p, (err)); \ 384 } else { \ 385 (err) = -EFAULT; \ 386 } \ 387 }) 388 389 #define __put_user_error(x, ptr, err) \ 390 ({ \ 391 __put_user_check((x), (ptr), (err)); \ 392 (void)0; \ 393 }) 394 395 #define __put_user(x, ptr) \ 396 ({ \ 397 int __pu_err = 0; \ 398 __put_user_check((x), (ptr), __pu_err); \ 399 __pu_err; \ 400 }) 401 402 #define put_user __put_user 403 404 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); 405 #define raw_copy_from_user(to, from, n) \ 406 ({ \ 407 __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \ 408 }) 409 410 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); 411 #define raw_copy_to_user(to, from, n) \ 412 ({ \ 413 __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \ 414 }) 415 416 extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n); 417 #define raw_copy_in_user(to, from, n) \ 418 ({ \ 419 __arch_copy_in_user(__uaccess_mask_ptr(to), \ 420 __uaccess_mask_ptr(from), (n)); \ 421 }) 422 423 #define INLINE_COPY_TO_USER 424 #define INLINE_COPY_FROM_USER 425 426 extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n); 427 static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n) 428 { 429 if (access_ok(VERIFY_WRITE, to, n)) 430 n = __arch_clear_user(__uaccess_mask_ptr(to), n); 431 return n; 432 } 433 #define clear_user __clear_user 434 435 extern long strncpy_from_user(char *dest, const char __user *src, long count); 436 437 extern __must_check long strnlen_user(const char __user *str, long n); 438 439 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE 440 struct page; 441 void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len); 442 extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n); 443 444 static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) 445 { 446 kasan_check_write(dst, size); 447 return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size); 448 } 449 #endif 450 451 #endif /* __ASM_UACCESS_H */ 452