/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	spec_bar();

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}

#define segment_eq(a, b)	((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = current_thread_info()->addr_limit;

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}

#define access_ok(addr, size)	__range_ok(addr, size)
#define user_addr_max		get_fs
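/*
 * Worked example of the check above (illustrative only, assuming a
 * hypothetical 48-bit VA configuration where addr_limit == USER_DS ==
 * 0x0000ffffffffffff):
 *
 *   addr = 0x0000fffffffffff8, size = 8
 *     addr + size == 0x0001000000000000 == addr_limit + 1  -> accepted
 *   addr = 0x0000fffffffffff8, size = 9
 *     addr + size == 0x0001000000000001 >  addr_limit + 1  -> rejected
 *   addr = 0x0000000000000008, size = ~0UL
 *     the 64-bit sum wraps to 0x7, but the carry from "adds" keeps the
 *     65-bit comparison honest                              -> rejected
 *
 * With addr_limit == KERNEL_DS (all ones), addr_limit + 1 is 2^64, which
 * does not fit in 64 bits; that case is why the test is phrased in terms
 * of 65-bit arithmetic in the first place.
 */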
#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_ttbr0 placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

static inline void uaccess_disable(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %1, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}
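/*
 * Illustrative sketch of the effect (values assume addr_limit ==
 * 0x0000ffffffffffff): a pointer such as 0xffff000012345678 has bits set
 * outside the limit, so "bics" leaves Z clear and "csel" substitutes NULL;
 * a pointer at or below the limit is passed through unchanged. Because
 * addr_limit here is always of the form 2^n - 1, the bit test is equivalent
 * to a comparison against the limit but involves no conditional branch that
 * could be mispredicted, and the trailing csdb() stops later speculation
 * from running ahead of the result.
 */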
/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __raw_get_user(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, (err));			\
	} else {							\
		(x) = 0; (err) = -EFAULT;				\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __raw_put_user(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)

#define __put_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_put_user((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_error((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user
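/*
 * Typical caller-side usage (illustrative sketch; 'uptr' is a hypothetical
 * user-space pointer, e.g. one passed to a syscall or ioctl handler):
 *
 *	u32 val;
 *	int err;
 *
 *	err = get_user(val, (u32 __user *)uptr);
 *	if (err)
 *		return err;		// -EFAULT on a faulting address
 *	err = put_user(val + 1, (u32 __user *)uptr);
 *
 * get_user()/put_user() as defined above already perform the access_ok()
 * check and pointer masking, so callers need not repeat them; the
 * __raw_*() forms skip the check and must only be used on addresses that
 * have already been validated.
 */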
extern unsigned long __must_check __arch_copy_from_user(void *to,
		const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));	\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));	\
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n)					\
({									\
	__arch_copy_in_user(__uaccess_mask_ptr(to),			\
			    __uaccess_mask_ptr(from), (n));		\
})

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
	return n;
}
#define clear_user	__clear_user

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#endif /* __ASM_UACCESS_H */