/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * This file was copied from include/asm-generic/uaccess.h
 */

#ifndef _ASM_RISCV_UACCESS_H
#define _ASM_RISCV_UACCESS_H

#include <asm/asm-extable.h>
#include <asm/cpufeature.h>
#include <asm/pgtable.h>		/* for TASK_SIZE */

#ifdef CONFIG_RISCV_ISA_SUPM
static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigned long addr)
{
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) {
		u8 pmlen = mm->context.pmlen;

		/* Virtual addresses are sign-extended; physical addresses are zero-extended. */
		if (IS_ENABLED(CONFIG_MMU))
			return (long)(addr << pmlen) >> pmlen;
		else
			return (addr << pmlen) >> pmlen;
	}

	return addr;
}

#define untagged_addr(addr) ({						\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(current->mm, __addr); \
})

#define untagged_addr_remote(mm, addr) ({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	mmap_assert_locked(mm);						\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})

#define access_ok(addr, size) likely(__access_ok(untagged_addr(addr), size))
#else
#define untagged_addr(addr) (addr)
#endif
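
/*
 * Worked example (illustrative only, not part of the kernel API): on a
 * 64-bit kernel with Supm enabled and a hypothetical mm->context.pmlen
 * of 7, bits 63:57 of a user pointer carry the tag and bit 56 is the
 * sign bit of the canonical address.  The shift pair above then strips
 * the tag:
 *
 *	unsigned long tagged = 0x2a003ffffffff000UL;	// tag 0x15 in bits 63:57
 *	unsigned long clean  = (long)(tagged << 7) >> 7;
 *
 * yields clean == 0x00003ffffffff000UL: the tag bits are replaced by
 * copies of bit 56, matching the hardware's sign extension of virtual
 * addresses.
 */
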
/*
 * User space memory access functions
 */
#ifdef CONFIG_MMU
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/byteorder.h>
#include <asm/extable.h>
#include <asm/asm.h>
#include <asm-generic/access_ok.h>

#define __enable_user_access()						\
	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
#define __disable_user_access()						\
	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

#define __LSW	0
#define __MSW	1

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 */

#define __get_user_asm(insn, x, ptr, err)			\
do {								\
	__typeof__(x) __x;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %1, %2\n"			\
		"2:\n"						\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1)	\
		: "+r" (err), "=&r" (__x)			\
		: "m" (*(ptr)));				\
	(x) = __x;						\
} while (0)

#ifdef CONFIG_64BIT
#define __get_user_8(x, ptr, err) \
	__get_user_asm("ld", x, ptr, err)
#else /* !CONFIG_64BIT */
#define __get_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	lw %1, %3\n"				\
		"2:\n"						\
		"	lw %2, %4\n"				\
		"3:\n"						\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1)	\
		_ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1)	\
		: "+r" (err), "=&r" (__lo), "=r" (__hi)		\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]));	\
	if (err)						\
		__hi = 0;					\
	(x) = (__typeof__(x))((__typeof__((x)-(x)))(		\
		(((u64)__hi << 32) | __lo)));			\
} while (0)
#endif /* CONFIG_64BIT */

#define __get_user_nocheck(x, __gu_ptr, __gu_err)		\
do {								\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__get_user_asm("lb", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 2:							\
		__get_user_asm("lh", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 4:							\
		__get_user_asm("lw", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 8:							\
		__get_user_8((x), __gu_ptr, __gu_err);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
	long __gu_err = 0;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	__enable_user_access();					\
	__get_user_nocheck(x, __gu_ptr, __gu_err);		\
	__disable_user_access();				\
								\
	__gu_err;						\
})

/**
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__p = (ptr);		\
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?				\
		__get_user((x), __p) :				\
		((x) = (__force __typeof__(x))0, -EFAULT);	\
})
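
/*
 * Usage sketch (illustrative only, not part of this header): callers
 * normally let get_user() perform the access_ok() check itself, or
 * batch the check and use __get_user() for the individual loads.
 * "uptr" is a hypothetical u32 __user * argument:
 *
 *	u32 lo, hi;
 *
 *	if (get_user(lo, uptr))			// checked single load
 *		return -EFAULT;
 *
 *	if (!access_ok(uptr, 2 * sizeof(u32)))	// one range check...
 *		return -EFAULT;
 *	if (__get_user(lo, uptr) ||		// ...then unchecked loads
 *	    __get_user(hi, uptr + 1))
 *		return -EFAULT;
 */
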
#define __put_user_asm(insn, x, ptr, err)			\
do {								\
	__typeof__(*(ptr)) __x = x;				\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %z2, %1\n"			\
		"2:\n"						\
		_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0)		\
		: "+r" (err), "=m" (*(ptr))			\
		: "rJ" (__x));					\
} while (0)

#ifdef CONFIG_64BIT
#define __put_user_8(x, ptr, err) \
	__put_user_asm("sd", x, ptr, err)
#else /* !CONFIG_64BIT */
#define __put_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u64 __x = (__typeof__((x)-(x)))(x);			\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	sw %z3, %1\n"				\
		"2:\n"						\
		"	sw %z4, %2\n"				\
		"3:\n"						\
		_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0)		\
		_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0)		\
		: "+r" (err),					\
		  "=m" (__ptr[__LSW]),				\
		  "=m" (__ptr[__MSW])				\
		: "rJ" (__x), "rJ" (__x >> 32));		\
} while (0)
#endif /* CONFIG_64BIT */

#define __put_user_nocheck(x, __gu_ptr, __pu_err)		\
do {								\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__put_user_asm("sb", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 2:							\
		__put_user_asm("sh", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 4:							\
		__put_user_asm("sw", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 8:							\
		__put_user_8((x), __gu_ptr, __pu_err);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr. The value of @x is copied to avoid
 * re-ordering where @x is evaluated inside the block that enables user-space
 * access (thus bypassing user space protection if @x is a function).
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
	__typeof__(*__gu_ptr) __val = (x);			\
	long __pu_err = 0;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	__enable_user_access();					\
	__put_user_nocheck(__val, __gu_ptr, __pu_err);		\
	__disable_user_access();				\
								\
	__pu_err;						\
})

/**
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?				\
		__put_user((x), __p) :				\
		-EFAULT;					\
})

unsigned long __must_check __asm_copy_to_user(void __user *to,
	const void *from, unsigned long n);
unsigned long __must_check __asm_copy_from_user(void *to,
	const void __user *from, unsigned long n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __asm_copy_from_user(to, untagged_addr(from), n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __asm_copy_to_user(untagged_addr(to), from, n);
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern long __must_check strnlen_user(const char __user *str, long n);

extern
unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline
unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return access_ok(to, n) ?
		__clear_user(untagged_addr(to), n) : n;
}

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	long __kr_err = 0;						\
									\
	__get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err);	\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	long __kr_err = 0;						\
									\
	__put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err);	\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
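
/*
 * Usage sketch for the accessors above (illustrative only, not part of
 * this header). "uarg" is a hypothetical struct foo __user * and "kobj"
 * a hypothetical kernel-space structure:
 *
 *	if (put_user(status, &uarg->status))	// checked store, may fault
 *		return -EFAULT;
 *
 *	if (clear_user(uarg->buf, len))		// returns bytes NOT cleared
 *		return -EFAULT;
 *
 *	long val;
 *	__get_kernel_nofault(&val, &kobj->field, long, fault);
 *	return val;
 * fault:
 *	// reached only if the kernel-space load faulted
 *	return -EFAULT;
 */
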
#else /* CONFIG_MMU */
#include <asm-generic/uaccess.h>
#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_UACCESS_H */