#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define get_fs()	(current_thread_info()->addr_limit)
#define get_ds()	(KERNEL_DS)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * The address is valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr, size, segment) \
	(((segment).seg & (addr | size | (addr+size))) == 0)

#define access_ok(type, addr, size)				\
({								\
	__chk_user_ptr(addr);					\
	__access_ok(((unsigned long)(addr)), (size), get_fs());	\
})

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs())
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()").
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
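/*
 * For illustration only (not part of this header's API): a minimal
 * sketch of the check-once-then-__get_user pattern described above.
 * The function and its arguments are hypothetical.
 */
#if 0	/* example, not compiled */
static long example_read_pair(const int __user *uptr, int *a, int *b)
{
	long err;

	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
		return -EFAULT;
	err = __get_user(*a, uptr);	/* no per-access recheck needed */
	if (!err)
		err = __get_user(*b, uptr + 1);
	return err;
}
#endif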
/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive "About the exception table" comments near the end
 * of this file for more information.
 */

extern void __get_user_unknown(void);

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err = 0;					\
	unsigned long __gu_val;					\
	__chk_user_ptr(ptr);					\
	switch (size) {						\
	case 1: __get_user_8(ptr); break;			\
	case 2: __get_user_16(ptr); break;			\
	case 4: __get_user_32(ptr); break;			\
	case 8: __get_user_64(ptr); break;			\
	default: __get_user_unknown(); break;			\
	}							\
	(x) = (__force __typeof__(*(ptr))) __gu_val;		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size, segment)				\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	if (__access_ok((unsigned long)__gu_addr, size, segment)) {	\
		__gu_err = 0;						\
		switch (size) {						\
		case 1: __get_user_8(__gu_addr); break;			\
		case 2: __get_user_16(__gu_addr); break;		\
		case 4: __get_user_32(__gu_addr); break;		\
		case 8: __get_user_64(__gu_addr); break;		\
		default: __get_user_unknown(); break;			\
		}							\
	}								\
	(x) = (__force __typeof__(*(ptr))) __gu_val;			\
	__gu_err;							\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_64(addr)				\
	__asm__("1: ldq %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)				\
	__asm__("1: ldl %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
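/*
 * Worked example of the encoding above, for __get_user_32: the
 * __ex_table entry records the pc-relative address of the "1:" ldl,
 * followed by an lda whose MEM-format fields the fault handler
 * reinterprets as valreg = %0 (__gu_val), errreg = %1 (__gu_err) and
 * nextinsn = 2b-1b.  Roughly, on a fault the handler does the
 * following (pseudocode; regs[] stands for the saved register file):
 */
#if 0	/* illustration only, not compiled */
	regs[valreg] = 0;		/* __gu_val reads as 0		*/
	regs[errreg] = -EFAULT;		/* __gu_err reports the fault	*/
	pc = faulting_pc + nextinsn;	/* resume at label 2		*/
#endif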
#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */

#define __get_user_16(addr)				\
	__asm__("1: ldwu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)				\
	__asm__("1: ldbu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */

#define __get_user_16(addr)						\
{									\
	long __gu_tmp;							\
	__asm__("1: ldq_u %0,0(%3)\n"					\
	"2:	ldq_u %1,1(%3)\n"					\
	"	extwl %0,%3,%0\n"					\
	"	extwh %1,%3,%1\n"					\
	"	or %0,%1,%0\n"						\
	"3:\n"								\
	".section __ex_table,\"a\"\n"					\
	"	.long 1b - .\n"						\
	"	lda %0, 3b-1b(%2)\n"					\
	"	.long 2b - .\n"						\
	"	lda %0, 3b-2b(%2)\n"					\
	".previous"							\
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
		: "r"(addr), "2"(__gu_err));				\
}

#define __get_user_8(addr)						\
	__asm__("1: ldq_u %0,0(%2)\n"					\
	"	extbl %0,%2,%0\n"					\
	"2:\n"								\
	".section __ex_table,\"a\"\n"					\
	"	.long 1b - .\n"						\
	"	lda %0, 2b-1b(%1)\n"					\
	".previous"							\
		: "=&r"(__gu_val), "=r"(__gu_err)			\
		: "r"(addr), "1"(__gu_err))
#endif

extern void __put_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err = 0;					\
	__chk_user_ptr(ptr);					\
	switch (size) {						\
	case 1: __put_user_8(x, ptr); break;			\
	case 2: __put_user_16(x, ptr); break;			\
	case 4: __put_user_32(x, ptr); break;			\
	case 8: __put_user_64(x, ptr); break;			\
	default: __put_user_unknown(); break;			\
	}							\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size, segment)				\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	if (__access_ok((unsigned long)__pu_addr, size, segment)) {	\
		__pu_err = 0;						\
		switch (size) {						\
		case 1: __put_user_8(x, __pu_addr); break;		\
		case 2: __put_user_16(x, __pu_addr); break;		\
		case 4: __put_user_32(x, __pu_addr); break;		\
		case 8: __put_user_64(x, __pu_addr); break;		\
		default: __put_user_unknown(); break;			\
		}							\
	}								\
	__pu_err;							\
})
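/*
 * For illustration only (hypothetical caller): put_user()/get_user()
 * expand to the *_check variants above and evaluate to 0 on success or
 * -EFAULT on a failed check or faulting access, so callers typically
 * just propagate the result.
 */
#if 0	/* example, not compiled */
static long example_store_status(int __user *uptr, int status)
{
	return put_user(status, uptr);
}
#endif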
/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues.
 */
#define __put_user_64(x, addr)					\
__asm__ __volatile__("1: stq %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x, addr)					\
__asm__ __volatile__("1: stl %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */

#define __put_user_16(x, addr)					\
__asm__ __volatile__("1: stw %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x, addr)					\
__asm__ __volatile__("1: stb %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */

#define __put_user_16(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
	__asm__ __volatile__(					\
	"1:	ldq_u %2,1(%5)\n"				\
	"2:	ldq_u %1,0(%5)\n"				\
	"	inswh %6,%5,%4\n"				\
	"	inswl %6,%5,%3\n"				\
	"	mskwh %2,%5,%2\n"				\
	"	mskwl %1,%5,%1\n"				\
	"	or %2,%4,%2\n"					\
	"	or %1,%3,%1\n"					\
	"3:	stq_u %2,1(%5)\n"				\
	"4:	stq_u %1,0(%5)\n"				\
	"5:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31, 5b-1b(%0)\n"				\
	"	.long 2b - .\n"					\
	"	lda $31, 5b-2b(%0)\n"				\
	"	.long 3b - .\n"					\
	"	lda $31, 5b-3b(%0)\n"				\
	"	.long 4b - .\n"					\
	"	lda $31, 5b-4b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err), "=&r"(__pu_tmp1),		\
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),		\
		  "=&r"(__pu_tmp4)				\
		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2;				\
	__asm__ __volatile__(					\
	"1:	ldq_u %1,0(%4)\n"				\
	"	insbl %3,%4,%2\n"				\
	"	mskbl %1,%4,%1\n"				\
	"	or %1,%2,%1\n"					\
	"2:	stq_u %1,0(%4)\n"				\
	"3:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31, 3b-1b(%0)\n"				\
	"	.long 2b - .\n"					\
	"	lda $31, 3b-2b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err),				\
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif


/*
 * Complex access routines
 */

/* This little bit of silliness is to get the GP loaded for a function
   that ordinarily wouldn't.  Otherwise we could have it done by the macro
   directly, which can be optimized away by the linker.  */
#ifdef MODULE
#define __module_address(sym)		"r"(sym),
#define __module_call(ra, arg, sym)	"jsr $" #ra ",(%" #arg ")," #sym
#else
#define __module_address(sym)
#define __module_call(ra, arg, sym)	"bsr $" #ra "," #sym " !samegp"
#endif

extern void __copy_user(void);

extern inline long
__copy_tofrom_user_nocheck(void *to, const void *from, long len)
{
	register void * __cu_to __asm__("$6") = to;
	register const void * __cu_from __asm__("$7") = from;
	register long __cu_len __asm__("$0") = len;

	__asm__ __volatile__(
		__module_call(28, 3, __copy_user)
		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
		: __module_address(__copy_user)
		  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
		: "$1", "$2", "$3", "$4", "$5", "$28", "memory");

	return __cu_len;
}

#define __copy_to_user(to, from, n)					\
({									\
	__chk_user_ptr(to);						\
	__copy_tofrom_user_nocheck((__force void *)(to), (from), (n));	\
})
#define __copy_from_user(to, from, n)					\
({									\
	__chk_user_ptr(from);						\
	__copy_tofrom_user_nocheck((to), (__force void *)(from), (n));	\
})

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern inline long
copy_to_user(void __user *to, const void *from, long n)
{
	if (likely(__access_ok((unsigned long)to, n, get_fs())))
		n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
	return n;
}

extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
	long res = n;
	if (likely(__access_ok((unsigned long)from, n, get_fs())))
		res = __copy_from_user_inatomic(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
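/*
 * For illustration only (hypothetical caller): copy_{to,from}_user
 * return the number of bytes that could *not* be copied, so zero means
 * success.  Note that copy_from_user above also zero-fills the uncopied
 * tail of the kernel buffer after a partial fault.
 */
#if 0	/* example, not compiled */
static long example_fetch(void *kbuf, const void __user *ubuf, long n)
{
	if (copy_from_user(kbuf, ubuf, n))
		return -EFAULT;
	return 0;
}
#endif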
extern void __do_clear_user(void);

extern inline long
__clear_user(void __user *to, long len)
{
	register void __user * __cl_to __asm__("$6") = to;
	register long __cl_len __asm__("$0") = len;
	__asm__ __volatile__(
		__module_call(28, 2, __do_clear_user)
		: "=r"(__cl_len), "=r"(__cl_to)
		: __module_address(__do_clear_user)
		  "0"(__cl_len), "1"(__cl_to)
		: "$1", "$2", "$3", "$4", "$5", "$28", "memory");
	return __cl_len;
}

extern inline long
clear_user(void __user *to, long len)
{
	if (__access_ok((unsigned long)to, len, get_fs()))
		len = __clear_user(to, len);
	return len;
}

#undef __module_address
#undef __module_call

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : TASK_SIZE)

extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

/*
 * About the exception table:
 *
 *  - insn is a 32-bit pc-relative offset from the faulting insn.
 *  - nextinsn is a 16-bit offset off of the faulting instruction
 *    (not off of the *next* instruction as branches are).
 *  - errreg is the register in which to place -EFAULT.
 *  - valreg is the final target register for the load sequence
 *    and will be zeroed.
 *
 * Either errreg or valreg may be $31, in which case nothing happens.
 *
 * The exception fixup information "just so happens" to be arranged
 * as in a MEM format instruction.  This lets us emit our three
 * values like so:
 *
 *      lda valreg, nextinsn(errreg)
 *
 */

struct exception_table_entry
{
	signed int insn;
	union exception_fixup {
		unsigned unit;
		struct {
			signed int nextinsn : 16;
			unsigned int errreg : 5;
			unsigned int valreg : 5;
		} bits;
	} fixup;
};

/* Returns the new pc */
#define fixup_exception(map_reg, _fixup, pc)			\
({								\
	if ((_fixup)->fixup.bits.valreg != 31)			\
		map_reg((_fixup)->fixup.bits.valreg) = 0;	\
	if ((_fixup)->fixup.bits.errreg != 31)			\
		map_reg((_fixup)->fixup.bits.errreg) = -EFAULT;	\
	(pc) + (_fixup)->fixup.bits.nextinsn;			\
})

#define ARCH_HAS_RELATIVE_EXTABLE

#define swap_ex_entry_fixup(a, b, tmp, delta)			\
	do {							\
		(a)->fixup.unit = (b)->fixup.unit;		\
		(b)->fixup.unit = (tmp).fixup.unit;		\
	} while (0)
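/*
 * For illustration only (hypothetical trap-handler fragment): a fault
 * in one of the sequences above is resolved by looking up the faulting
 * pc and applying the fixup, roughly like so (map_reg is assumed to map
 * a register number to the saved register file):
 */
#if 0	/* example, not compiled */
	const struct exception_table_entry *fix;

	fix = search_exception_tables(regs->pc);
	if (fix)
		regs->pc = fixup_exception(map_reg, fix, regs->pc);
#endif

#endif /* __ALPHA_UACCESS_H */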