#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>


/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs()	(current_thread_info()->addr_limit)
#define get_ds()	(KERNEL_DS)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr, size, segment) \
	(((segment).seg & (addr | size | (addr+size))) == 0)

#define access_ok(type, addr, size)				\
({								\
	__chk_user_ptr(addr);					\
	__access_ok(((unsigned long)(addr)), (size), get_fs());	\
})

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs())
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */
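/*
 * Illustrative sketch, not part of the original header: with USER_DS the
 * limit mask is 0xfffffc0000000000, so __access_ok() fails as soon as the
 * address, the size, or their sum has a bit at or above bit 42 set; with
 * KERNEL_DS the mask is 0 and every address passes.  A caller of the
 * checking macros above might look roughly like this (uaddr is a made-up
 * name for a user pointer):
 *
 *	unsigned long val;
 *	long err = get_user(val, (unsigned long __user *)uaddr);
 *	if (err)
 *		return err;	(err is -EFAULT on a faulting access)
 *	err = put_user(val + 1, (unsigned long __user *)uaddr);
 */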
extern void __get_user_unknown(void);

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err = 0;					\
	unsigned long __gu_val;					\
	__chk_user_ptr(ptr);					\
	switch (size) {						\
	case 1: __get_user_8(ptr); break;			\
	case 2: __get_user_16(ptr); break;			\
	case 4: __get_user_32(ptr); break;			\
	case 8: __get_user_64(ptr); break;			\
	default: __get_user_unknown(); break;			\
	}							\
	(x) = (__force __typeof__(*(ptr))) __gu_val;		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size, segment)				\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	if (__access_ok((unsigned long)__gu_addr, size, segment)) {	\
		__gu_err = 0;						\
		switch (size) {						\
		case 1: __get_user_8(__gu_addr); break;			\
		case 2: __get_user_16(__gu_addr); break;		\
		case 4: __get_user_32(__gu_addr); break;		\
		case 8: __get_user_64(__gu_addr); break;		\
		default: __get_user_unknown(); break;			\
		}							\
	}								\
	(x) = (__force __typeof__(*(ptr))) __gu_val;			\
	__gu_err;							\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_64(addr)				\
	__asm__("1: ldq %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)				\
	__asm__("1: ldl %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)				\
	__asm__("1: ldwu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)				\
	__asm__("1: ldbu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */
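/* Descriptive sketch, not in the original header: ldq_u ignores the low
   three address bits and loads the aligned quadword containing the first
   byte; the second ldq_u at addr+1 fetches the quadword containing the
   last byte (the same quadword unless the halfword straddles a boundary).
   extwl/extwh then shift the low and high pieces of the 16-bit value into
   place using the low address bits, and "or" merges them.  Either load
   can fault, so each gets its own exception-table entry below.  */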
#define __get_user_16(addr)					\
{								\
	long __gu_tmp;						\
	__asm__("1: ldq_u %0,0(%3)\n"				\
	"2:	ldq_u %1,1(%3)\n"				\
	"	extwl %0,%3,%0\n"				\
	"	extwh %1,%3,%1\n"				\
	"	or %0,%1,%0\n"					\
	"3:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda %0, 3b-1b(%2)\n"				\
	"	.long 2b - .\n"					\
	"	lda %0, 3b-2b(%2)\n"				\
	".previous"						\
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
		: "r"(addr), "2"(__gu_err));			\
}

#define __get_user_8(addr)					\
	__asm__("1: ldq_u %0,0(%2)\n"				\
	"	extbl %0,%2,%0\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda %0, 2b-1b(%1)\n"				\
	".previous"						\
		: "=&r"(__gu_val), "=r"(__gu_err)		\
		: "r"(addr), "1"(__gu_err))
#endif

extern void __put_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err = 0;					\
	__chk_user_ptr(ptr);					\
	switch (size) {						\
	case 1: __put_user_8(x, ptr); break;			\
	case 2: __put_user_16(x, ptr); break;			\
	case 4: __put_user_32(x, ptr); break;			\
	case 8: __put_user_64(x, ptr); break;			\
	default: __put_user_unknown(); break;			\
	}							\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size, segment)				\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	if (__access_ok((unsigned long)__pu_addr, size, segment)) {	\
		__pu_err = 0;						\
		switch (size) {						\
		case 1: __put_user_8(x, __pu_addr); break;		\
		case 2: __put_user_16(x, __pu_addr); break;		\
		case 4: __put_user_32(x, __pu_addr); break;		\
		case 8: __put_user_64(x, __pu_addr); break;		\
		default: __put_user_unknown(); break;			\
		}							\
	}								\
	__pu_err;							\
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues
 */
#define __put_user_64(x, addr)					\
__asm__ __volatile__("1: stq %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x, addr)					\
__asm__ __volatile__("1: stl %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __put_user_16(x, addr)					\
__asm__ __volatile__("1: stw %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x, addr)					\
__asm__ __volatile__("1: stb %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */
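/* Descriptive sketch, not in the original header: this is a pre-BWX
   read-modify-write.  Both quadwords that may contain the halfword are
   loaded with ldq_u, inswl/inswh place the new bytes in the right byte
   lanes, mskwl/mskwh clear those lanes in the old data, "or" merges old
   and new, and stq_u writes both quadwords back.  All four memory
   accesses can fault, hence the four exception entries.  */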
#define __put_user_16(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
	__asm__ __volatile__(					\
	"1:	ldq_u %2,1(%5)\n"				\
	"2:	ldq_u %1,0(%5)\n"				\
	"	inswh %6,%5,%4\n"				\
	"	inswl %6,%5,%3\n"				\
	"	mskwh %2,%5,%2\n"				\
	"	mskwl %1,%5,%1\n"				\
	"	or %2,%4,%2\n"					\
	"	or %1,%3,%1\n"					\
	"3:	stq_u %2,1(%5)\n"				\
	"4:	stq_u %1,0(%5)\n"				\
	"5:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31, 5b-1b(%0)\n"				\
	"	.long 2b - .\n"					\
	"	lda $31, 5b-2b(%0)\n"				\
	"	.long 3b - .\n"					\
	"	lda $31, 5b-3b(%0)\n"				\
	"	.long 4b - .\n"					\
	"	lda $31, 5b-4b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err), "=&r"(__pu_tmp1),		\
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),		\
		  "=&r"(__pu_tmp4)				\
		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2;				\
	__asm__ __volatile__(					\
	"1:	ldq_u %1,0(%4)\n"				\
	"	insbl %3,%4,%2\n"				\
	"	mskbl %1,%4,%1\n"				\
	"	or %1,%2,%1\n"					\
	"2:	stq_u %1,0(%4)\n"				\
	"3:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31, 3b-1b(%0)\n"				\
	"	.long 2b - .\n"					\
	"	lda $31, 3b-2b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err),				\
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif


/*
 * Complex access routines
 */

/* This little bit of silliness is to get the GP loaded for a function
   that ordinarily wouldn't.  Otherwise we could have it done by the macro
   directly, which can be optimized by the linker.  */
#ifdef MODULE
#define __module_address(sym)		"r"(sym),
#define __module_call(ra, arg, sym)	"jsr $" #ra ",(%" #arg ")," #sym
#else
#define __module_address(sym)
#define __module_call(ra, arg, sym)	"bsr $" #ra "," #sym " !samegp"
#endif

extern void __copy_user(void);

extern inline long
__copy_tofrom_user_nocheck(void *to, const void *from, long len)
{
	register void * __cu_to __asm__("$6") = to;
	register const void * __cu_from __asm__("$7") = from;
	register long __cu_len __asm__("$0") = len;

	__asm__ __volatile__(
		__module_call(28, 3, __copy_user)
		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
		: __module_address(__copy_user)
		  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
		: "$1", "$2", "$3", "$4", "$5", "$28", "memory");

	return __cu_len;
}

#define __copy_to_user(to, from, n)					\
({									\
	__chk_user_ptr(to);						\
	__copy_tofrom_user_nocheck((__force void *)(to), (from), (n));	\
})
#define __copy_from_user(to, from, n)					\
({									\
	__chk_user_ptr(from);						\
	__copy_tofrom_user_nocheck((to), (__force void *)(from), (n));	\
})

#define __copy_to_user_inatomic		__copy_to_user
#define __copy_from_user_inatomic	__copy_from_user

extern inline long
copy_to_user(void __user *to, const void *from, long n)
{
	if (likely(__access_ok((unsigned long)to, n, get_fs())))
		n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
	return n;
}

extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
	long res = n;
	if (likely(__access_ok((unsigned long)from, n, get_fs())))
		res = __copy_from_user_inatomic(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
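/*
 * Illustrative sketch, not part of the original header: both copy
 * routines return the number of bytes that could NOT be copied, so zero
 * means success, and copy_from_user() zero-fills the uncopied tail of
 * the kernel buffer (see the memset above).  A typical caller, using the
 * made-up names struct foo and uarg, might look roughly like this:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */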
extern void __do_clear_user(void);

extern inline long
__clear_user(void __user *to, long len)
{
	register void __user * __cl_to __asm__("$6") = to;
	register long __cl_len __asm__("$0") = len;
	__asm__ __volatile__(
		__module_call(28, 2, __do_clear_user)
		: "=r"(__cl_len), "=r"(__cl_to)
		: __module_address(__do_clear_user)
		  "0"(__cl_len), "1"(__cl_to)
		: "$1", "$2", "$3", "$4", "$5", "$28", "memory");
	return __cl_len;
}

extern inline long
clear_user(void __user *to, long len)
{
	if (__access_ok((unsigned long)to, len, get_fs()))
		len = __clear_user(to, len);
	return len;
}

#undef __module_address
#undef __module_call

#define user_addr_max() \
	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)

extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

/*
 * About the exception table:
 *
 *  - insn is a 32-bit pc-relative offset from the faulting insn.
 *  - nextinsn is a 16-bit offset off of the faulting instruction
 *    (not off of the *next* instruction as branches are).
 *  - errreg is the register in which to place -EFAULT.
 *  - valreg is the final target register for the load sequence
 *    and will be zeroed.
 *
 * Either errreg or valreg may be $31, in which case nothing happens.
 *
 * The exception fixup information "just so happens" to be arranged
 * as in a MEM format instruction.  This lets us emit our three
 * values like so:
 *
 *	lda valreg, nextinsn(errreg)
 *
 */

struct exception_table_entry
{
	signed int insn;
	union exception_fixup {
		unsigned unit;
		struct {
			signed int nextinsn : 16;
			unsigned int errreg : 5;
			unsigned int valreg : 5;
		} bits;
	} fixup;
};

/* Returns the new pc */
#define fixup_exception(map_reg, _fixup, pc)			\
({								\
	if ((_fixup)->fixup.bits.valreg != 31)			\
		map_reg((_fixup)->fixup.bits.valreg) = 0;	\
	if ((_fixup)->fixup.bits.errreg != 31)			\
		map_reg((_fixup)->fixup.bits.errreg) = -EFAULT;	\
	(pc) + (_fixup)->fixup.bits.nextinsn;			\
})

#define ARCH_HAS_RELATIVE_EXTABLE

#define swap_ex_entry_fixup(a, b, tmp, delta)			\
	do {							\
		(a)->fixup.unit = (b)->fixup.unit;		\
		(b)->fixup.unit = (tmp).fixup.unit;		\
	} while (0)


#endif /* __ALPHA_UACCESS_H */