#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/asm-compat.h>
#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)

#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough gap
 * between user addresses and the kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) &&		\
	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr),			\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
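/*
 * Worked example (illustrative note, not from the original source):
 * assuming the usual 32-bit TASK_SIZE of 0xc0000000, USER_DS.seg is
 * 0xbfffffff.  For addr = 0xbfff0000 and size = 0x20000, we get
 * size - 1 = 0x1ffff but seg - addr = 0xffff, so the check rejects a
 * range whose last byte (0xc000ffff) falls in kernel space even though
 * addr itself is a legal user address.  The (size - 1) form also avoids
 * the wrap-around that a naive addr + size comparison would be
 * exposed to when addr + size overflows an unsigned long.
 */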
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn;
	unsigned long fixup;
};

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
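/*
 * Usage sketch (illustrative only; "uptr" stands for a hypothetical
 * int __user * argument and is not defined in this header):
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 *
 * Both macros return 0 on success and -EFAULT on failure.  After a
 * single explicit access_ok() check over an area, the unchecked
 * __get_user()/__put_user() variants may be used for the remaining
 * accesses, as the comment above describes.
 */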
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned	__get_user
#define __put_user_unaligned	__put_user

extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1: " op " %1,0(%2) # put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3: li %0,%3\n"					\
		"   b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1: stw %1,0(%2)\n"				\
		"2: stw %1+1,4(%2)\n"				\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4: li %0,%3\n"					\
		"   b 3b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 4b)				\
		EX_TABLE(2b, 4b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	might_fault();						\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))		\
		__put_user_size((x), __pu_addr, (size), __pu_err); \
	__pu_err;						\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})


extern long __get_user_bad(void);

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1: "op" %1,0(%2) # get_user\n"		\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3: li %0,%3\n"				\
		"   li %1,0\n"				\
		"   b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1: lwz %1,0(%2)\n"			\
		"2: lwz %1+1,4(%2)\n"			\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4: li %0,%3\n"				\
		"   li %1,0\n"				\
		"   li %1+1,0\n"			\
		"   b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_fault();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)				\
({								\
	long __gu_err = -EFAULT;				\
	unsigned long __gu_val = 0;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	might_fault();						\
	if (access_ok(VERIFY_READ, __gu_addr, (size)))		\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})


/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifndef __powerpc64__

static inline unsigned long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	if (likely(access_ok(VERIFY_READ, from, n))) {
		check_object_size(to, n, false);
		return __copy_tofrom_user((__force void __user *)to, from, n);
	}
	memset(to, 0, n);
	return n;
}

static inline unsigned long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		check_object_size(from, n, true);
		return __copy_tofrom_user(to, (__force void __user *)from, n);
	}
	return n;
}

#else /* __powerpc64__ */

#define __copy_in_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))

extern unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
				  unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
				  unsigned long n);

#endif /* __powerpc64__ */
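/*
 * Usage sketch (illustrative only; "uarg" and struct demo_args are
 * hypothetical): the copy routines return the number of bytes that
 * could NOT be copied, so zero means success:
 *
 *	struct demo_args a;
 *
 *	if (copy_from_user(&a, uarg, sizeof(a)))
 *		return -EFAULT;
 */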
static inline unsigned long __copy_from_user_inatomic(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	check_object_size(to, n, false);

	return __copy_tofrom_user((__force void __user *)to, from, n);
}

static inline unsigned long __copy_to_user_inatomic(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	check_object_size(from, n, true);

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long __copy_from_user(void *to,
		const void __user *from, unsigned long size)
{
	might_fault();
	return __copy_from_user_inatomic(to, from, size);
}

static inline unsigned long __copy_to_user(void __user *to,
		const void *from, unsigned long size)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, size);
}

extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	if (likely(access_ok(VERIFY_WRITE, addr, size)))
		return __clear_user(addr, size);
	return size;
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* _ARCH_POWERPC_UACCESS_H */