/*
 *  linux/kernel/compat.c
 *
 *  Kernel compatibility routines for e.g. 32 bit syscall support
 *  on 64 bit kernels.
 *
 *  Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/signal.h>
#include <linux/sched.h>	/* for MAX_SCHEDULE_TIMEOUT */
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/security.h>
#include <linux/timex.h>
#include <linux/export.h>
#include <linux/migrate.h>
#include <linux/posix-timers.h>
#include <linux/times.h>
#include <linux/ptrace.h>
#include <linux/gfp.h>

#include <asm/uaccess.h>

/*
 * Note that the native side is already converted to a timespec, because
 * that's what we want anyway.
 */
static int compat_get_timeval(struct timespec *o,
		struct compat_timeval __user *i)
{
	long usec;

	if (get_user(o->tv_sec, &i->tv_sec) ||
	    get_user(usec, &i->tv_usec))
		return -EFAULT;
	o->tv_nsec = usec * 1000;
	return 0;
}

static int compat_put_timeval(struct compat_timeval __user *o,
		struct timeval *i)
{
	return (put_user(i->tv_sec, &o->tv_sec) ||
		put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0;
}

static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp)
{
	memset(txc, 0, sizeof(struct timex));

	if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
			__get_user(txc->modes, &utp->modes) ||
			__get_user(txc->offset, &utp->offset) ||
			__get_user(txc->freq, &utp->freq) ||
			__get_user(txc->maxerror, &utp->maxerror) ||
			__get_user(txc->esterror, &utp->esterror) ||
			__get_user(txc->status, &utp->status) ||
			__get_user(txc->constant, &utp->constant) ||
			__get_user(txc->precision, &utp->precision) ||
			__get_user(txc->tolerance, &utp->tolerance) ||
			__get_user(txc->time.tv_sec, &utp->time.tv_sec) ||
			__get_user(txc->time.tv_usec, &utp->time.tv_usec) ||
			__get_user(txc->tick, &utp->tick) ||
			__get_user(txc->ppsfreq, &utp->ppsfreq) ||
			__get_user(txc->jitter, &utp->jitter) ||
			__get_user(txc->shift, &utp->shift) ||
			__get_user(txc->stabil, &utp->stabil) ||
			__get_user(txc->jitcnt, &utp->jitcnt) ||
			__get_user(txc->calcnt, &utp->calcnt) ||
			__get_user(txc->errcnt, &utp->errcnt) ||
			__get_user(txc->stbcnt, &utp->stbcnt))
		return -EFAULT;

	return 0;
}

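/*
 * Copy a kernel struct timex back out to a 32-bit userspace
 * compat_timex, one field at a time (including the tai field, which
 * the get side above does not read).
 */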
static int compat_put_timex(struct compat_timex __user *utp, struct timex *txc)
{
	if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
			__put_user(txc->modes, &utp->modes) ||
			__put_user(txc->offset, &utp->offset) ||
			__put_user(txc->freq, &utp->freq) ||
			__put_user(txc->maxerror, &utp->maxerror) ||
			__put_user(txc->esterror, &utp->esterror) ||
			__put_user(txc->status, &utp->status) ||
			__put_user(txc->constant, &utp->constant) ||
			__put_user(txc->precision, &utp->precision) ||
			__put_user(txc->tolerance, &utp->tolerance) ||
			__put_user(txc->time.tv_sec, &utp->time.tv_sec) ||
			__put_user(txc->time.tv_usec, &utp->time.tv_usec) ||
			__put_user(txc->tick, &utp->tick) ||
			__put_user(txc->ppsfreq, &utp->ppsfreq) ||
			__put_user(txc->jitter, &utp->jitter) ||
			__put_user(txc->shift, &utp->shift) ||
			__put_user(txc->stabil, &utp->stabil) ||
			__put_user(txc->jitcnt, &utp->jitcnt) ||
			__put_user(txc->calcnt, &utp->calcnt) ||
			__put_user(txc->errcnt, &utp->errcnt) ||
			__put_user(txc->stbcnt, &utp->stbcnt) ||
			__put_user(txc->tai, &utp->tai))
		return -EFAULT;
	return 0;
}

asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
		struct timezone __user *tz)
{
	if (tv) {
		struct timeval ktv;
		do_gettimeofday(&ktv);
		if (compat_put_timeval(tv, &ktv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}

	return 0;
}

asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
		struct timezone __user *tz)
{
	struct timespec kts;
	struct timezone ktz;

	if (tv) {
		if (compat_get_timeval(&kts, tv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_from_user(&ktz, tz, sizeof(ktz)))
			return -EFAULT;
	}

	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}

int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
{
	return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
			__get_user(ts->tv_sec, &cts->tv_sec) ||
			__get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}

int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
{
	return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
			__put_user(ts->tv_sec, &cts->tv_sec) ||
			__put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_compat_timespec);

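/*
 * nanosleep: call the native hrtimer code with a kernel-resident rmtp
 * under set_fs(KERNEL_DS), then translate any remaining time into the
 * compat layout.  When the sleep is interrupted, the restart block is
 * redirected to compat_nanosleep_restart() so the conversion is redone
 * on restart.
 */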
static long compat_nanosleep_restart(struct restart_block *restart)
{
	struct compat_timespec __user *rmtp;
	struct timespec rmt;
	mm_segment_t oldfs;
	long ret;

	restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = hrtimer_nanosleep_restart(restart);
	set_fs(oldfs);

	if (ret) {
		rmtp = restart->nanosleep.compat_rmtp;

		if (rmtp && put_compat_timespec(&rmt, rmtp))
			return -EFAULT;
	}

	return ret;
}

asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
				     struct compat_timespec __user *rmtp)
{
	struct timespec tu, rmt;
	mm_segment_t oldfs;
	long ret;

	if (get_compat_timespec(&tu, rqtp))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = hrtimer_nanosleep(&tu,
				rmtp ? (struct timespec __user *)&rmt : NULL,
				HRTIMER_MODE_REL, CLOCK_MONOTONIC);
	set_fs(oldfs);

	if (ret) {
		struct restart_block *restart
			= &current_thread_info()->restart_block;

		restart->fn = compat_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;

		if (rmtp && put_compat_timespec(&rmt, rmtp))
			return -EFAULT;
	}

	return ret;
}

static inline long get_compat_itimerval(struct itimerval *o,
		struct compat_itimerval __user *i)
{
	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
		(__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
		 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
		 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
		 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}

static inline long put_compat_itimerval(struct compat_itimerval __user *o,
		struct itimerval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
		 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
		 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
		 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
}

asmlinkage long compat_sys_getitimer(int which,
		struct compat_itimerval __user *it)
{
	struct itimerval kit;
	int error;

	error = do_getitimer(which, &kit);
	if (!error && put_compat_itimerval(it, &kit))
		error = -EFAULT;
	return error;
}

asmlinkage long compat_sys_setitimer(int which,
		struct compat_itimerval __user *in,
		struct compat_itimerval __user *out)
{
	struct itimerval kin, kout;
	int error;

	if (in) {
		if (get_compat_itimerval(&kin, in))
			return -EFAULT;
	} else
		memset(&kin, 0, sizeof(kin));

	error = do_setitimer(which, &kin, out ? &kout : NULL);
	if (error || !out)
		return error;
	if (put_compat_itimerval(out, &kout))
		return -EFAULT;
	return 0;
}

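/*
 * sys_times() reports native clock_t values; convert them via jiffies
 * so they fit the 32-bit compat_clock_t representation.
 */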
static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}

#ifdef __ARCH_WANT_SYS_SIGPENDING

/*
 * Assumption: old_sigset_t and compat_old_sigset_t are both
 * types that can be passed to put_user()/get_user().
 */

asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
{
	old_sigset_t s;
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_sigpending((old_sigset_t __user *) &s);
	set_fs(old_fs);
	if (ret == 0)
		ret = put_user(s, set);
	return ret;
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK

asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
		compat_old_sigset_t __user *oset)
{
	old_sigset_t s;
	long ret;
	mm_segment_t old_fs;

	if (set && get_user(s, set))
		return -EFAULT;
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_sigprocmask(how,
			      set ? (old_sigset_t __user *) &s : NULL,
			      oset ? (old_sigset_t __user *) &s : NULL);
	set_fs(old_fs);
	if (ret == 0)
		if (oset)
			ret = put_user(s, oset);
	return ret;
}

#endif

asmlinkage long compat_sys_setrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;

	if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
	    __get_user(r.rlim_cur, &rlim->rlim_cur) ||
	    __get_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;

	if (r.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	if (r.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	return do_prlimit(current, resource, &r, NULL);
}

#ifdef COMPAT_RLIM_OLD_INFINITY

asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_old_getrlimit(resource, &r);
	set_fs(old_fs);

	if (!ret) {
		if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY)
			r.rlim_cur = COMPAT_RLIM_INFINITY;
		if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY)
			r.rlim_max = COMPAT_RLIM_INFINITY;

		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
		    __put_user(r.rlim_max, &rlim->rlim_max))
			return -EFAULT;
	}
	return ret;
}

#endif

asmlinkage long compat_sys_getrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r.rlim_cur = COMPAT_RLIM_INFINITY;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r.rlim_max = COMPAT_RLIM_INFINITY;

		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
		    __put_user(r.rlim_max, &rlim->rlim_max))
			return -EFAULT;
	}
	return ret;
}

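/*
 * Copy a kernel struct rusage out to the 32-bit layout.  Used by the
 * compat getrusage(), wait4() and waitid() paths below.
 */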
int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
{
	if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) ||
	    __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) ||
	    __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) ||
	    __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) ||
	    __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) ||
	    __put_user(r->ru_maxrss, &ru->ru_maxrss) ||
	    __put_user(r->ru_ixrss, &ru->ru_ixrss) ||
	    __put_user(r->ru_idrss, &ru->ru_idrss) ||
	    __put_user(r->ru_isrss, &ru->ru_isrss) ||
	    __put_user(r->ru_minflt, &ru->ru_minflt) ||
	    __put_user(r->ru_majflt, &ru->ru_majflt) ||
	    __put_user(r->ru_nswap, &ru->ru_nswap) ||
	    __put_user(r->ru_inblock, &ru->ru_inblock) ||
	    __put_user(r->ru_oublock, &ru->ru_oublock) ||
	    __put_user(r->ru_msgsnd, &ru->ru_msgsnd) ||
	    __put_user(r->ru_msgrcv, &ru->ru_msgrcv) ||
	    __put_user(r->ru_nsignals, &ru->ru_nsignals) ||
	    __put_user(r->ru_nvcsw, &ru->ru_nvcsw) ||
	    __put_user(r->ru_nivcsw, &ru->ru_nivcsw))
		return -EFAULT;
	return 0;
}

asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
{
	struct rusage r;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_getrusage(who, (struct rusage __user *) &r);
	set_fs(old_fs);

	if (ret)
		return ret;

	if (put_compat_rusage(&r, ru))
		return -EFAULT;

	return 0;
}

asmlinkage long
compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
	struct compat_rusage __user *ru)
{
	if (!ru) {
		return sys_wait4(pid, stat_addr, options, NULL);
	} else {
		struct rusage r;
		int ret;
		unsigned int status;
		mm_segment_t old_fs = get_fs();

		set_fs (KERNEL_DS);
		ret = sys_wait4(pid,
				(stat_addr ?
				 (unsigned int __user *) &status : NULL),
				options, (struct rusage __user *) &r);
		set_fs (old_fs);

		if (ret > 0) {
			if (put_compat_rusage(&r, ru))
				return -EFAULT;
			if (stat_addr && put_user(status, stat_addr))
				return -EFAULT;
		}
		return ret;
	}
}

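/*
 * waitid: run the native syscall against kernel copies of the siginfo
 * and rusage under KERNEL_DS, then convert both back.  si_code is
 * tagged with __SI_CHLD so copy_siginfo_to_user32() can pick the right
 * siginfo layout.
 */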
asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
		struct compat_siginfo __user *uinfo, int options,
		struct compat_rusage __user *uru)
{
	siginfo_t info;
	struct rusage ru;
	long ret;
	mm_segment_t old_fs = get_fs();

	memset(&info, 0, sizeof(info));

	set_fs(KERNEL_DS);
	ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
			 uru ? (struct rusage __user *)&ru : NULL);
	set_fs(old_fs);

	if ((ret < 0) || (info.si_signo == 0))
		return ret;

	if (uru) {
		ret = put_compat_rusage(&ru, uru);
		if (ret)
			return ret;
	}

	BUG_ON(info.si_code & __SI_MASK);
	info.si_code |= __SI_CHLD;
	return copy_siginfo_to_user32(uinfo, &info);
}

static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
				    unsigned len, struct cpumask *new_mask)
{
	unsigned long *k;

	if (len < cpumask_size())
		memset(new_mask, 0, cpumask_size());
	else if (len > cpumask_size())
		len = cpumask_size();

	k = cpumask_bits(new_mask);
	return compat_get_bitmap(k, user_mask_ptr, len * 8);
}

asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
					     unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval)
		goto out;

	retval = sched_setaffinity(pid, new_mask);
out:
	free_cpumask_var(new_mask);
	return retval;
}

asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(compat_ulong_t)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());

		if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

int get_compat_itimerspec(struct itimerspec *dst,
			  const struct compat_itimerspec __user *src)
{
	if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
	    get_compat_timespec(&dst->it_value, &src->it_value))
		return -EFAULT;
	return 0;
}

int put_compat_itimerspec(struct compat_itimerspec __user *dst,
			  const struct itimerspec *src)
{
	if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
	    put_compat_timespec(&src->it_value, &dst->it_value))
		return -EFAULT;
	return 0;
}

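/*
 * timer_create: convert the compat sigevent to the native layout and
 * stage it in compat_alloc_user_space(), then pass that user pointer
 * to the native sys_timer_create().
 */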
long compat_sys_timer_create(clockid_t which_clock,
			struct compat_sigevent __user *timer_event_spec,
			timer_t __user *created_timer_id)
{
	struct sigevent __user *event = NULL;

	if (timer_event_spec) {
		struct sigevent kevent;

		event = compat_alloc_user_space(sizeof(*event));
		if (get_compat_sigevent(&kevent, timer_event_spec) ||
		    copy_to_user(event, &kevent, sizeof(*event)))
			return -EFAULT;
	}

	return sys_timer_create(which_clock, event, created_timer_id);
}

long compat_sys_timer_settime(timer_t timer_id, int flags,
			  struct compat_itimerspec __user *new,
			  struct compat_itimerspec __user *old)
{
	long err;
	mm_segment_t oldfs;
	struct itimerspec newts, oldts;

	if (!new)
		return -EINVAL;
	if (get_compat_itimerspec(&newts, new))
		return -EFAULT;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_settime(timer_id, flags,
				(struct itimerspec __user *) &newts,
				(struct itimerspec __user *) &oldts);
	set_fs(oldfs);
	if (!err && old && put_compat_itimerspec(old, &oldts))
		return -EFAULT;
	return err;
}

long compat_sys_timer_gettime(timer_t timer_id,
		struct compat_itimerspec __user *setting)
{
	long err;
	mm_segment_t oldfs;
	struct itimerspec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_gettime(timer_id,
				(struct itimerspec __user *) &ts);
	set_fs(oldfs);
	if (!err && put_compat_itimerspec(setting, &ts))
		return -EFAULT;
	return err;
}

long compat_sys_clock_settime(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	if (get_compat_timespec(&ts, tp))
		return -EFAULT;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_settime(which_clock,
				(struct timespec __user *) &ts);
	set_fs(oldfs);
	return err;
}

long compat_sys_clock_gettime(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_gettime(which_clock,
				(struct timespec __user *) &ts);
	set_fs(oldfs);
	if (!err && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}

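/*
 * clock_adjtime: on success the native syscall returns the NTP clock
 * state rather than zero, so the updated timex is copied back before
 * that value is passed through.
 */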
long compat_sys_clock_adjtime(clockid_t which_clock,
		struct compat_timex __user *utp)
{
	struct timex txc;
	mm_segment_t oldfs;
	int err, ret;

	err = compat_get_timex(&txc, utp);
	if (err)
		return err;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
	set_fs(oldfs);

	err = compat_put_timex(utp, &txc);
	if (err)
		return err;

	return ret;
}

long compat_sys_clock_getres(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_getres(which_clock,
			       (struct timespec __user *) &ts);
	set_fs(oldfs);
	if (!err && tp && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}

static long compat_clock_nanosleep_restart(struct restart_block *restart)
{
	long err;
	mm_segment_t oldfs;
	struct timespec tu;
	struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;

	restart->nanosleep.rmtp = (struct timespec __user *) &tu;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = clock_nanosleep_restart(restart);
	set_fs(oldfs);

	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
	    put_compat_timespec(&tu, rmtp))
		return -EFAULT;

	if (err == -ERESTART_RESTARTBLOCK) {
		restart->fn = compat_clock_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;
	}
	return err;
}

long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
			    struct compat_timespec __user *rqtp,
			    struct compat_timespec __user *rmtp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec in, out;
	struct restart_block *restart;

	if (get_compat_timespec(&in, rqtp))
		return -EFAULT;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_nanosleep(which_clock, flags,
				  (struct timespec __user *) &in,
				  (struct timespec __user *) &out);
	set_fs(oldfs);

	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
	    put_compat_timespec(&out, rmtp))
		return -EFAULT;

	if (err == -ERESTART_RESTARTBLOCK) {
		restart = &current_thread_info()->restart_block;
		restart->fn = compat_clock_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;
	}
	return err;
}

/*
 * We currently only need the following fields from the sigevent
 * structure: sigev_value, sigev_signo, sigev_notify and (sometimes
 * sigev_notify_thread_id).  The others are handled in user mode.
 * We also assume that copying sigev_value.sival_int is sufficient
 * to keep all the bits of sigev_value.sival_ptr intact.
 */
int get_compat_sigevent(struct sigevent *event,
		const struct compat_sigevent __user *u_event)
{
	memset(event, 0, sizeof(*event));
	return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) ||
		__get_user(event->sigev_value.sival_int,
			&u_event->sigev_value.sival_int) ||
		__get_user(event->sigev_signo, &u_event->sigev_signo) ||
		__get_user(event->sigev_notify, &u_event->sigev_notify) ||
		__get_user(event->sigev_notify_thread_id,
			&u_event->sigev_notify_thread_id))
		? -EFAULT : 0;
}

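/*
 * Read a bitmap of compat_ulong_t words from userspace and assemble it
 * into native unsigned longs (two compat words per native word on a
 * 64-bit kernel).  Words beyond the end of the user buffer read as
 * zero so the tail of the kernel bitmap is cleared.
 */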
long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
		       unsigned long bitmap_size)
{
	int i, j;
	unsigned long m;
	compat_ulong_t um;
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

	if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
		return -EFAULT;

	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
		m = 0;

		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
			/*
			 * We don't want to read past the end of the userspace
			 * bitmap. We must however ensure the end of the
			 * kernel bitmap is zeroed.
			 */
			if (nr_compat_longs-- > 0) {
				if (__get_user(um, umask))
					return -EFAULT;
			} else {
				um = 0;
			}

			umask++;
			m |= (long)um << (j * BITS_PER_COMPAT_LONG);
		}
		*mask++ = m;
	}

	return 0;
}

long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
		       unsigned long bitmap_size)
{
	int i, j;
	unsigned long m;
	compat_ulong_t um;
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

	if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
		return -EFAULT;

	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
		m = *mask++;

		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
			um = m;

			/*
			 * We don't want to write past the end of the userspace
			 * bitmap.
			 */
			if (nr_compat_longs-- > 0) {
				if (__put_user(um, umask))
					return -EFAULT;
			}

			umask++;
			m >>= 4*sizeof(um);
			m >>= 4*sizeof(um);
		}
	}

	return 0;
}

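/*
 * Build a native sigset_t from a compat_sigset_t: each native 64-bit
 * word is assembled from two 32-bit compat words.
 */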
void
sigset_from_compat(sigset_t *set, compat_sigset_t *compat)
{
	switch (_NSIG_WORDS) {
	case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32);
	case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32);
	case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32);
	case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32);
	}
}
EXPORT_SYMBOL_GPL(sigset_from_compat);

asmlinkage long
compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
		struct compat_siginfo __user *uinfo,
		struct compat_timespec __user *uts, compat_size_t sigsetsize)
{
	compat_sigset_t s32;
	sigset_t s;
	struct timespec t;
	siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&s, &s32);

	if (uts) {
		if (get_compat_timespec(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

asmlinkage long
compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
			     struct compat_siginfo __user *uinfo)
{
	siginfo_t info;

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef __ARCH_WANT_COMPAT_SYS_TIME

/* compat_time_t is a 32 bit "long" and needs to get converted. */

asmlinkage long compat_sys_time(compat_time_t __user *tloc)
{
	compat_time_t i;
	struct timeval tv;

	do_gettimeofday(&tv);
	i = tv.tv_sec;

	if (tloc) {
		if (put_user(i, tloc))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return i;
}

asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
{
	struct timespec tv;
	int err;

	if (get_user(tv.tv_sec, tptr))
		return -EFAULT;

	tv.tv_nsec = 0;

	err = security_settime(&tv, NULL);
	if (err)
		return err;

	do_settimeofday(&tv);
	return 0;
}

#endif /* __ARCH_WANT_COMPAT_SYS_TIME */

#ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize)
{
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	current->saved_sigmask = current->blocked;
	set_current_blocked(&newset);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */

asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
{
	struct timex txc;
	int err, ret;

	err = compat_get_timex(&txc, utp);
	if (err)
		return err;

	ret = do_adjtimex(&txc);

	err = compat_put_timex(utp, &txc);
	if (err)
		return err;

	return ret;
}

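/*
 * NUMA syscalls: move_pages() receives an array of 32-bit page
 * pointers which is rebuilt as native pointers in compat-allocated
 * user space; migrate_pages() stages native-format node bitmaps there
 * in the same way before calling the native syscalls.
 */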
#ifdef CONFIG_NUMA
asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
		compat_uptr_t __user *pages32,
		const int __user *nodes,
		int __user *status,
		int flags)
{
	const void __user * __user *pages;
	int i;

	pages = compat_alloc_user_space(nr_pages * sizeof(void *));
	for (i = 0; i < nr_pages; i++) {
		compat_uptr_t p;

		if (get_user(p, pages32 + i) ||
			put_user(compat_ptr(p), pages + i))
			return -EFAULT;
	}
	return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
}

asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
			compat_ulong_t maxnode,
			const compat_ulong_t __user *old_nodes,
			const compat_ulong_t __user *new_nodes)
{
	unsigned long __user *old = NULL;
	unsigned long __user *new = NULL;
	nodemask_t tmp_mask;
	unsigned long nr_bits;
	unsigned long size;

	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
	if (old_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
			return -EFAULT;
		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
		if (new_nodes)
			new = old + size / sizeof(unsigned long);
		if (copy_to_user(old, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	if (new_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
			return -EFAULT;
		if (new == NULL)
			new = compat_alloc_user_space(size);
		if (copy_to_user(new, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	return sys_migrate_pages(pid, nr_bits + 1, old, new);
}
#endif

struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

asmlinkage long
compat_sys_sysinfo(struct compat_sysinfo __user *info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/* Check to see if any memory value is too large for 32-bit and scale
	 *  down if needed
	 */
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
	    __put_user(s.uptime, &info->uptime) ||
	    __put_user(s.loads[0], &info->loads[0]) ||
	    __put_user(s.loads[1], &info->loads[1]) ||
	    __put_user(s.loads[2], &info->loads[2]) ||
	    __put_user(s.totalram, &info->totalram) ||
	    __put_user(s.freeram, &info->freeram) ||
	    __put_user(s.sharedram, &info->sharedram) ||
	    __put_user(s.bufferram, &info->bufferram) ||
	    __put_user(s.totalswap, &info->totalswap) ||
	    __put_user(s.freeswap, &info->freeswap) ||
	    __put_user(s.procs, &info->procs) ||
	    __put_user(s.totalhigh, &info->totalhigh) ||
	    __put_user(s.freehigh, &info->freehigh) ||
	    __put_user(s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}

/*
 * Allocate user-space memory for the duration of a single system call,
 * in order to marshall parameters inside a compat thunk.
 */
void __user *compat_alloc_user_space(unsigned long len)
{
	void __user *ptr;

	/* If len would occupy more than half of the entire compat space... */
	if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
		return NULL;

	ptr = arch_compat_alloc_user_space(len);

	if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
		return NULL;

	return ptr;
}
EXPORT_SYMBOL_GPL(compat_alloc_user_space);