xref: /linux/kernel/compat.c (revision 69fb09f6ccdb2f070557fd1f4c56c4d646694c8e)
1 /*
2  *  linux/kernel/compat.c
3  *
4  *  Kernel compatibility routines for e.g. 32-bit syscall support
5  *  on 64-bit kernels.
6  *
7  *  Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
8  *
9  *  This program is free software; you can redistribute it and/or modify
10  *  it under the terms of the GNU General Public License version 2 as
11  *  published by the Free Software Foundation.
12  */
13 
14 #include <linux/linkage.h>
15 #include <linux/compat.h>
16 #include <linux/errno.h>
17 #include <linux/time.h>
18 #include <linux/signal.h>
19 #include <linux/sched.h>	/* for MAX_SCHEDULE_TIMEOUT */
20 #include <linux/syscalls.h>
21 #include <linux/unistd.h>
22 #include <linux/security.h>
23 #include <linux/timex.h>
24 #include <linux/export.h>
25 #include <linux/migrate.h>
26 #include <linux/posix-timers.h>
27 #include <linux/times.h>
28 #include <linux/ptrace.h>
29 #include <linux/gfp.h>
30 
31 #include <linux/uaccess.h>
32 
33 int compat_get_timex(struct timex *txc, const struct compat_timex __user *utp)
34 {
35 	struct compat_timex tx32;
36 
37 	if (copy_from_user(&tx32, utp, sizeof(struct compat_timex)))
38 		return -EFAULT;
39 
40 	txc->modes = tx32.modes;
41 	txc->offset = tx32.offset;
42 	txc->freq = tx32.freq;
43 	txc->maxerror = tx32.maxerror;
44 	txc->esterror = tx32.esterror;
45 	txc->status = tx32.status;
46 	txc->constant = tx32.constant;
47 	txc->precision = tx32.precision;
48 	txc->tolerance = tx32.tolerance;
49 	txc->time.tv_sec = tx32.time.tv_sec;
50 	txc->time.tv_usec = tx32.time.tv_usec;
51 	txc->tick = tx32.tick;
52 	txc->ppsfreq = tx32.ppsfreq;
53 	txc->jitter = tx32.jitter;
54 	txc->shift = tx32.shift;
55 	txc->stabil = tx32.stabil;
56 	txc->jitcnt = tx32.jitcnt;
57 	txc->calcnt = tx32.calcnt;
58 	txc->errcnt = tx32.errcnt;
59 	txc->stbcnt = tx32.stbcnt;
60 
61 	return 0;
62 }
63 
64 int compat_put_timex(struct compat_timex __user *utp, const struct timex *txc)
65 {
66 	struct compat_timex tx32;
67 
68 	memset(&tx32, 0, sizeof(struct compat_timex));
69 	tx32.modes = txc->modes;
70 	tx32.offset = txc->offset;
71 	tx32.freq = txc->freq;
72 	tx32.maxerror = txc->maxerror;
73 	tx32.esterror = txc->esterror;
74 	tx32.status = txc->status;
75 	tx32.constant = txc->constant;
76 	tx32.precision = txc->precision;
77 	tx32.tolerance = txc->tolerance;
78 	tx32.time.tv_sec = txc->time.tv_sec;
79 	tx32.time.tv_usec = txc->time.tv_usec;
80 	tx32.tick = txc->tick;
81 	tx32.ppsfreq = txc->ppsfreq;
82 	tx32.jitter = txc->jitter;
83 	tx32.shift = txc->shift;
84 	tx32.stabil = txc->stabil;
85 	tx32.jitcnt = txc->jitcnt;
86 	tx32.calcnt = txc->calcnt;
87 	tx32.errcnt = txc->errcnt;
88 	tx32.stbcnt = txc->stbcnt;
89 	tx32.tai = txc->tai;
90 	if (copy_to_user(utp, &tx32, sizeof(struct compat_timex)))
91 		return -EFAULT;
92 	return 0;
93 }
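
/*
 * Illustrative sketch (not part of this file): roughly how a compat
 * entry point defined elsewhere, such as the 32-bit adjtimex wrapper,
 * would use the two helpers above -- convert the caller's compat_timex
 * into a native struct timex, run the native code, then convert the
 * result back.
 */
#if 0	/* example only */
COMPAT_SYSCALL_DEFINE1(adjtimex, struct compat_timex __user *, utp)
{
	struct timex txc;
	int err, ret;

	err = compat_get_timex(&txc, utp);
	if (err)
		return err;

	ret = do_adjtimex(&txc);

	err = compat_put_timex(utp, &txc);
	if (err)
		return err;

	return ret;
}
#endif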
94 
95 static int __compat_get_timeval(struct timeval *tv, const struct compat_timeval __user *ctv)
96 {
97 	return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) ||
98 			__get_user(tv->tv_sec, &ctv->tv_sec) ||
99 			__get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
100 }
101 
102 static int __compat_put_timeval(const struct timeval *tv, struct compat_timeval __user *ctv)
103 {
104 	return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) ||
105 			__put_user(tv->tv_sec, &ctv->tv_sec) ||
106 			__put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
107 }
108 
109 static int __compat_get_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
110 {
111 	return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
112 			__get_user(ts->tv_sec, &cts->tv_sec) ||
113 			__get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
114 }
115 
116 static int __compat_put_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
117 {
118 	return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
119 			__put_user(ts->tv_sec, &cts->tv_sec) ||
120 			__put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
121 }
122 
123 int compat_get_timeval(struct timeval *tv, const void __user *utv)
124 {
125 	if (COMPAT_USE_64BIT_TIME)
126 		return copy_from_user(tv, utv, sizeof(*tv)) ? -EFAULT : 0;
127 	else
128 		return __compat_get_timeval(tv, utv);
129 }
130 EXPORT_SYMBOL_GPL(compat_get_timeval);
131 
132 int compat_put_timeval(const struct timeval *tv, void __user *utv)
133 {
134 	if (COMPAT_USE_64BIT_TIME)
135 		return copy_to_user(utv, tv, sizeof(*tv)) ? -EFAULT : 0;
136 	else
137 		return __compat_put_timeval(tv, utv);
138 }
139 EXPORT_SYMBOL_GPL(compat_put_timeval);
140 
141 int compat_get_timespec(struct timespec *ts, const void __user *uts)
142 {
143 	if (COMPAT_USE_64BIT_TIME)
144 		return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;
145 	else
146 		return __compat_get_timespec(ts, uts);
147 }
148 EXPORT_SYMBOL_GPL(compat_get_timespec);
149 
150 int compat_put_timespec(const struct timespec *ts, void __user *uts)
151 {
152 	if (COMPAT_USE_64BIT_TIME)
153 		return copy_to_user(uts, ts, sizeof(*ts)) ? -EFAULT : 0;
154 	else
155 		return __compat_put_timespec(ts, uts);
156 }
157 EXPORT_SYMBOL_GPL(compat_put_timespec);
158 
159 int compat_convert_timespec(struct timespec __user **kts,
160 			    const void __user *cts)
161 {
162 	struct timespec ts;
163 	struct timespec __user *uts;
164 
165 	if (!cts || COMPAT_USE_64BIT_TIME) {
166 		*kts = (struct timespec __user *)cts;
167 		return 0;
168 	}
169 
170 	uts = compat_alloc_user_space(sizeof(ts));
171 	if (!uts)
172 		return -EFAULT;
173 	if (compat_get_timespec(&ts, cts))
174 		return -EFAULT;
175 	if (copy_to_user(uts, &ts, sizeof(ts)))
176 		return -EFAULT;
177 
178 	*kts = uts;
179 	return 0;
180 }
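
/*
 * Illustrative sketch (not part of this file; the example_* names are
 * hypothetical): a compat handler that must hand a native
 * struct timespec __user * to common code can use
 * compat_convert_timespec(), which either aliases the caller's buffer
 * (64-bit time ABIs) or bounces the value through the scratch area set
 * up by compat_alloc_user_space() below.
 */
#if 0	/* example only */
static long example_compat_wait(const void __user *utimeout)
{
	struct timespec __user *kts;
	int err;

	err = compat_convert_timespec(&kts, utimeout);
	if (err)
		return err;

	return example_native_wait(kts);	/* hypothetical native helper */
}
#endif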
181 
182 int get_compat_itimerval(struct itimerval *o, const struct compat_itimerval __user *i)
183 {
184 	struct compat_itimerval v32;
185 
186 	if (copy_from_user(&v32, i, sizeof(struct compat_itimerval)))
187 		return -EFAULT;
188 	o->it_interval.tv_sec = v32.it_interval.tv_sec;
189 	o->it_interval.tv_usec = v32.it_interval.tv_usec;
190 	o->it_value.tv_sec = v32.it_value.tv_sec;
191 	o->it_value.tv_usec = v32.it_value.tv_usec;
192 	return 0;
193 }
194 
195 int put_compat_itimerval(struct compat_itimerval __user *o, const struct itimerval *i)
196 {
197 	struct compat_itimerval v32;
198 
199 	v32.it_interval.tv_sec = i->it_interval.tv_sec;
200 	v32.it_interval.tv_usec = i->it_interval.tv_usec;
201 	v32.it_value.tv_sec = i->it_value.tv_sec;
202 	v32.it_value.tv_usec = i->it_value.tv_usec;
203 	return copy_to_user(o, &v32, sizeof(struct compat_itimerval)) ? -EFAULT : 0;
204 }
205 
206 static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
207 {
208 	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
209 }
210 
211 COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
212 {
213 	if (tbuf) {
214 		struct tms tms;
215 		struct compat_tms tmp;
216 
217 		do_sys_times(&tms);
218 		/* Convert our struct tms to the compat version. */
219 		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
220 		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
221 		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
222 		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
223 		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
224 			return -EFAULT;
225 	}
226 	force_successful_syscall_return();
227 	return compat_jiffies_to_clock_t(jiffies);
228 }
229 
230 #ifdef __ARCH_WANT_SYS_SIGPENDING
231 
232 /*
233  * Assumption: old_sigset_t and compat_old_sigset_t are both
234  * types that can be passed to put_user()/get_user().
235  */
236 
237 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
238 {
239 	old_sigset_t s;
240 	long ret;
241 	mm_segment_t old_fs = get_fs();
242 
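	/*
	 * Run the native syscall with the address limit lifted so it may
	 * write through a __user-annotated pointer into the kernel-stack
	 * buffer; several wrappers below use this same get_fs()/set_fs()
	 * pattern.
	 */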
243 	set_fs(KERNEL_DS);
244 	ret = sys_sigpending((old_sigset_t __user *) &s);
245 	set_fs(old_fs);
246 	if (ret == 0)
247 		ret = put_user(s, set);
248 	return ret;
249 }
250 
251 #endif
252 
253 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
254 
255 /*
256  * sys_sigprocmask SIG_SETMASK sets the first (compat) word of the
257  * blocked set of signals to the supplied signal set
258  */
259 static inline void compat_sig_setmask(sigset_t *blocked, compat_sigset_word set)
260 {
261 	memcpy(blocked->sig, &set, sizeof(set));
262 }
263 
264 COMPAT_SYSCALL_DEFINE3(sigprocmask, int, how,
265 		       compat_old_sigset_t __user *, nset,
266 		       compat_old_sigset_t __user *, oset)
267 {
268 	old_sigset_t old_set, new_set;
269 	sigset_t new_blocked;
270 
271 	old_set = current->blocked.sig[0];
272 
273 	if (nset) {
274 		if (get_user(new_set, nset))
275 			return -EFAULT;
276 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
277 
278 		new_blocked = current->blocked;
279 
280 		switch (how) {
281 		case SIG_BLOCK:
282 			sigaddsetmask(&new_blocked, new_set);
283 			break;
284 		case SIG_UNBLOCK:
285 			sigdelsetmask(&new_blocked, new_set);
286 			break;
287 		case SIG_SETMASK:
288 			compat_sig_setmask(&new_blocked, new_set);
289 			break;
290 		default:
291 			return -EINVAL;
292 		}
293 
294 		set_current_blocked(&new_blocked);
295 	}
296 
297 	if (oset) {
298 		if (put_user(old_set, oset))
299 			return -EFAULT;
300 	}
301 
302 	return 0;
303 }
304 
305 #endif
306 
307 COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
308 		       struct compat_rlimit __user *, rlim)
309 {
310 	struct rlimit r;
311 
312 	if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
313 	    __get_user(r.rlim_cur, &rlim->rlim_cur) ||
314 	    __get_user(r.rlim_max, &rlim->rlim_max))
315 		return -EFAULT;
316 
317 	if (r.rlim_cur == COMPAT_RLIM_INFINITY)
318 		r.rlim_cur = RLIM_INFINITY;
319 	if (r.rlim_max == COMPAT_RLIM_INFINITY)
320 		r.rlim_max = RLIM_INFINITY;
321 	return do_prlimit(current, resource, &r, NULL);
322 }
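
/*
 * Note: 32-bit ABIs express RLIM_INFINITY in a 32-bit rlim_t
 * (COMPAT_RLIM_INFINITY, an architecture-defined constant), so the
 * wrapper above and the getrlimit wrappers below translate between it
 * and the native 64-bit RLIM_INFINITY rather than passing the raw value
 * through unchanged.
 */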
323 
324 #ifdef COMPAT_RLIM_OLD_INFINITY
325 
326 COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
327 		       struct compat_rlimit __user *, rlim)
328 {
329 	struct rlimit r;
330 	int ret;
331 	mm_segment_t old_fs = get_fs();
332 
333 	set_fs(KERNEL_DS);
334 	ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
335 	set_fs(old_fs);
336 
337 	if (!ret) {
338 		if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY)
339 			r.rlim_cur = COMPAT_RLIM_INFINITY;
340 		if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY)
341 			r.rlim_max = COMPAT_RLIM_INFINITY;
342 
343 		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
344 		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
345 		    __put_user(r.rlim_max, &rlim->rlim_max))
346 			return -EFAULT;
347 	}
348 	return ret;
349 }
350 
351 #endif
352 
353 COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
354 		       struct compat_rlimit __user *, rlim)
355 {
356 	struct rlimit r;
357 	int ret;
358 
359 	ret = do_prlimit(current, resource, NULL, &r);
360 	if (!ret) {
361 		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
362 			r.rlim_cur = COMPAT_RLIM_INFINITY;
363 		if (r.rlim_max > COMPAT_RLIM_INFINITY)
364 			r.rlim_max = COMPAT_RLIM_INFINITY;
365 
366 		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
367 		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
368 		    __put_user(r.rlim_max, &rlim->rlim_max))
369 			return -EFAULT;
370 	}
371 	return ret;
372 }
373 
374 int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
375 {
376 	if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) ||
377 	    __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) ||
378 	    __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) ||
379 	    __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) ||
380 	    __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) ||
381 	    __put_user(r->ru_maxrss, &ru->ru_maxrss) ||
382 	    __put_user(r->ru_ixrss, &ru->ru_ixrss) ||
383 	    __put_user(r->ru_idrss, &ru->ru_idrss) ||
384 	    __put_user(r->ru_isrss, &ru->ru_isrss) ||
385 	    __put_user(r->ru_minflt, &ru->ru_minflt) ||
386 	    __put_user(r->ru_majflt, &ru->ru_majflt) ||
387 	    __put_user(r->ru_nswap, &ru->ru_nswap) ||
388 	    __put_user(r->ru_inblock, &ru->ru_inblock) ||
389 	    __put_user(r->ru_oublock, &ru->ru_oublock) ||
390 	    __put_user(r->ru_msgsnd, &ru->ru_msgsnd) ||
391 	    __put_user(r->ru_msgrcv, &ru->ru_msgrcv) ||
392 	    __put_user(r->ru_nsignals, &ru->ru_nsignals) ||
393 	    __put_user(r->ru_nvcsw, &ru->ru_nvcsw) ||
394 	    __put_user(r->ru_nivcsw, &ru->ru_nivcsw))
395 		return -EFAULT;
396 	return 0;
397 }
398 
399 COMPAT_SYSCALL_DEFINE4(wait4,
400 	compat_pid_t, pid,
401 	compat_uint_t __user *, stat_addr,
402 	int, options,
403 	struct compat_rusage __user *, ru)
404 {
405 	if (!ru) {
406 		return sys_wait4(pid, stat_addr, options, NULL);
407 	} else {
408 		struct rusage r;
409 		int ret;
410 		unsigned int status;
411 		mm_segment_t old_fs = get_fs();
412 
413 		set_fs(KERNEL_DS);
414 		ret = sys_wait4(pid,
415 				(stat_addr ?
416 				 (unsigned int __user *) &status : NULL),
417 				options, (struct rusage __user *) &r);
418 		set_fs(old_fs);
419 
420 		if (ret > 0) {
421 			if (put_compat_rusage(&r, ru))
422 				return -EFAULT;
423 			if (stat_addr && put_user(status, stat_addr))
424 				return -EFAULT;
425 		}
426 		return ret;
427 	}
428 }
429 
430 COMPAT_SYSCALL_DEFINE5(waitid,
431 		int, which, compat_pid_t, pid,
432 		struct compat_siginfo __user *, uinfo, int, options,
433 		struct compat_rusage __user *, uru)
434 {
435 	siginfo_t info;
436 	struct rusage ru;
437 	long ret;
438 	mm_segment_t old_fs = get_fs();
439 
440 	memset(&info, 0, sizeof(info));
441 
442 	set_fs(KERNEL_DS);
443 	ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
444 			 uru ? (struct rusage __user *)&ru : NULL);
445 	set_fs(old_fs);
446 
447 	if ((ret < 0) || (info.si_signo == 0))
448 		return ret;
449 
450 	if (uru) {
451 		/* sys_waitid() overwrites everything in ru */
452 		if (COMPAT_USE_64BIT_TIME)
453 			ret = copy_to_user(uru, &ru, sizeof(ru));
454 		else
455 			ret = put_compat_rusage(&ru, uru);
456 		if (ret)
457 			return -EFAULT;
458 	}
459 
460 	BUG_ON(info.si_code & __SI_MASK);
461 	info.si_code |= __SI_CHLD;
462 	return copy_siginfo_to_user32(uinfo, &info);
463 }
464 
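/*
 * Example for the helper below (illustrative, assuming cpumask_size() is
 * 8 bytes): a 32-bit caller passing len == 4 gets the whole kernel mask
 * cleared first and only bits 0-31 read from user space, while len == 16
 * is clamped to 8 so at most cpumask_size() bytes are ever converted.
 */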
465 static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
466 				    unsigned len, struct cpumask *new_mask)
467 {
468 	unsigned long *k;
469 
470 	if (len < cpumask_size())
471 		memset(new_mask, 0, cpumask_size());
472 	else if (len > cpumask_size())
473 		len = cpumask_size();
474 
475 	k = cpumask_bits(new_mask);
476 	return compat_get_bitmap(k, user_mask_ptr, len * 8);
477 }
478 
479 COMPAT_SYSCALL_DEFINE3(sched_setaffinity, compat_pid_t, pid,
480 		       unsigned int, len,
481 		       compat_ulong_t __user *, user_mask_ptr)
482 {
483 	cpumask_var_t new_mask;
484 	int retval;
485 
486 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
487 		return -ENOMEM;
488 
489 	retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask);
490 	if (retval)
491 		goto out;
492 
493 	retval = sched_setaffinity(pid, new_mask);
494 out:
495 	free_cpumask_var(new_mask);
496 	return retval;
497 }
498 
499 COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len,
500 		       compat_ulong_t __user *, user_mask_ptr)
501 {
502 	int ret;
503 	cpumask_var_t mask;
504 
505 	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
506 		return -EINVAL;
507 	if (len & (sizeof(compat_ulong_t)-1))
508 		return -EINVAL;
509 
510 	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
511 		return -ENOMEM;
512 
513 	ret = sched_getaffinity(pid, mask);
514 	if (ret == 0) {
515 		size_t retlen = min_t(size_t, len, cpumask_size());
516 
517 		if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
518 			ret = -EFAULT;
519 		else
520 			ret = retlen;
521 	}
522 	free_cpumask_var(mask);
523 
524 	return ret;
525 }
526 
527 int get_compat_itimerspec(struct itimerspec *dst,
528 			  const struct compat_itimerspec __user *src)
529 {
530 	if (__compat_get_timespec(&dst->it_interval, &src->it_interval) ||
531 	    __compat_get_timespec(&dst->it_value, &src->it_value))
532 		return -EFAULT;
533 	return 0;
534 }
535 
536 int put_compat_itimerspec(struct compat_itimerspec __user *dst,
537 			  const struct itimerspec *src)
538 {
539 	if (__compat_put_timespec(&src->it_interval, &dst->it_interval) ||
540 	    __compat_put_timespec(&src->it_value, &dst->it_value))
541 		return -EFAULT;
542 	return 0;
543 }
544 
545 /*
546  * We currently only need the following fields from the sigevent
547  * structure: sigev_value, sigev_signo, sig_notify and (sometimes
548  * structure: sigev_value, sigev_signo, sigev_notify and (sometimes
549  * We also assume that copying sigev_value.sival_int is sufficient
550  * to keep all the bits of sigev_value.sival_ptr intact.
551  */
552 int get_compat_sigevent(struct sigevent *event,
553 		const struct compat_sigevent __user *u_event)
554 {
555 	memset(event, 0, sizeof(*event));
556 	return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) ||
557 		__get_user(event->sigev_value.sival_int,
558 			&u_event->sigev_value.sival_int) ||
559 		__get_user(event->sigev_signo, &u_event->sigev_signo) ||
560 		__get_user(event->sigev_notify, &u_event->sigev_notify) ||
561 		__get_user(event->sigev_notify_thread_id,
562 			&u_event->sigev_notify_thread_id))
563 		? -EFAULT : 0;
564 }
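
/*
 * Illustrative sketch (not part of this file): roughly how a compat
 * entry point defined elsewhere, e.g. the 32-bit timer_create() wrapper,
 * combines get_compat_sigevent() with compat_alloc_user_space() to hand
 * the native syscall a native-layout sigevent.
 */
#if 0	/* example only */
COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
		       struct compat_sigevent __user *, timer_event_spec,
		       timer_t __user *, created_timer_id)
{
	struct sigevent __user *event = NULL;

	if (timer_event_spec) {
		struct sigevent kevent;

		event = compat_alloc_user_space(sizeof(*event));
		if (get_compat_sigevent(&kevent, timer_event_spec) ||
		    copy_to_user(event, &kevent, sizeof(*event)))
			return -EFAULT;
	}
	return sys_timer_create(which_clock, event, created_timer_id);
}
#endif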
565 
566 long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
567 		       unsigned long bitmap_size)
568 {
569 	int i, j;
570 	unsigned long m;
571 	compat_ulong_t um;
572 	unsigned long nr_compat_longs;
573 
574 	/* align bitmap up to nearest compat_long_t boundary */
575 	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
576 
577 	if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
578 		return -EFAULT;
579 
580 	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
581 
582 	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
583 		m = 0;
584 
585 		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
586 			/*
587 			 * We don't want to read past the end of the userspace
588 			 * bitmap. We must however ensure the end of the
589 			 * kernel bitmap is zeroed.
590 			 */
591 			if (nr_compat_longs) {
592 				nr_compat_longs--;
593 				if (__get_user(um, umask))
594 					return -EFAULT;
595 			} else {
596 				um = 0;
597 			}
598 
599 			umask++;
600 			m |= (long)um << (j * BITS_PER_COMPAT_LONG);
601 		}
602 		*mask++ = m;
603 	}
604 
605 	return 0;
606 }
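
/*
 * Worked example for compat_get_bitmap() above (assuming a 64-bit kernel
 * with 32-bit compat longs): user words { 0x00000001, 0x80000000 } are
 * combined into one kernel word as
 *	m = 0x00000001 | ((long)0x80000000 << 32) = 0x8000000000000001,
 * i.e. compat word 0 supplies bits 0-31 and compat word 1 bits 32-63.
 */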
607 
608 long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
609 		       unsigned long bitmap_size)
610 {
611 	int i, j;
612 	unsigned long m;
613 	compat_ulong_t um;
614 	unsigned long nr_compat_longs;
615 
616 	/* align bitmap up to nearest compat_long_t boundary */
617 	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
618 
619 	if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
620 		return -EFAULT;
621 
622 	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
623 
624 	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
625 		m = *mask++;
626 
627 		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
628 			um = m;
629 
630 			/*
631 			 * We don't want to write past the end of the userspace
632 			 * bitmap.
633 			 */
634 			if (nr_compat_longs) {
635 				nr_compat_longs--;
636 				if (__put_user(um, umask))
637 					return -EFAULT;
638 			}
639 
640 			umask++;
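			/*
			 * Shift m down in two halves; a single shift by
			 * 8*sizeof(um) bits would be undefined if um were
			 * as wide as m.
			 */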
641 			m >>= 4*sizeof(um);
642 			m >>= 4*sizeof(um);
643 		}
644 	}
645 
646 	return 0;
647 }
648 
649 void
650 sigset_from_compat(sigset_t *set, const compat_sigset_t *compat)
651 {
652 	switch (_NSIG_WORDS) {
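	/* Each case deliberately falls through, filling the lower words too. */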
653 	case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32 );
654 	case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32 );
655 	case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32 );
656 	case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32 );
657 	}
658 }
659 EXPORT_SYMBOL_GPL(sigset_from_compat);
660 
661 void
662 sigset_to_compat(compat_sigset_t *compat, const sigset_t *set)
663 {
664 	switch (_NSIG_WORDS) {
665 	case 4: compat->sig[7] = (set->sig[3] >> 32); compat->sig[6] = set->sig[3];
666 	case 3: compat->sig[5] = (set->sig[2] >> 32); compat->sig[4] = set->sig[2];
667 	case 2: compat->sig[3] = (set->sig[1] >> 32); compat->sig[2] = set->sig[1];
668 	case 1: compat->sig[1] = (set->sig[0] >> 32); compat->sig[0] = set->sig[0];
669 	}
670 }
671 
672 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
673 		struct compat_siginfo __user *, uinfo,
674 		struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
675 {
676 	compat_sigset_t s32;
677 	sigset_t s;
678 	struct timespec t;
679 	siginfo_t info;
680 	long ret;
681 
682 	if (sigsetsize != sizeof(sigset_t))
683 		return -EINVAL;
684 
685 	if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
686 		return -EFAULT;
687 	sigset_from_compat(&s, &s32);
688 
689 	if (uts) {
690 		if (compat_get_timespec(&t, uts))
691 			return -EFAULT;
692 	}
693 
694 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
695 
696 	if (ret > 0 && uinfo) {
697 		if (copy_siginfo_to_user32(uinfo, &info))
698 			ret = -EFAULT;
699 	}
700 
701 	return ret;
702 }
703 
704 #ifdef CONFIG_NUMA
705 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
706 		       compat_uptr_t __user *, pages32,
707 		       const int __user *, nodes,
708 		       int __user *, status,
709 		       int, flags)
710 {
711 	const void __user * __user *pages;
712 	int i;
713 
714 	pages = compat_alloc_user_space(nr_pages * sizeof(void *));
715 	for (i = 0; i < nr_pages; i++) {
716 		compat_uptr_t p;
717 
718 		if (get_user(p, pages32 + i) ||
719 			put_user(compat_ptr(p), pages + i))
720 			return -EFAULT;
721 	}
722 	return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
723 }
724 
725 COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
726 		       compat_ulong_t, maxnode,
727 		       const compat_ulong_t __user *, old_nodes,
728 		       const compat_ulong_t __user *, new_nodes)
729 {
730 	unsigned long __user *old = NULL;
731 	unsigned long __user *new = NULL;
732 	nodemask_t tmp_mask;
733 	unsigned long nr_bits;
734 	unsigned long size;
735 
736 	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
737 	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
738 	if (old_nodes) {
739 		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
740 			return -EFAULT;
741 		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
742 		if (new_nodes)
743 			new = old + size / sizeof(unsigned long);
744 		if (copy_to_user(old, nodes_addr(tmp_mask), size))
745 			return -EFAULT;
746 	}
747 	if (new_nodes) {
748 		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
749 			return -EFAULT;
750 		if (new == NULL)
751 			new = compat_alloc_user_space(size);
752 		if (copy_to_user(new, nodes_addr(tmp_mask), size))
753 			return -EFAULT;
754 	}
755 	return sys_migrate_pages(pid, nr_bits + 1, old, new);
756 }
757 #endif
758 
759 COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
760 		       compat_pid_t, pid,
761 		       struct compat_timespec __user *, interval)
762 {
763 	struct timespec t;
764 	int ret;
765 	mm_segment_t old_fs = get_fs();
766 
767 	set_fs(KERNEL_DS);
768 	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
769 	set_fs(old_fs);
770 	if (compat_put_timespec(&t, interval))
771 		return -EFAULT;
772 	return ret;
773 }
774 
775 /*
776  * Allocate user-space memory for the duration of a single system call,
777  * in order to marshal parameters inside a compat thunk.
778  */
779 void __user *compat_alloc_user_space(unsigned long len)
780 {
781 	void __user *ptr;
782 
783 	/* If len would occupy more than half of the entire compat space... */
784 	if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
785 		return NULL;
786 
787 	ptr = arch_compat_alloc_user_space(len);
788 
789 	if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
790 		return NULL;
791 
792 	return ptr;
793 }
794 EXPORT_SYMBOL_GPL(compat_alloc_user_space);
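
/*
 * Illustrative sketch (not part of this file; struct foo, compat_foo and
 * sys_foo are hypothetical): the usual pattern, as in the move_pages()
 * wrapper above, is to rebuild a native-layout argument in the scratch
 * area returned here and then call the native syscall with it.
 */
#if 0	/* example only */
static long example_compat_thunk(struct compat_foo __user *uarg32)
{
	struct foo __user *uarg;
	struct foo karg;

	uarg = compat_alloc_user_space(sizeof(*uarg));
	if (!uarg)
		return -EFAULT;

	memset(&karg, 0, sizeof(karg));
	/* ... convert *uarg32 into karg field by field ... */

	if (copy_to_user(uarg, &karg, sizeof(karg)))
		return -EFAULT;

	return sys_foo(uarg);
}
#endif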
795