1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * This file contains the procedures for the handling of select and poll
4  *
5  * Created for Linux based loosely upon Mathius Lattner's minix
6  * patches by Peter MacDonald. Heavily edited by Linus.
7  *
8  *  4 February 1994
9  *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
10  *     flag set in its personality we do *not* modify the given timeout
11  *     parameter to reflect time remaining.
12  *
13  *  24 January 2000
14  *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
15  *     of fds to overcome the nfds < 16390 descriptor limit (Tigran Aivazian).
16  */
17 
18 #include <linux/compat.h>
19 #include <linux/kernel.h>
20 #include <linux/sched/signal.h>
21 #include <linux/sched/rt.h>
22 #include <linux/syscalls.h>
23 #include <linux/export.h>
24 #include <linux/slab.h>
25 #include <linux/poll.h>
26 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
27 #include <linux/file.h>
28 #include <linux/fdtable.h>
29 #include <linux/fs.h>
30 #include <linux/rcupdate.h>
31 #include <linux/hrtimer.h>
32 #include <linux/freezer.h>
33 #include <net/busy_poll.h>
34 #include <linux/vmalloc.h>
35 
36 #include <linux/uaccess.h>
37 
38 
39 /*
40  * Estimate expected accuracy in ns from a timespec.
41  *
42  * After quite a bit of churning around, we've settled on
43  * a simple thing of taking 0.1% of the timeout as the
44  * slack, with a cap of 100 msec.
45  * "nice" tasks get a 0.5% slack instead.
46  *
47  * Consider this comment an open invitation to come up with even
48  * better solutions..
49  */
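/*
 * For example, a 2 s timeout yields roughly 2 ms of slack for a normal
 * task and roughly 10 ms for a "nice" task, both comfortably under the
 * 100 msec cap (MAX_SLACK below).
 */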
50 
51 #define MAX_SLACK	(100 * NSEC_PER_MSEC)
52 
53 static long __estimate_accuracy(struct timespec64 *tv)
54 {
55 	long slack;
56 	int divfactor = 1000;
57 
58 	if (tv->tv_sec < 0)
59 		return 0;
60 
61 	if (task_nice(current) > 0)
62 		divfactor = divfactor / 5;
63 
64 	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
65 		return MAX_SLACK;
66 
67 	slack = tv->tv_nsec / divfactor;
68 	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);
69 
70 	if (slack > MAX_SLACK)
71 		return MAX_SLACK;
72 
73 	return slack;
74 }
75 
76 u64 select_estimate_accuracy(struct timespec64 *tv)
77 {
78 	u64 ret;
79 	struct timespec64 now;
80 	u64 slack = current->timer_slack_ns;
81 
82 	if (slack == 0)
83 		return 0;
84 
85 	ktime_get_ts64(&now);
86 	now = timespec64_sub(*tv, now);
87 	ret = __estimate_accuracy(&now);
88 	if (ret < slack)
89 		return slack;
90 	return ret;
91 }
92 
93 
94 
95 struct poll_table_page {
96 	struct poll_table_page * next;
97 	struct poll_table_entry * entry;
98 	struct poll_table_entry entries[];
99 };
100 
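/*
 * A poll_table_page is full once the next free entry would start past
 * the end of the page it is embedded in.
 */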
101 #define POLL_TABLE_FULL(table) \
102 	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
103 
104 /*
105  * Ok, Peter made a complicated, but straightforward multiple_wait() function.
106  * I have rewritten this, taking some shortcuts: This code may not be easy to
107  * follow, but it should be free of race-conditions, and it's practical. If you
108  * understand what I'm doing here, then you understand how the linux
109  * sleep/wakeup mechanism works.
110  *
111  * Two very simple procedures, poll_wait() and poll_freewait(), do all the
112  * work.  poll_wait() is an inline function defined in <linux/poll.h>,
113  * as all select/poll functions have to call it to add an entry to the
114  * poll table.
115  */
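/*
 * A typical driver ->poll() method uses poll_wait() roughly like this
 * (illustrative sketch only; my_waitqueue and my_data_ready() are made-up
 * names, not part of this file):
 *
 *	static __poll_t my_poll(struct file *file, poll_table *wait)
 *	{
 *		__poll_t mask = 0;
 *
 *		poll_wait(file, &my_waitqueue, wait);
 *		if (my_data_ready())
 *			mask |= EPOLLIN | EPOLLRDNORM;
 *		return mask;
 *	}
 *
 * When called from the select/poll paths in this file, poll_wait() ends
 * up in __pollwait() below, which queues an entry on my_waitqueue so
 * that pollwake() can later wake the polling task.
 */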
116 static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
117 		       poll_table *p);
118 
119 void poll_initwait(struct poll_wqueues *pwq)
120 {
121 	init_poll_funcptr(&pwq->pt, __pollwait);
122 	pwq->polling_task = current;
123 	pwq->triggered = 0;
124 	pwq->error = 0;
125 	pwq->table = NULL;
126 	pwq->inline_index = 0;
127 }
128 EXPORT_SYMBOL(poll_initwait);
129 
130 static void free_poll_entry(struct poll_table_entry *entry)
131 {
132 	remove_wait_queue(entry->wait_address, &entry->wait);
133 	fput(entry->filp);
134 }
135 
136 void poll_freewait(struct poll_wqueues *pwq)
137 {
138 	struct poll_table_page * p = pwq->table;
139 	int i;
140 	for (i = 0; i < pwq->inline_index; i++)
141 		free_poll_entry(pwq->inline_entries + i);
142 	while (p) {
143 		struct poll_table_entry * entry;
144 		struct poll_table_page *old;
145 
146 		entry = p->entry;
147 		do {
148 			entry--;
149 			free_poll_entry(entry);
150 		} while (entry > p->entries);
151 		old = p;
152 		p = p->next;
153 		free_page((unsigned long) old);
154 	}
155 }
156 EXPORT_SYMBOL(poll_freewait);
157 
158 static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
159 {
160 	struct poll_table_page *table = p->table;
161 
162 	if (p->inline_index < N_INLINE_POLL_ENTRIES)
163 		return p->inline_entries + p->inline_index++;
164 
165 	if (!table || POLL_TABLE_FULL(table)) {
166 		struct poll_table_page *new_table;
167 
168 		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
169 		if (!new_table) {
170 			p->error = -ENOMEM;
171 			return NULL;
172 		}
173 		new_table->entry = new_table->entries;
174 		new_table->next = table;
175 		p->table = new_table;
176 		table = new_table;
177 	}
178 
179 	return table->entry++;
180 }
181 
182 static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
183 {
184 	struct poll_wqueues *pwq = wait->private;
185 	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);
186 
187 	/*
188 	 * Although this function is called under waitqueue lock, LOCK
189 	 * doesn't imply write barrier and the users expect write
190 	 * barrier semantics on wakeup functions.  The following
191 	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
192 	 * and is paired with smp_store_mb() in poll_schedule_timeout.
193 	 */
194 	smp_wmb();
195 	WRITE_ONCE(pwq->triggered, 1);
196 
197 	/*
198 	 * Perform the default wake up operation using a dummy
199 	 * waitqueue.
200 	 *
201 	 * TODO: This is hacky but there currently is no interface to
202 	 * pass in @sync.  @sync is scheduled to be removed and once
203 	 * that happens, wake_up_process() can be used directly.
204 	 */
205 	return default_wake_function(&dummy_wait, mode, sync, key);
206 }
207 
208 static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
209 {
210 	struct poll_table_entry *entry;
211 
212 	entry = container_of(wait, struct poll_table_entry, wait);
213 	if (key && !(key_to_poll(key) & entry->key))
214 		return 0;
215 	return __pollwake(wait, mode, sync, key);
216 }
217 
218 /* Add a new entry */
219 static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
220 				poll_table *p)
221 {
222 	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
223 	struct poll_table_entry *entry = poll_get_entry(pwq);
224 	if (!entry)
225 		return;
226 	entry->filp = get_file(filp);
227 	entry->wait_address = wait_address;
228 	entry->key = p->_key;
229 	init_waitqueue_func_entry(&entry->wait, pollwake);
230 	entry->wait.private = pwq;
231 	add_wait_queue(wait_address, &entry->wait);
232 }
233 
234 static int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
235 			  ktime_t *expires, unsigned long slack)
236 {
237 	int rc = -EINTR;
238 
239 	set_current_state(state);
240 	if (!READ_ONCE(pwq->triggered))
241 		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
242 	__set_current_state(TASK_RUNNING);
243 
244 	/*
245 	 * Prepare for the next iteration.
246 	 *
247 	 * The following smp_store_mb() serves two purposes.  First, it's
248 	 * the counterpart rmb of the wmb in pollwake() such that data
249 	 * written before wake up is always visible after wake up.
250 	 * Second, the full barrier guarantees that triggered clearing
251 	 * doesn't pass event check of the next iteration.  Note that
252 	 * this problem doesn't exist for the first iteration as
253 	 * add_wait_queue() has full barrier semantics.
254 	 */
255 	smp_store_mb(pwq->triggered, 0);
256 
257 	return rc;
258 }
259 
260 /**
261  * poll_select_set_timeout - helper function to setup the timeout value
262  * @to:		pointer to timespec64 variable for the final timeout
263  * @sec:	seconds (from user space)
264  * @nsec:	nanoseconds (from user space)
265  *
266  * Note, we do not use a timespec for the user space value here.  That
267  * way we can use the function for timeval and compat interfaces as well.
268  *
269  * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
270  */
271 int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
272 {
273 	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};
274 
275 	if (!timespec64_valid(&ts))
276 		return -EINVAL;
277 
278 	/* Optimize for the zero timeout value here */
279 	if (!sec && !nsec) {
280 		to->tv_sec = to->tv_nsec = 0;
281 	} else {
282 		ktime_get_ts64(to);
283 		*to = timespec64_add_safe(*to, ts);
284 	}
285 	return 0;
286 }
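/*
 * For example, sys_poll() below converts a relative timeout in
 * milliseconds into an absolute expiry time with:
 *
 *	poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
 *		NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
 */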
287 
288 enum poll_time_type {
289 	PT_TIMEVAL = 0,
290 	PT_OLD_TIMEVAL = 1,
291 	PT_TIMESPEC = 2,
292 	PT_OLD_TIMESPEC = 3,
293 };
294 
295 static int poll_select_finish(struct timespec64 *end_time,
296 			      void __user *p,
297 			      enum poll_time_type pt_type, int ret)
298 {
299 	struct timespec64 rts;
300 
301 	restore_saved_sigmask_unless(ret == -ERESTARTNOHAND);
302 
303 	if (!p)
304 		return ret;
305 
306 	if (current->personality & STICKY_TIMEOUTS)
307 		goto sticky;
308 
309 	/* No update for zero timeout */
310 	if (!end_time->tv_sec && !end_time->tv_nsec)
311 		return ret;
312 
313 	ktime_get_ts64(&rts);
314 	rts = timespec64_sub(*end_time, rts);
315 	if (rts.tv_sec < 0)
316 		rts.tv_sec = rts.tv_nsec = 0;
317 
318 
319 	switch (pt_type) {
320 	case PT_TIMEVAL:
321 		{
322 			struct __kernel_old_timeval rtv;
323 
324 			if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
325 				memset(&rtv, 0, sizeof(rtv));
326 			rtv.tv_sec = rts.tv_sec;
327 			rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
328 			if (!copy_to_user(p, &rtv, sizeof(rtv)))
329 				return ret;
330 		}
331 		break;
332 	case PT_OLD_TIMEVAL:
333 		{
334 			struct old_timeval32 rtv;
335 
336 			rtv.tv_sec = rts.tv_sec;
337 			rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
338 			if (!copy_to_user(p, &rtv, sizeof(rtv)))
339 				return ret;
340 		}
341 		break;
342 	case PT_TIMESPEC:
343 		if (!put_timespec64(&rts, p))
344 			return ret;
345 		break;
346 	case PT_OLD_TIMESPEC:
347 		if (!put_old_timespec32(&rts, p))
348 			return ret;
349 		break;
350 	default:
351 		BUG();
352 	}
353 	/*
354 	 * If an application puts its timeval in read-only memory, we
355 	 * don't want the Linux-specific update to the timeval to
356 	 * cause a fault after the select has completed
357 	 * successfully. However, because we're not updating the
358 	 * timeval, we can't restart the system call.
359 	 */
360 
361 sticky:
362 	if (ret == -ERESTARTNOHAND)
363 		ret = -EINTR;
364 	return ret;
365 }
366 
367 /*
368  * Scalable version of the fd_set.
369  */
370 
371 typedef struct {
372 	unsigned long *in, *out, *ex;
373 	unsigned long *res_in, *res_out, *res_ex;
374 } fd_set_bits;
375 
376 /*
377  * How many longwords for "nr" bits?
378  */
379 #define FDS_BITPERLONG	(8*sizeof(long))
380 #define FDS_LONGS(nr)	(((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
381 #define FDS_BYTES(nr)	(FDS_LONGS(nr)*sizeof(long))
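/*
 * For example, with 64-bit longs a 1024-bit fd_set needs FDS_LONGS(1024)
 * == 16 longwords, i.e. FDS_BYTES(1024) == 128 bytes per bitmap.
 */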
382 
383 /*
384  * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
385  */
386 static inline
387 int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
388 {
389 	nr = FDS_BYTES(nr);
390 	if (ufdset)
391 		return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;
392 
393 	memset(fdset, 0, nr);
394 	return 0;
395 }
396 
397 static inline unsigned long __must_check
398 set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
399 {
400 	if (ufdset)
401 		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
402 	return 0;
403 }
404 
405 static inline
406 void zero_fd_set(unsigned long nr, unsigned long *fdset)
407 {
408 	memset(fdset, 0, FDS_BYTES(nr));
409 }
410 
411 #define FDS_IN(fds, n)		(fds->in + n)
412 #define FDS_OUT(fds, n)		(fds->out + n)
413 #define FDS_EX(fds, n)		(fds->ex + n)
414 
415 #define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))
416 
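/*
 * Find the highest fd the caller is actually interested in, verifying on
 * the way that every fd set in the in/out/ex bitmaps refers to an open
 * file.  Returns that highest fd + 1, or -EBADF if a closed descriptor
 * was requested.
 */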
417 static int max_select_fd(unsigned long n, fd_set_bits *fds)
418 {
419 	unsigned long *open_fds;
420 	unsigned long set;
421 	int max;
422 	struct fdtable *fdt;
423 
424 	/* handle the last incomplete long-word first */
425 	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
426 	n /= BITS_PER_LONG;
427 	fdt = files_fdtable(current->files);
428 	open_fds = fdt->open_fds + n;
429 	max = 0;
430 	if (set) {
431 		set &= BITS(fds, n);
432 		if (set) {
433 			if (!(set & ~*open_fds))
434 				goto get_max;
435 			return -EBADF;
436 		}
437 	}
438 	while (n) {
439 		open_fds--;
440 		n--;
441 		set = BITS(fds, n);
442 		if (!set)
443 			continue;
444 		if (set & ~*open_fds)
445 			return -EBADF;
446 		if (max)
447 			continue;
448 get_max:
449 		do {
450 			max++;
451 			set >>= 1;
452 		} while (set);
453 		max += n * BITS_PER_LONG;
454 	}
455 
456 	return max;
457 }
458 
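/*
 * Poll events that satisfy select()'s read, write and exception sets,
 * respectively.  Errors count as both readable and writable, hangups as
 * readable, and an invalid fd (EPOLLNVAL) satisfies all three sets.
 */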
459 #define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR |\
460 			EPOLLNVAL)
461 #define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR |\
462 			 EPOLLNVAL)
463 #define POLLEX_SET (EPOLLPRI | EPOLLNVAL)
464 
465 static inline __poll_t select_poll_one(int fd, poll_table *wait, unsigned long in,
466 				unsigned long out, unsigned long bit,
467 				__poll_t ll_flag)
468 {
469 	CLASS(fd, f)(fd);
470 
471 	if (fd_empty(f))
472 		return EPOLLNVAL;
473 
474 	wait->_key = POLLEX_SET | ll_flag;
475 	if (in & bit)
476 		wait->_key |= POLLIN_SET;
477 	if (out & bit)
478 		wait->_key |= POLLOUT_SET;
479 
480 	return vfs_poll(fd_file(f), wait);
481 }
482 
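/*
 * The core of select(): poll every fd marked in the request bitmaps and
 * fill in the result bitmaps, sleeping until at least one fd is ready,
 * the timeout expires or a signal arrives.  Returns the number of fds
 * with pending events, 0 on timeout, or a negative error.
 */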
483 static noinline_for_stack int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
484 {
485 	ktime_t expire, *to = NULL;
486 	struct poll_wqueues table;
487 	poll_table *wait;
488 	int retval, i, timed_out = 0;
489 	u64 slack = 0;
490 	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
491 	unsigned long busy_start = 0;
492 
493 	rcu_read_lock();
494 	retval = max_select_fd(n, fds);
495 	rcu_read_unlock();
496 
497 	if (retval < 0)
498 		return retval;
499 	n = retval;
500 
501 	poll_initwait(&table);
502 	wait = &table.pt;
503 	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
504 		wait->_qproc = NULL;
505 		timed_out = 1;
506 	}
507 
508 	if (end_time && !timed_out)
509 		slack = select_estimate_accuracy(end_time);
510 
511 	retval = 0;
512 	for (;;) {
513 		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
514 		bool can_busy_loop = false;
515 
516 		inp = fds->in; outp = fds->out; exp = fds->ex;
517 		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;
518 
519 		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
520 			unsigned long in, out, ex, all_bits, bit = 1, j;
521 			unsigned long res_in = 0, res_out = 0, res_ex = 0;
522 			__poll_t mask;
523 
524 			in = *inp++; out = *outp++; ex = *exp++;
525 			all_bits = in | out | ex;
526 			if (all_bits == 0) {
527 				i += BITS_PER_LONG;
528 				continue;
529 			}
530 
531 			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
532 				if (i >= n)
533 					break;
534 				if (!(bit & all_bits))
535 					continue;
536 				mask = select_poll_one(i, wait, in, out, bit,
537 						       busy_flag);
538 				if ((mask & POLLIN_SET) && (in & bit)) {
539 					res_in |= bit;
540 					retval++;
541 					wait->_qproc = NULL;
542 				}
543 				if ((mask & POLLOUT_SET) && (out & bit)) {
544 					res_out |= bit;
545 					retval++;
546 					wait->_qproc = NULL;
547 				}
548 				if ((mask & POLLEX_SET) && (ex & bit)) {
549 					res_ex |= bit;
550 					retval++;
551 					wait->_qproc = NULL;
552 				}
553 				/* got something, stop busy polling */
554 				if (retval) {
555 					can_busy_loop = false;
556 					busy_flag = 0;
557 
558 				/*
559 				 * only remember a returned
560 				 * POLL_BUSY_LOOP if we asked for it
561 				 */
562 				} else if (busy_flag & mask)
563 					can_busy_loop = true;
564 
565 			}
566 			if (res_in)
567 				*rinp = res_in;
568 			if (res_out)
569 				*routp = res_out;
570 			if (res_ex)
571 				*rexp = res_ex;
572 			cond_resched();
573 		}
574 		wait->_qproc = NULL;
575 		if (retval || timed_out || signal_pending(current))
576 			break;
577 		if (table.error) {
578 			retval = table.error;
579 			break;
580 		}
581 
582 		/* only if found POLL_BUSY_LOOP sockets && not out of time */
583 		if (can_busy_loop && !need_resched()) {
584 			if (!busy_start) {
585 				busy_start = busy_loop_current_time();
586 				continue;
587 			}
588 			if (!busy_loop_timeout(busy_start))
589 				continue;
590 		}
591 		busy_flag = 0;
592 
593 		/*
594 		 * If this is the first loop and we have a timeout
595 		 * given, then we convert it to ktime_t and point the
596 		 * "to" pointer at the expiry value.
597 		 */
598 		if (end_time && !to) {
599 			expire = timespec64_to_ktime(*end_time);
600 			to = &expire;
601 		}
602 
603 		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
604 					   to, slack))
605 			timed_out = 1;
606 	}
607 
608 	poll_freewait(&table);
609 
610 	return retval;
611 }
612 
613 /*
614  * We can actually return ERESTARTSYS instead of EINTR, but I'd
615  * like to be certain this leads to no problems. So I return
616  * EINTR just for safety.
617  *
618  * Update: ERESTARTSYS breaks at least the xview clock binary, so
619  * I'm trying ERESTARTNOHAND which restarts only when you want it to.
620  */
621 int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
622 			   fd_set __user *exp, struct timespec64 *end_time)
623 {
624 	fd_set_bits fds;
625 	void *bits;
626 	int ret, max_fds;
627 	size_t size, alloc_size;
628 	struct fdtable *fdt;
629 	/* Allocate small arguments on the stack to save memory and be faster */
630 	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
631 
632 	ret = -EINVAL;
633 	if (unlikely(n < 0))
634 		goto out_nofds;
635 
636 	/* max_fds can increase, so grab it once to avoid a race */
637 	rcu_read_lock();
638 	fdt = files_fdtable(current->files);
639 	max_fds = fdt->max_fds;
640 	rcu_read_unlock();
641 	if (n > max_fds)
642 		n = max_fds;
643 
644 	/*
645 	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing).
646 	 * Since we use fd_set bitmaps we need to allocate memory in units
647 	 * of long-words.
648 	 */
649 	size = FDS_BYTES(n);
650 	bits = stack_fds;
651 	if (size > sizeof(stack_fds) / 6) {
652 		/* Not enough space in the on-stack array; fall back to kvmalloc */
653 		ret = -ENOMEM;
654 		if (size > (SIZE_MAX / 6))
655 			goto out_nofds;
656 
657 		alloc_size = 6 * size;
658 		bits = kvmalloc(alloc_size, GFP_KERNEL);
659 		if (!bits)
660 			goto out_nofds;
661 	}
662 	fds.in      = bits;
663 	fds.out     = bits +   size;
664 	fds.ex      = bits + 2*size;
665 	fds.res_in  = bits + 3*size;
666 	fds.res_out = bits + 4*size;
667 	fds.res_ex  = bits + 5*size;
668 
669 	if ((ret = get_fd_set(n, inp, fds.in)) ||
670 	    (ret = get_fd_set(n, outp, fds.out)) ||
671 	    (ret = get_fd_set(n, exp, fds.ex)))
672 		goto out;
673 	zero_fd_set(n, fds.res_in);
674 	zero_fd_set(n, fds.res_out);
675 	zero_fd_set(n, fds.res_ex);
676 
677 	ret = do_select(n, &fds, end_time);
678 
679 	if (ret < 0)
680 		goto out;
681 	if (!ret) {
682 		ret = -ERESTARTNOHAND;
683 		if (signal_pending(current))
684 			goto out;
685 		ret = 0;
686 	}
687 
688 	if (set_fd_set(n, inp, fds.res_in) ||
689 	    set_fd_set(n, outp, fds.res_out) ||
690 	    set_fd_set(n, exp, fds.res_ex))
691 		ret = -EFAULT;
692 
693 out:
694 	if (bits != stack_fds)
695 		kvfree(bits);
696 out_nofds:
697 	return ret;
698 }
699 
700 static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
701 		       fd_set __user *exp, struct __kernel_old_timeval __user *tvp)
702 {
703 	struct timespec64 end_time, *to = NULL;
704 	struct __kernel_old_timeval tv;
705 	int ret;
706 
707 	if (tvp) {
708 		if (copy_from_user(&tv, tvp, sizeof(tv)))
709 			return -EFAULT;
710 
711 		to = &end_time;
712 		if (poll_select_set_timeout(to,
713 				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
714 				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
715 			return -EINVAL;
716 	}
717 
718 	ret = core_sys_select(n, inp, outp, exp, to);
719 	return poll_select_finish(&end_time, tvp, PT_TIMEVAL, ret);
720 }
721 
722 SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
723 		fd_set __user *, exp, struct __kernel_old_timeval __user *, tvp)
724 {
725 	return kern_select(n, inp, outp, exp, tvp);
726 }
727 
728 static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
729 		       fd_set __user *exp, void __user *tsp,
730 		       const sigset_t __user *sigmask, size_t sigsetsize,
731 		       enum poll_time_type type)
732 {
733 	struct timespec64 ts, end_time, *to = NULL;
734 	int ret;
735 
736 	if (tsp) {
737 		switch (type) {
738 		case PT_TIMESPEC:
739 			if (get_timespec64(&ts, tsp))
740 				return -EFAULT;
741 			break;
742 		case PT_OLD_TIMESPEC:
743 			if (get_old_timespec32(&ts, tsp))
744 				return -EFAULT;
745 			break;
746 		default:
747 			BUG();
748 		}
749 
750 		to = &end_time;
751 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
752 			return -EINVAL;
753 	}
754 
755 	ret = set_user_sigmask(sigmask, sigsetsize);
756 	if (ret)
757 		return ret;
758 
759 	ret = core_sys_select(n, inp, outp, exp, to);
760 	return poll_select_finish(&end_time, tsp, type, ret);
761 }
762 
763 /*
764  * Most architectures can't handle 7-argument syscalls. So we provide a
765  * 6-argument version where the sixth argument is a pointer to a structure
766  * which has a pointer to the sigset_t itself followed by a size_t containing
767  * the sigset size.
768  */
769 struct sigset_argpack {
770 	sigset_t __user *p;
771 	size_t size;
772 };
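/*
 * User space (typically the C library) packs the last two pselect()
 * arguments into one of these, roughly as follows (illustrative sketch,
 * not part of this file; note that the size must be the kernel's sigset
 * size, not glibc's sizeof(sigset_t)):
 *
 *	struct { const sigset_t *ss; size_t ss_len; } data =
 *		{ &sigmask, _NSIG / 8 };
 *	syscall(__NR_pselect6, nfds, &rfds, &wfds, &efds, &ts, &data);
 */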
773 
774 static inline int get_sigset_argpack(struct sigset_argpack *to,
775 				     struct sigset_argpack __user *from)
776 {
777 	// the path is hot enough for overhead of copy_from_user() to matter
778 	if (from) {
779 		scoped_user_read_access(from, Efault) {
780 			unsafe_get_user(to->p, &from->p, Efault);
781 			unsafe_get_user(to->size, &from->size, Efault);
782 		}
783 	}
784 	return 0;
785 Efault:
786 	return -EFAULT;
787 }
788 
789 SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
790 		fd_set __user *, exp, struct __kernel_timespec __user *, tsp,
791 		void __user *, sig)
792 {
793 	struct sigset_argpack x = {NULL, 0};
794 
795 	if (get_sigset_argpack(&x, sig))
796 		return -EFAULT;
797 
798 	return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_TIMESPEC);
799 }
800 
801 #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
802 
803 SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *, outp,
804 		fd_set __user *, exp, struct old_timespec32 __user *, tsp,
805 		void __user *, sig)
806 {
807 	struct sigset_argpack x = {NULL, 0};
808 
809 	if (get_sigset_argpack(&x, sig))
810 		return -EFAULT;
811 
812 	return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_OLD_TIMESPEC);
813 }
814 
815 #endif
816 
817 #ifdef __ARCH_WANT_SYS_OLD_SELECT
818 struct sel_arg_struct {
819 	unsigned long n;
820 	fd_set __user *inp, *outp, *exp;
821 	struct __kernel_old_timeval __user *tvp;
822 };
823 
824 SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
825 {
826 	struct sel_arg_struct a;
827 
828 	if (copy_from_user(&a, arg, sizeof(a)))
829 		return -EFAULT;
830 	return kern_select(a.n, a.inp, a.outp, a.exp, a.tvp);
831 }
832 #endif
833 
834 struct poll_list {
835 	struct poll_list *next;
836 	unsigned int len;
837 	struct pollfd entries[] __counted_by(len);
838 };
839 
840 #define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
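/*
 * With 4 KiB pages and an 8-byte struct pollfd this works out to roughly
 * 510 pollfds per allocated chunk after the poll_list header.
 */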
841 
842 /*
843  * Fish for pollable events on the pollfd->fd file descriptor. We're only
844  * interested in events matching the pollfd->events mask, and the result
845  * matching that mask is both recorded in pollfd->revents and returned. The
846  * pwait poll_table will be used by the fd-provided poll handler for waiting,
847  * if pwait->_qproc is non-NULL.
848  */
849 static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
850 				     bool *can_busy_poll,
851 				     __poll_t busy_flag)
852 {
853 	int fd = pollfd->fd;
854 	__poll_t mask, filter;
855 
856 	if (unlikely(fd < 0))
857 		return 0;
858 
859 	CLASS(fd, f)(fd);
860 	if (fd_empty(f))
861 		return EPOLLNVAL;
862 
863 	/* userland u16 ->events contains POLL... bitmap */
864 	filter = demangle_poll(pollfd->events) | EPOLLERR | EPOLLHUP;
865 	pwait->_key = filter | busy_flag;
866 	mask = vfs_poll(fd_file(f), pwait);
867 	if (mask & busy_flag)
868 		*can_busy_poll = true;
869 	return mask & filter;		/* Mask out unneeded events. */
870 }
871 
872 static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
873 		   struct timespec64 *end_time)
874 {
875 	poll_table* pt = &wait->pt;
876 	ktime_t expire, *to = NULL;
877 	int timed_out = 0, count = 0;
878 	u64 slack = 0;
879 	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
880 	unsigned long busy_start = 0;
881 
882 	/* Optimise the no-wait case */
883 	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
884 		pt->_qproc = NULL;
885 		timed_out = 1;
886 	}
887 
888 	if (end_time && !timed_out)
889 		slack = select_estimate_accuracy(end_time);
890 
891 	for (;;) {
892 		struct poll_list *walk;
893 		bool can_busy_loop = false;
894 
895 		for (walk = list; walk != NULL; walk = walk->next) {
896 			struct pollfd * pfd, * pfd_end;
897 
898 			pfd = walk->entries;
899 			pfd_end = pfd + walk->len;
900 			for (; pfd != pfd_end; pfd++) {
901 				__poll_t mask;
902 				/*
903 				 * Fish for events. If we found one, record it
904 				 * and kill poll_table->_qproc, so we don't
905 				 * needlessly register any other waiters after
906 				 * this. They'll get immediately deregistered
907 				 * when we break out and return.
908 				 */
909 				mask = do_pollfd(pfd, pt, &can_busy_loop, busy_flag);
910 				pfd->revents = mangle_poll(mask);
911 				if (mask) {
912 					count++;
913 					pt->_qproc = NULL;
914 					/* found something, stop busy polling */
915 					busy_flag = 0;
916 					can_busy_loop = false;
917 				}
918 			}
919 		}
920 		/*
921 		 * All waiters have already been registered, so don't provide
922 		 * a poll_table->_qproc to them on the next loop iteration.
923 		 */
924 		pt->_qproc = NULL;
925 		if (!count) {
926 			count = wait->error;
927 			if (signal_pending(current))
928 				count = -ERESTARTNOHAND;
929 		}
930 		if (count || timed_out)
931 			break;
932 
933 		/* only if found POLL_BUSY_LOOP sockets && not out of time */
934 		if (can_busy_loop && !need_resched()) {
935 			if (!busy_start) {
936 				busy_start = busy_loop_current_time();
937 				continue;
938 			}
939 			if (!busy_loop_timeout(busy_start))
940 				continue;
941 		}
942 		busy_flag = 0;
943 
944 		/*
945 		 * If this is the first loop and we have a timeout
946 		 * given, then we convert it to ktime_t and point the
947 		 * "to" pointer at the expiry value.
948 		 */
949 		if (end_time && !to) {
950 			expire = timespec64_to_ktime(*end_time);
951 			to = &expire;
952 		}
953 
954 		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
955 			timed_out = 1;
956 	}
957 	return count;
958 }
959 
960 #define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
961 			sizeof(struct pollfd))
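/*
 * With the usual 256-byte POLL_STACK_ALLOC this lets roughly 30 pollfds
 * be handled without any dynamic allocation.
 */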
962 
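/*
 * Copy the user's pollfd array into kernel space in chunks (the first
 * chunk on the stack, the rest one page at a time), run do_poll() over
 * the resulting chain, and copy the revents fields back to user space.
 */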
963 static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
964 		struct timespec64 *end_time)
965 {
966 	struct poll_wqueues table;
967 	int err = -EFAULT, fdcount;
968 	/* Allocate small arguments on the stack to save memory and be
969 	   faster - use long to make sure the buffer is aligned properly
970 	   on 64 bit archs to avoid unaligned access */
971 	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
972 	struct poll_list *const head = (struct poll_list *)stack_pps;
973  	struct poll_list *walk = head;
974 	unsigned int todo = nfds;
975 	unsigned int len;
976 
977 	if (nfds > rlimit(RLIMIT_NOFILE))
978 		return -EINVAL;
979 
980 	len = min_t(unsigned int, nfds, N_STACK_PPS);
981 	for (;;) {
982 		walk->next = NULL;
983 		walk->len = len;
984 		if (!len)
985 			break;
986 
987 		if (copy_from_user(walk->entries, ufds + nfds-todo,
988 					sizeof(struct pollfd) * walk->len))
989 			goto out_fds;
990 
991 		if (walk->len >= todo)
992 			break;
993 		todo -= walk->len;
994 
995 		len = min(todo, POLLFD_PER_PAGE);
996 		walk = walk->next = kmalloc(struct_size(walk, entries, len),
997 					    GFP_KERNEL);
998 		if (!walk) {
999 			err = -ENOMEM;
1000 			goto out_fds;
1001 		}
1002 	}
1003 
1004 	poll_initwait(&table);
1005 	fdcount = do_poll(head, &table, end_time);
1006 	poll_freewait(&table);
1007 
1008 	if (!user_write_access_begin(ufds, nfds * sizeof(*ufds)))
1009 		goto out_fds;
1010 
1011 	for (walk = head; walk; walk = walk->next) {
1012 		struct pollfd *fds = walk->entries;
1013 		unsigned int j;
1014 
1015 		for (j = walk->len; j; fds++, ufds++, j--)
1016 			unsafe_put_user(fds->revents, &ufds->revents, Efault);
1017   	}
1018 	user_write_access_end();
1019 
1020 	err = fdcount;
1021 out_fds:
1022 	walk = head->next;
1023 	while (walk) {
1024 		struct poll_list *pos = walk;
1025 		walk = walk->next;
1026 		kfree(pos);
1027 	}
1028 
1029 	return err;
1030 
1031 Efault:
1032 	user_write_access_end();
1033 	err = -EFAULT;
1034 	goto out_fds;
1035 }
1036 
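/*
 * Restart handler for an interrupted poll(): the saved ufds/nfds and the
 * absolute end_time are picked up from the restart block, so the
 * remaining timeout is preserved across the restart.
 */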
1037 static long do_restart_poll(struct restart_block *restart_block)
1038 {
1039 	struct pollfd __user *ufds = restart_block->poll.ufds;
1040 	int nfds = restart_block->poll.nfds;
1041 	struct timespec64 *to = NULL;
1042 	int ret;
1043 
1044 	if (restart_block->poll.has_timeout)
1045 		to = &restart_block->poll.end_time;
1046 
1047 	ret = do_sys_poll(ufds, nfds, to);
1048 
1049 	if (ret == -ERESTARTNOHAND)
1050 		ret = set_restart_fn(restart_block, do_restart_poll);
1051 
1052 	return ret;
1053 }
1054 
1055 SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
1056 		int, timeout_msecs)
1057 {
1058 	struct timespec64 end_time, *to = NULL;
1059 	int ret;
1060 
1061 	if (timeout_msecs >= 0) {
1062 		to = &end_time;
1063 		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
1064 			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
1065 	}
1066 
1067 	ret = do_sys_poll(ufds, nfds, to);
1068 
1069 	if (ret == -ERESTARTNOHAND) {
1070 		struct restart_block *restart_block;
1071 
1072 		restart_block = &current->restart_block;
1073 		restart_block->poll.ufds = ufds;
1074 		restart_block->poll.nfds = nfds;
1075 
1076 		if (timeout_msecs >= 0) {
1077 			restart_block->poll.end_time = end_time;
1078 			restart_block->poll.has_timeout = 1;
1079 		} else
1080 			restart_block->poll.has_timeout = 0;
1081 
1082 		ret = set_restart_fn(restart_block, do_restart_poll);
1083 	}
1084 	return ret;
1085 }
1086 
1087 SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
1088 		struct __kernel_timespec __user *, tsp, const sigset_t __user *, sigmask,
1089 		size_t, sigsetsize)
1090 {
1091 	struct timespec64 ts, end_time, *to = NULL;
1092 	int ret;
1093 
1094 	if (tsp) {
1095 		if (get_timespec64(&ts, tsp))
1096 			return -EFAULT;
1097 
1098 		to = &end_time;
1099 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1100 			return -EINVAL;
1101 	}
1102 
1103 	ret = set_user_sigmask(sigmask, sigsetsize);
1104 	if (ret)
1105 		return ret;
1106 
1107 	ret = do_sys_poll(ufds, nfds, to);
1108 	return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);
1109 }
1110 
1111 #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
1112 
1113 SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds,
1114 		struct old_timespec32 __user *, tsp, const sigset_t __user *, sigmask,
1115 		size_t, sigsetsize)
1116 {
1117 	struct timespec64 ts, end_time, *to = NULL;
1118 	int ret;
1119 
1120 	if (tsp) {
1121 		if (get_old_timespec32(&ts, tsp))
1122 			return -EFAULT;
1123 
1124 		to = &end_time;
1125 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1126 			return -EINVAL;
1127 	}
1128 
1129 	ret = set_user_sigmask(sigmask, sigsetsize);
1130 	if (ret)
1131 		return ret;
1132 
1133 	ret = do_sys_poll(ufds, nfds, to);
1134 	return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret);
1135 }
1136 #endif
1137 
1138 #ifdef CONFIG_COMPAT
1139 #define __COMPAT_NFDBITS       (8 * sizeof(compat_ulong_t))
1140 
1141 /*
1142  * Ooo, nasty.  We need to frob 32-bit unsigned longs into
1143  * 64-bit unsigned longs here.
1144  */
1145 static
1146 int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
1147 			unsigned long *fdset)
1148 {
1149 	if (ufdset) {
1150 		return compat_get_bitmap(fdset, ufdset, nr);
1151 	} else {
1152 		zero_fd_set(nr, fdset);
1153 		return 0;
1154 	}
1155 }
1156 
1157 static
1158 int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
1159 		      unsigned long *fdset)
1160 {
1161 	if (!ufdset)
1162 		return 0;
1163 	return compat_put_bitmap(ufdset, fdset, nr);
1164 }
1165 
1166 
1167 /*
1168  * This is a virtual copy of sys_select from fs/select.c and probably
1169  * should be compared to it from time to time
1170  */
1171 
1172 /*
1173  * We can actually return ERESTARTSYS instead of EINTR, but I'd
1174  * like to be certain this leads to no problems. So I return
1175  * EINTR just for safety.
1176  *
1177  * Update: ERESTARTSYS breaks at least the xview clock binary, so
1178  * I'm trying ERESTARTNOHAND which restarts only when you want it to.
1179  */
1180 static int compat_core_sys_select(int n, compat_ulong_t __user *inp,
1181 	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
1182 	struct timespec64 *end_time)
1183 {
1184 	fd_set_bits fds;
1185 	void *bits;
1186 	int size, max_fds, ret = -EINVAL;
1187 	struct fdtable *fdt;
1188 	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
1189 
1190 	if (n < 0)
1191 		goto out_nofds;
1192 
1193 	/* max_fds can increase, so grab it once to avoid a race */
1194 	rcu_read_lock();
1195 	fdt = files_fdtable(current->files);
1196 	max_fds = fdt->max_fds;
1197 	rcu_read_unlock();
1198 	if (n > max_fds)
1199 		n = max_fds;
1200 
1201 	/*
1202 	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing).
1203 	 * Since we use fd_set bitmaps we need to allocate memory in units
1204 	 * of long-words.
1205 	 */
1206 	size = FDS_BYTES(n);
1207 	bits = stack_fds;
1208 	if (size > sizeof(stack_fds) / 6) {
1209 		bits = kmalloc_array(6, size, GFP_KERNEL);
1210 		ret = -ENOMEM;
1211 		if (!bits)
1212 			goto out_nofds;
1213 	}
1214 	fds.in      = (unsigned long *)  bits;
1215 	fds.out     = (unsigned long *) (bits +   size);
1216 	fds.ex      = (unsigned long *) (bits + 2*size);
1217 	fds.res_in  = (unsigned long *) (bits + 3*size);
1218 	fds.res_out = (unsigned long *) (bits + 4*size);
1219 	fds.res_ex  = (unsigned long *) (bits + 5*size);
1220 
1221 	if ((ret = compat_get_fd_set(n, inp, fds.in)) ||
1222 	    (ret = compat_get_fd_set(n, outp, fds.out)) ||
1223 	    (ret = compat_get_fd_set(n, exp, fds.ex)))
1224 		goto out;
1225 	zero_fd_set(n, fds.res_in);
1226 	zero_fd_set(n, fds.res_out);
1227 	zero_fd_set(n, fds.res_ex);
1228 
1229 	ret = do_select(n, &fds, end_time);
1230 
1231 	if (ret < 0)
1232 		goto out;
1233 	if (!ret) {
1234 		ret = -ERESTARTNOHAND;
1235 		if (signal_pending(current))
1236 			goto out;
1237 		ret = 0;
1238 	}
1239 
1240 	if (compat_set_fd_set(n, inp, fds.res_in) ||
1241 	    compat_set_fd_set(n, outp, fds.res_out) ||
1242 	    compat_set_fd_set(n, exp, fds.res_ex))
1243 		ret = -EFAULT;
1244 out:
1245 	if (bits != stack_fds)
1246 		kfree(bits);
1247 out_nofds:
1248 	return ret;
1249 }
1250 
1251 static int do_compat_select(int n, compat_ulong_t __user *inp,
1252 	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
1253 	struct old_timeval32 __user *tvp)
1254 {
1255 	struct timespec64 end_time, *to = NULL;
1256 	struct old_timeval32 tv;
1257 	int ret;
1258 
1259 	if (tvp) {
1260 		if (copy_from_user(&tv, tvp, sizeof(tv)))
1261 			return -EFAULT;
1262 
1263 		to = &end_time;
1264 		if (poll_select_set_timeout(to,
1265 				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
1266 				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
1267 			return -EINVAL;
1268 	}
1269 
1270 	ret = compat_core_sys_select(n, inp, outp, exp, to);
1271 	return poll_select_finish(&end_time, tvp, PT_OLD_TIMEVAL, ret);
1272 }
1273 
1274 COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
1275 	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
1276 	struct old_timeval32 __user *, tvp)
1277 {
1278 	return do_compat_select(n, inp, outp, exp, tvp);
1279 }
1280 
1281 struct compat_sel_arg_struct {
1282 	compat_ulong_t n;
1283 	compat_uptr_t inp;
1284 	compat_uptr_t outp;
1285 	compat_uptr_t exp;
1286 	compat_uptr_t tvp;
1287 };
1288 
1289 COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
1290 {
1291 	struct compat_sel_arg_struct a;
1292 
1293 	if (copy_from_user(&a, arg, sizeof(a)))
1294 		return -EFAULT;
1295 	return do_compat_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
1296 				compat_ptr(a.exp), compat_ptr(a.tvp));
1297 }
1298 
1299 static long do_compat_pselect(int n, compat_ulong_t __user *inp,
1300 	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
1301 	void __user *tsp, compat_sigset_t __user *sigmask,
1302 	compat_size_t sigsetsize, enum poll_time_type type)
1303 {
1304 	struct timespec64 ts, end_time, *to = NULL;
1305 	int ret;
1306 
1307 	if (tsp) {
1308 		switch (type) {
1309 		case PT_OLD_TIMESPEC:
1310 			if (get_old_timespec32(&ts, tsp))
1311 				return -EFAULT;
1312 			break;
1313 		case PT_TIMESPEC:
1314 			if (get_timespec64(&ts, tsp))
1315 				return -EFAULT;
1316 			break;
1317 		default:
1318 			BUG();
1319 		}
1320 
1321 		to = &end_time;
1322 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1323 			return -EINVAL;
1324 	}
1325 
1326 	ret = set_compat_user_sigmask(sigmask, sigsetsize);
1327 	if (ret)
1328 		return ret;
1329 
1330 	ret = compat_core_sys_select(n, inp, outp, exp, to);
1331 	return poll_select_finish(&end_time, tsp, type, ret);
1332 }
1333 
1334 struct compat_sigset_argpack {
1335 	compat_uptr_t p;
1336 	compat_size_t size;
1337 };
1338 static inline int get_compat_sigset_argpack(struct compat_sigset_argpack *to,
1339 					    struct compat_sigset_argpack __user *from)
1340 {
1341 	if (from) {
1342 		if (!user_read_access_begin(from, sizeof(*from)))
1343 			return -EFAULT;
1344 		unsafe_get_user(to->p, &from->p, Efault);
1345 		unsafe_get_user(to->size, &from->size, Efault);
1346 		user_read_access_end();
1347 	}
1348 	return 0;
1349 Efault:
1350 	user_read_access_end();
1351 	return -EFAULT;
1352 }
1353 
1354 COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp,
1355 	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
1356 	struct __kernel_timespec __user *, tsp, void __user *, sig)
1357 {
1358 	struct compat_sigset_argpack x = {0, 0};
1359 
1360 	if (get_compat_sigset_argpack(&x, sig))
1361 		return -EFAULT;
1362 
1363 	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p),
1364 				 x.size, PT_TIMESPEC);
1365 }
1366 
1367 #if defined(CONFIG_COMPAT_32BIT_TIME)
1368 
1369 COMPAT_SYSCALL_DEFINE6(pselect6_time32, int, n, compat_ulong_t __user *, inp,
1370 	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
1371 	struct old_timespec32 __user *, tsp, void __user *, sig)
1372 {
1373 	struct compat_sigset_argpack x = {0, 0};
1374 
1375 	if (get_compat_sigset_argpack(&x, sig))
1376 		return -EFAULT;
1377 
1378 	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p),
1379 				 x.size, PT_OLD_TIMESPEC);
1380 }
1381 
1382 #endif
1383 
1384 #if defined(CONFIG_COMPAT_32BIT_TIME)
1385 COMPAT_SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds,
1386 	unsigned int,  nfds, struct old_timespec32 __user *, tsp,
1387 	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
1388 {
1389 	struct timespec64 ts, end_time, *to = NULL;
1390 	int ret;
1391 
1392 	if (tsp) {
1393 		if (get_old_timespec32(&ts, tsp))
1394 			return -EFAULT;
1395 
1396 		to = &end_time;
1397 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1398 			return -EINVAL;
1399 	}
1400 
1401 	ret = set_compat_user_sigmask(sigmask, sigsetsize);
1402 	if (ret)
1403 		return ret;
1404 
1405 	ret = do_sys_poll(ufds, nfds, to);
1406 	return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret);
1407 }
1408 #endif
1409 
1410 /* New compat syscall for 64-bit time_t */
1411 COMPAT_SYSCALL_DEFINE5(ppoll_time64, struct pollfd __user *, ufds,
1412 	unsigned int,  nfds, struct __kernel_timespec __user *, tsp,
1413 	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
1414 {
1415 	struct timespec64 ts, end_time, *to = NULL;
1416 	int ret;
1417 
1418 	if (tsp) {
1419 		if (get_timespec64(&ts, tsp))
1420 			return -EFAULT;
1421 
1422 		to = &end_time;
1423 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1424 			return -EINVAL;
1425 	}
1426 
1427 	ret = set_compat_user_sigmask(sigmask, sigsetsize);
1428 	if (ret)
1429 		return ret;
1430 
1431 	ret = do_sys_poll(ufds, nfds, to);
1432 	return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);
1433 }
1434 
1435 #endif
1436