xref: /linux/fs/select.c (revision 2fe3c78a2c26dd5ee811024a1b7d6cfb4d654319)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * This file contains the procedures for the handling of select and poll
4  *
5  * Created for Linux based loosely upon Mathius Lattner's minix
6  * patches by Peter MacDonald. Heavily edited by Linus.
7  *
8  *  4 February 1994
9  *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
10  *     flag set in its personality we do *not* modify the given timeout
11  *     parameter to reflect time remaining.
12  *
13  *  24 January 2000
14  *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
15  *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
16  */
17 
18 #include <linux/compat.h>
19 #include <linux/kernel.h>
20 #include <linux/sched/signal.h>
21 #include <linux/sched/rt.h>
22 #include <linux/syscalls.h>
23 #include <linux/export.h>
24 #include <linux/slab.h>
25 #include <linux/poll.h>
26 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
27 #include <linux/file.h>
28 #include <linux/fdtable.h>
29 #include <linux/fs.h>
30 #include <linux/rcupdate.h>
31 #include <linux/hrtimer.h>
32 #include <linux/freezer.h>
33 #include <net/busy_poll.h>
34 #include <linux/vmalloc.h>
35 
36 #include <linux/uaccess.h>
37 
38 
39 /*
40  * Estimate expected accuracy in ns from a timeval.
41  *
42  * After quite a bit of churning around, we've settled on
43  * a simple thing of taking 0.1% of the timeout as the
44  * slack, with a cap of 100 msec.
45  * "nice" tasks get a 0.5% slack instead.
46  *
47  * Consider this comment an open invitation to come up with even
48  * better solutions..
49  */
50 
51 #define MAX_SLACK	(100 * NSEC_PER_MSEC)
52 
53 static long __estimate_accuracy(struct timespec64 *tv)
54 {
55 	long slack;
56 	int divfactor = 1000;
57 
58 	if (tv->tv_sec < 0)
59 		return 0;
60 
61 	if (task_nice(current) > 0)
62 		divfactor = divfactor / 5;
63 
64 	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
65 		return MAX_SLACK;
66 
67 	slack = tv->tv_nsec / divfactor;
68 	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);
69 
70 	if (slack > MAX_SLACK)
71 		return MAX_SLACK;
72 
73 	return slack;
74 }
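/*
 * A worked example, for illustration only (not part of the original
 * comments): with the default divfactor of 1000 a 2 second timeout yields
 * 2 s / 1000 = 2 ms of slack, while a "nice" task (divfactor 200) gets
 * 2 s / 200 = 10 ms.  The MAX_SLACK cap of 100 ms is reached once the
 * remaining timeout exceeds 100 s (20 s for "nice" tasks).
 */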
75 
76 u64 select_estimate_accuracy(struct timespec64 *tv)
77 {
78 	u64 ret;
79 	struct timespec64 now;
80 	u64 slack = current->timer_slack_ns;
81 
82 	if (slack == 0)
83 		return 0;
84 
85 	ktime_get_ts64(&now);
86 	now = timespec64_sub(*tv, now);
87 	ret = __estimate_accuracy(&now);
88 	if (ret < slack)
89 		return slack;
90 	return ret;
91 }
92 
93 
94 
95 struct poll_table_page {
96 	struct poll_table_page * next;
97 	struct poll_table_entry * entry;
98 	struct poll_table_entry entries[];
99 };
100 
101 #define POLL_TABLE_FULL(table) \
102 	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
103 
104 /*
105  * Ok, Peter made a complicated, but straightforward multiple_wait() function.
106  * I have rewritten this, taking some shortcuts: This code may not be easy to
107  * follow, but it should be free of race-conditions, and it's practical. If you
108  * understand what I'm doing here, then you understand how the linux
109  * sleep/wakeup mechanism works.
110  *
111  * Two very simple procedures, poll_wait() and poll_freewait(), do all the
112  * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
113  * as all select/poll functions have to call it to add an entry to the
114  * poll table.
115  */
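/*
 * For illustration, a driver's ->poll method typically follows the pattern
 * sketched below: it registers the caller on its wait queue via poll_wait()
 * and then reports whatever is currently ready.  The names mydrv_poll,
 * mydrv_wq and data_ready are hypothetical, made up for this sketch.
 *
 *	static __poll_t mydrv_poll(struct file *file, poll_table *wait)
 *	{
 *		poll_wait(file, &mydrv_wq, wait);
 *		if (data_ready)
 *			return EPOLLIN | EPOLLRDNORM;
 *		return 0;
 *	}
 */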
116 static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
117 		       poll_table *p);
118 
119 void poll_initwait(struct poll_wqueues *pwq)
120 {
121 	init_poll_funcptr(&pwq->pt, __pollwait);
122 	pwq->polling_task = current;
123 	pwq->triggered = 0;
124 	pwq->error = 0;
125 	pwq->table = NULL;
126 	pwq->inline_index = 0;
127 }
128 EXPORT_SYMBOL(poll_initwait);
129 
130 static void free_poll_entry(struct poll_table_entry *entry)
131 {
132 	remove_wait_queue(entry->wait_address, &entry->wait);
133 	fput(entry->filp);
134 }
135 
136 void poll_freewait(struct poll_wqueues *pwq)
137 {
138 	struct poll_table_page * p = pwq->table;
139 	int i;
140 	for (i = 0; i < pwq->inline_index; i++)
141 		free_poll_entry(pwq->inline_entries + i);
142 	while (p) {
143 		struct poll_table_entry * entry;
144 		struct poll_table_page *old;
145 
146 		entry = p->entry;
147 		do {
148 			entry--;
149 			free_poll_entry(entry);
150 		} while (entry > p->entries);
151 		old = p;
152 		p = p->next;
153 		free_page((unsigned long) old);
154 	}
155 }
156 EXPORT_SYMBOL(poll_freewait);
157 
158 static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
159 {
160 	struct poll_table_page *table = p->table;
161 
162 	if (p->inline_index < N_INLINE_POLL_ENTRIES)
163 		return p->inline_entries + p->inline_index++;
164 
165 	if (!table || POLL_TABLE_FULL(table)) {
166 		struct poll_table_page *new_table;
167 
168 		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
169 		if (!new_table) {
170 			p->error = -ENOMEM;
171 			return NULL;
172 		}
173 		new_table->entry = new_table->entries;
174 		new_table->next = table;
175 		p->table = new_table;
176 		table = new_table;
177 	}
178 
179 	return table->entry++;
180 }
181 
182 static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
183 {
184 	struct poll_wqueues *pwq = wait->private;
185 	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);
186 
187 	/*
188 	 * Although this function is called under waitqueue lock, LOCK
189 	 * doesn't imply write barrier and the users expect write
190 	 * barrier semantics on wakeup functions.  The following
191 	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
192 	 * and is paired with smp_store_mb() in poll_schedule_timeout.
193 	 */
194 	smp_wmb();
195 	pwq->triggered = 1;
196 
197 	/*
198 	 * Perform the default wake up operation using a dummy
199 	 * waitqueue.
200 	 *
201 	 * TODO: This is hacky but there currently is no interface to
202 	 * pass in @sync.  @sync is scheduled to be removed and once
203 	 * that happens, wake_up_process() can be used directly.
204 	 */
205 	return default_wake_function(&dummy_wait, mode, sync, key);
206 }
207 
208 static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
209 {
210 	struct poll_table_entry *entry;
211 
212 	entry = container_of(wait, struct poll_table_entry, wait);
213 	if (key && !(key_to_poll(key) & entry->key))
214 		return 0;
215 	return __pollwake(wait, mode, sync, key);
216 }
217 
218 /* Add a new entry */
219 static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
220 				poll_table *p)
221 {
222 	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
223 	struct poll_table_entry *entry = poll_get_entry(pwq);
224 	if (!entry)
225 		return;
226 	entry->filp = get_file(filp);
227 	entry->wait_address = wait_address;
228 	entry->key = p->_key;
229 	init_waitqueue_func_entry(&entry->wait, pollwake);
230 	entry->wait.private = pwq;
231 	add_wait_queue(wait_address, &entry->wait);
232 }
233 
234 static int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
235 			  ktime_t *expires, unsigned long slack)
236 {
237 	int rc = -EINTR;
238 
239 	set_current_state(state);
240 	if (!pwq->triggered)
241 		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
242 	__set_current_state(TASK_RUNNING);
243 
244 	/*
245 	 * Prepare for the next iteration.
246 	 *
247 	 * The following smp_store_mb() serves two purposes.  First, it's
248 	 * the counterpart rmb of the wmb in pollwake() such that data
249 	 * written before wake up is always visible after wake up.
250 	 * Second, the full barrier guarantees that triggered clearing
251 	 * doesn't pass event check of the next iteration.  Note that
252 	 * this problem doesn't exist for the first iteration as
253 	 * add_wait_queue() has full barrier semantics.
254 	 */
255 	smp_store_mb(pwq->triggered, 0);
256 
257 	return rc;
258 }
259 
260 /**
261  * poll_select_set_timeout - helper function to setup the timeout value
262  * @to:		pointer to timespec64 variable for the final timeout
263  * @sec:	seconds (from user space)
264  * @nsec:	nanoseconds (from user space)
265  *
266  * Note: we do not use a timespec for the user space value here. That
267  * way we can use the function for timeval and compat interfaces as well.
268  *
269  * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
270  */
271 int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
272 {
273 	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};
274 
275 	if (!timespec64_valid(&ts))
276 		return -EINVAL;
277 
278 	/* Optimize for the zero timeout value here */
279 	if (!sec && !nsec) {
280 		to->tv_sec = to->tv_nsec = 0;
281 	} else {
282 		ktime_get_ts64(to);
283 		*to = timespec64_add_safe(*to, ts);
284 	}
285 	return 0;
286 }
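/*
 * Usage sketch (mirroring what sys_poll() below does with a millisecond
 * timeout): the caller splits the relative timeout into whole seconds and
 * nanoseconds before handing it over:
 *
 *	struct timespec64 end_time;
 *
 *	poll_select_set_timeout(&end_time, timeout_msecs / MSEC_PER_SEC,
 *			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
 *
 * On success end_time holds the absolute monotonic expiry time, or 0/0 for
 * an immediate timeout.
 */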
287 
288 enum poll_time_type {
289 	PT_TIMEVAL = 0,
290 	PT_OLD_TIMEVAL = 1,
291 	PT_TIMESPEC = 2,
292 	PT_OLD_TIMESPEC = 3,
293 };
294 
295 static int poll_select_finish(struct timespec64 *end_time,
296 			      void __user *p,
297 			      enum poll_time_type pt_type, int ret)
298 {
299 	struct timespec64 rts;
300 
301 	restore_saved_sigmask_unless(ret == -ERESTARTNOHAND);
302 
303 	if (!p)
304 		return ret;
305 
306 	if (current->personality & STICKY_TIMEOUTS)
307 		goto sticky;
308 
309 	/* No update for zero timeout */
310 	if (!end_time->tv_sec && !end_time->tv_nsec)
311 		return ret;
312 
313 	ktime_get_ts64(&rts);
314 	rts = timespec64_sub(*end_time, rts);
315 	if (rts.tv_sec < 0)
316 		rts.tv_sec = rts.tv_nsec = 0;
317 
318 
319 	switch (pt_type) {
320 	case PT_TIMEVAL:
321 		{
322 			struct __kernel_old_timeval rtv;
323 
324 			if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
325 				memset(&rtv, 0, sizeof(rtv));
326 			rtv.tv_sec = rts.tv_sec;
327 			rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
328 			if (!copy_to_user(p, &rtv, sizeof(rtv)))
329 				return ret;
330 		}
331 		break;
332 	case PT_OLD_TIMEVAL:
333 		{
334 			struct old_timeval32 rtv;
335 
336 			rtv.tv_sec = rts.tv_sec;
337 			rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
338 			if (!copy_to_user(p, &rtv, sizeof(rtv)))
339 				return ret;
340 		}
341 		break;
342 	case PT_TIMESPEC:
343 		if (!put_timespec64(&rts, p))
344 			return ret;
345 		break;
346 	case PT_OLD_TIMESPEC:
347 		if (!put_old_timespec32(&rts, p))
348 			return ret;
349 		break;
350 	default:
351 		BUG();
352 	}
353 	/*
354 	 * If an application puts its timeval in read-only memory, we
355 	 * don't want the Linux-specific update to the timeval to
356 	 * cause a fault after the select has completed
357 	 * successfully. However, because we're not updating the
358 	 * timeval, we can't restart the system call.
359 	 */
360 
361 sticky:
362 	if (ret == -ERESTARTNOHAND)
363 		ret = -EINTR;
364 	return ret;
365 }
366 
367 /*
368  * Scalable version of the fd_set.
369  */
370 
371 typedef struct {
372 	unsigned long *in, *out, *ex;
373 	unsigned long *res_in, *res_out, *res_ex;
374 } fd_set_bits;
375 
376 /*
377  * How many longwords for "nr" bits?
378  */
379 #define FDS_BITPERLONG	(8*sizeof(long))
380 #define FDS_LONGS(nr)	(((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
381 #define FDS_BYTES(nr)	(FDS_LONGS(nr)*sizeof(long))
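/*
 * Worked example (assuming a 64-bit arch, i.e. 64 bits per long):
 * FDS_LONGS(1024) is 16 and FDS_BYTES(1024) is 128, so one bitmap covering
 * 1024 descriptors occupies 128 bytes.
 */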
382 
383 /*
384  * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
385  */
386 static inline
387 int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
388 {
389 	nr = FDS_BYTES(nr);
390 	if (ufdset)
391 		return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;
392 
393 	memset(fdset, 0, nr);
394 	return 0;
395 }
396 
397 static inline unsigned long __must_check
398 set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
399 {
400 	if (ufdset)
401 		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
402 	return 0;
403 }
404 
405 static inline
406 void zero_fd_set(unsigned long nr, unsigned long *fdset)
407 {
408 	memset(fdset, 0, FDS_BYTES(nr));
409 }
410 
411 #define FDS_IN(fds, n)		(fds->in + n)
412 #define FDS_OUT(fds, n)		(fds->out + n)
413 #define FDS_EX(fds, n)		(fds->ex + n)
414 
415 #define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))
416 
417 static int max_select_fd(unsigned long n, fd_set_bits *fds)
418 {
419 	unsigned long *open_fds;
420 	unsigned long set;
421 	int max;
422 	struct fdtable *fdt;
423 
424 	/* handle the last incomplete long-word first */
425 	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
426 	n /= BITS_PER_LONG;
427 	fdt = files_fdtable(current->files);
428 	open_fds = fdt->open_fds + n;
429 	max = 0;
430 	if (set) {
431 		set &= BITS(fds, n);
432 		if (set) {
433 			if (!(set & ~*open_fds))
434 				goto get_max;
435 			return -EBADF;
436 		}
437 	}
438 	while (n) {
439 		open_fds--;
440 		n--;
441 		set = BITS(fds, n);
442 		if (!set)
443 			continue;
444 		if (set & ~*open_fds)
445 			return -EBADF;
446 		if (max)
447 			continue;
448 get_max:
449 		do {
450 			max++;
451 			set >>= 1;
452 		} while (set);
453 		max += n * BITS_PER_LONG;
454 	}
455 
456 	return max;
457 }
458 
459 #define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR |\
460 			EPOLLNVAL)
461 #define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR |\
462 			 EPOLLNVAL)
463 #define POLLEX_SET (EPOLLPRI | EPOLLNVAL)
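/*
 * These masks translate select()'s three fd_set categories into poll bits:
 * a descriptor is considered readable, writable or exceptional if its poll
 * method reports any bit of the corresponding set.  EPOLLERR and EPOLLNVAL
 * are folded into both the read and write sets (and EPOLLHUP into the read
 * set) so that select() reports readiness on a dead or invalid descriptor
 * instead of blocking on it forever.
 */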
464 
465 static inline void wait_key_set(poll_table *wait, unsigned long in,
466 				unsigned long out, unsigned long bit,
467 				__poll_t ll_flag)
468 {
469 	wait->_key = POLLEX_SET | ll_flag;
470 	if (in & bit)
471 		wait->_key |= POLLIN_SET;
472 	if (out & bit)
473 		wait->_key |= POLLOUT_SET;
474 }
475 
476 static noinline_for_stack int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
477 {
478 	ktime_t expire, *to = NULL;
479 	struct poll_wqueues table;
480 	poll_table *wait;
481 	int retval, i, timed_out = 0;
482 	u64 slack = 0;
483 	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
484 	unsigned long busy_start = 0;
485 
486 	rcu_read_lock();
487 	retval = max_select_fd(n, fds);
488 	rcu_read_unlock();
489 
490 	if (retval < 0)
491 		return retval;
492 	n = retval;
493 
494 	poll_initwait(&table);
495 	wait = &table.pt;
496 	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
497 		wait->_qproc = NULL;
498 		timed_out = 1;
499 	}
500 
501 	if (end_time && !timed_out)
502 		slack = select_estimate_accuracy(end_time);
503 
504 	retval = 0;
505 	for (;;) {
506 		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
507 		bool can_busy_loop = false;
508 
509 		inp = fds->in; outp = fds->out; exp = fds->ex;
510 		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;
511 
512 		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
513 			unsigned long in, out, ex, all_bits, bit = 1, j;
514 			unsigned long res_in = 0, res_out = 0, res_ex = 0;
515 			__poll_t mask;
516 
517 			in = *inp++; out = *outp++; ex = *exp++;
518 			all_bits = in | out | ex;
519 			if (all_bits == 0) {
520 				i += BITS_PER_LONG;
521 				continue;
522 			}
523 
524 			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
525 				struct fd f;
526 				if (i >= n)
527 					break;
528 				if (!(bit & all_bits))
529 					continue;
530 				mask = EPOLLNVAL;
531 				f = fdget(i);
532 				if (f.file) {
533 					wait_key_set(wait, in, out, bit,
534 						     busy_flag);
535 					mask = vfs_poll(f.file, wait);
536 
537 					fdput(f);
538 				}
539 				if ((mask & POLLIN_SET) && (in & bit)) {
540 					res_in |= bit;
541 					retval++;
542 					wait->_qproc = NULL;
543 				}
544 				if ((mask & POLLOUT_SET) && (out & bit)) {
545 					res_out |= bit;
546 					retval++;
547 					wait->_qproc = NULL;
548 				}
549 				if ((mask & POLLEX_SET) && (ex & bit)) {
550 					res_ex |= bit;
551 					retval++;
552 					wait->_qproc = NULL;
553 				}
554 				/* got something, stop busy polling */
555 				if (retval) {
556 					can_busy_loop = false;
557 					busy_flag = 0;
558 
559 				/*
560 				 * only remember a returned
561 				 * POLL_BUSY_LOOP if we asked for it
562 				 */
563 				} else if (busy_flag & mask)
564 					can_busy_loop = true;
565 
566 			}
567 			if (res_in)
568 				*rinp = res_in;
569 			if (res_out)
570 				*routp = res_out;
571 			if (res_ex)
572 				*rexp = res_ex;
573 			cond_resched();
574 		}
575 		wait->_qproc = NULL;
576 		if (retval || timed_out || signal_pending(current))
577 			break;
578 		if (table.error) {
579 			retval = table.error;
580 			break;
581 		}
582 
583 		/* only if found POLL_BUSY_LOOP sockets && not out of time */
584 		if (can_busy_loop && !need_resched()) {
585 			if (!busy_start) {
586 				busy_start = busy_loop_current_time();
587 				continue;
588 			}
589 			if (!busy_loop_timeout(busy_start))
590 				continue;
591 		}
592 		busy_flag = 0;
593 
594 		/*
595 		 * If this is the first loop and we have a timeout
596 		 * given, then we convert to ktime_t and set the to
597 		 * pointer to the expiry value.
598 		 */
599 		if (end_time && !to) {
600 			expire = timespec64_to_ktime(*end_time);
601 			to = &expire;
602 		}
603 
604 		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
605 					   to, slack))
606 			timed_out = 1;
607 	}
608 
609 	poll_freewait(&table);
610 
611 	return retval;
612 }
613 
614 /*
615  * We can actually return ERESTARTSYS instead of EINTR, but I'd
616  * like to be certain this leads to no problems. So I return
617  * EINTR just for safety.
618  *
619  * Update: ERESTARTSYS breaks at least the xview clock binary, so
620  * I'm trying ERESTARTNOHAND which restarts only when you want to.
621  */
622 int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
623 			   fd_set __user *exp, struct timespec64 *end_time)
624 {
625 	fd_set_bits fds;
626 	void *bits;
627 	int ret, max_fds;
628 	size_t size, alloc_size;
629 	struct fdtable *fdt;
630 	/* Allocate small arguments on the stack to save memory and be faster */
631 	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
632 
633 	ret = -EINVAL;
634 	if (n < 0)
635 		goto out_nofds;
636 
637 	/* max_fds can increase, so grab it once to avoid race */
638 	rcu_read_lock();
639 	fdt = files_fdtable(current->files);
640 	max_fds = fdt->max_fds;
641 	rcu_read_unlock();
642 	if (n > max_fds)
643 		n = max_fds;
644 
645 	/*
646 	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
647 	 * since we use fd_sets we need to allocate memory in units of
648 	 * long-words.
649 	 */
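	/*
	 * The single allocation below is carved into six consecutive bitmaps
	 * of FDS_BYTES(n) bytes each:
	 *
	 *	[ in | out | ex | res_in | res_out | res_ex ]
	 */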
650 	size = FDS_BYTES(n);
651 	bits = stack_fds;
652 	if (size > sizeof(stack_fds) / 6) {
653 		/* Not enough space in on-stack array; must use kmalloc */
654 		ret = -ENOMEM;
655 		if (size > (SIZE_MAX / 6))
656 			goto out_nofds;
657 
658 		alloc_size = 6 * size;
659 		bits = kvmalloc(alloc_size, GFP_KERNEL);
660 		if (!bits)
661 			goto out_nofds;
662 	}
663 	fds.in      = bits;
664 	fds.out     = bits +   size;
665 	fds.ex      = bits + 2*size;
666 	fds.res_in  = bits + 3*size;
667 	fds.res_out = bits + 4*size;
668 	fds.res_ex  = bits + 5*size;
669 
670 	if ((ret = get_fd_set(n, inp, fds.in)) ||
671 	    (ret = get_fd_set(n, outp, fds.out)) ||
672 	    (ret = get_fd_set(n, exp, fds.ex)))
673 		goto out;
674 	zero_fd_set(n, fds.res_in);
675 	zero_fd_set(n, fds.res_out);
676 	zero_fd_set(n, fds.res_ex);
677 
678 	ret = do_select(n, &fds, end_time);
679 
680 	if (ret < 0)
681 		goto out;
682 	if (!ret) {
683 		ret = -ERESTARTNOHAND;
684 		if (signal_pending(current))
685 			goto out;
686 		ret = 0;
687 	}
688 
689 	if (set_fd_set(n, inp, fds.res_in) ||
690 	    set_fd_set(n, outp, fds.res_out) ||
691 	    set_fd_set(n, exp, fds.res_ex))
692 		ret = -EFAULT;
693 
694 out:
695 	if (bits != stack_fds)
696 		kvfree(bits);
697 out_nofds:
698 	return ret;
699 }
700 
701 static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
702 		       fd_set __user *exp, struct __kernel_old_timeval __user *tvp)
703 {
704 	struct timespec64 end_time, *to = NULL;
705 	struct __kernel_old_timeval tv;
706 	int ret;
707 
708 	if (tvp) {
709 		if (copy_from_user(&tv, tvp, sizeof(tv)))
710 			return -EFAULT;
711 
712 		to = &end_time;
713 		if (poll_select_set_timeout(to,
714 				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
715 				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
716 			return -EINVAL;
717 	}
718 
719 	ret = core_sys_select(n, inp, outp, exp, to);
720 	return poll_select_finish(&end_time, tvp, PT_TIMEVAL, ret);
721 }
722 
723 SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
724 		fd_set __user *, exp, struct __kernel_old_timeval __user *, tvp)
725 {
726 	return kern_select(n, inp, outp, exp, tvp);
727 }
728 
729 static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
730 		       fd_set __user *exp, void __user *tsp,
731 		       const sigset_t __user *sigmask, size_t sigsetsize,
732 		       enum poll_time_type type)
733 {
734 	struct timespec64 ts, end_time, *to = NULL;
735 	int ret;
736 
737 	if (tsp) {
738 		switch (type) {
739 		case PT_TIMESPEC:
740 			if (get_timespec64(&ts, tsp))
741 				return -EFAULT;
742 			break;
743 		case PT_OLD_TIMESPEC:
744 			if (get_old_timespec32(&ts, tsp))
745 				return -EFAULT;
746 			break;
747 		default:
748 			BUG();
749 		}
750 
751 		to = &end_time;
752 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
753 			return -EINVAL;
754 	}
755 
756 	ret = set_user_sigmask(sigmask, sigsetsize);
757 	if (ret)
758 		return ret;
759 
760 	ret = core_sys_select(n, inp, outp, exp, to);
761 	return poll_select_finish(&end_time, tsp, type, ret);
762 }
763 
764 /*
765  * Most architectures can't handle 7-argument syscalls. So we provide a
766  * 6-argument version where the sixth argument is a pointer to a structure
767  * which has a pointer to the sigset_t itself followed by a size_t containing
768  * the sigset size.
769  */
770 struct sigset_argpack {
771 	sigset_t __user *p;
772 	size_t size;
773 };
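/*
 * From userspace the sixth pselect6() argument is expected to point at a
 * matching two-word structure; an illustrative sketch of such a call:
 *
 *	struct { const sigset_t *ss; size_t ss_len; } sig = { &mask, 8 };
 *	syscall(__NR_pselect6, nfds, &rfds, NULL, NULL, &ts, &sig);
 *
 * where ss_len must match the kernel's sigset size (8 bytes on most 64-bit
 * ABIs).
 */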
774 
775 static inline int get_sigset_argpack(struct sigset_argpack *to,
776 				     struct sigset_argpack __user *from)
777 {
778 	// the path is hot enough for overhead of copy_from_user() to matter
779 	if (from) {
780 		if (!user_read_access_begin(from, sizeof(*from)))
781 			return -EFAULT;
782 		unsafe_get_user(to->p, &from->p, Efault);
783 		unsafe_get_user(to->size, &from->size, Efault);
784 		user_read_access_end();
785 	}
786 	return 0;
787 Efault:
788 	user_access_end();
789 	return -EFAULT;
790 }
791 
792 SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
793 		fd_set __user *, exp, struct __kernel_timespec __user *, tsp,
794 		void __user *, sig)
795 {
796 	struct sigset_argpack x = {NULL, 0};
797 
798 	if (get_sigset_argpack(&x, sig))
799 		return -EFAULT;
800 
801 	return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_TIMESPEC);
802 }
803 
804 #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
805 
806 SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *, outp,
807 		fd_set __user *, exp, struct old_timespec32 __user *, tsp,
808 		void __user *, sig)
809 {
810 	struct sigset_argpack x = {NULL, 0};
811 
812 	if (get_sigset_argpack(&x, sig))
813 		return -EFAULT;
814 
815 	return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_OLD_TIMESPEC);
816 }
817 
818 #endif
819 
820 #ifdef __ARCH_WANT_SYS_OLD_SELECT
821 struct sel_arg_struct {
822 	unsigned long n;
823 	fd_set __user *inp, *outp, *exp;
824 	struct __kernel_old_timeval __user *tvp;
825 };
826 
827 SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
828 {
829 	struct sel_arg_struct a;
830 
831 	if (copy_from_user(&a, arg, sizeof(a)))
832 		return -EFAULT;
833 	return kern_select(a.n, a.inp, a.outp, a.exp, a.tvp);
834 }
835 #endif
836 
837 struct poll_list {
838 	struct poll_list *next;
839 	unsigned int len;
840 	struct pollfd entries[] __counted_by(len);
841 };
842 
843 #define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
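/*
 * Rough numbers, assuming 4 KiB pages and a 64-bit build where
 * sizeof(struct poll_list) is 16 and sizeof(struct pollfd) is 8:
 * POLLFD_PER_PAGE works out to (4096 - 16) / 8 = 510 entries per page.
 */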
844 
845 /*
846  * Fish for pollable events on the pollfd->fd file descriptor. We're only
847  * interested in events matching the pollfd->events mask, and the result
848  * matching that mask is both recorded in pollfd->revents and returned. The
849  * pwait poll_table will be used by the fd-provided poll handler for waiting,
850  * if pwait->_qproc is non-NULL.
851  */
852 static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
853 				     bool *can_busy_poll,
854 				     __poll_t busy_flag)
855 {
856 	int fd = pollfd->fd;
857 	__poll_t mask = 0, filter;
858 	struct fd f;
859 
860 	if (fd < 0)
861 		goto out;
862 	mask = EPOLLNVAL;
863 	f = fdget(fd);
864 	if (!f.file)
865 		goto out;
866 
867 	/* userland u16 ->events contains POLL... bitmap */
868 	filter = demangle_poll(pollfd->events) | EPOLLERR | EPOLLHUP;
869 	pwait->_key = filter | busy_flag;
870 	mask = vfs_poll(f.file, pwait);
871 	if (mask & busy_flag)
872 		*can_busy_poll = true;
873 	mask &= filter;		/* Mask out unneeded events. */
874 	fdput(f);
875 
876 out:
877 	/* ... and so does ->revents */
878 	pollfd->revents = mangle_poll(mask);
879 	return mask;
880 }
881 
882 static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
883 		   struct timespec64 *end_time)
884 {
885 	poll_table* pt = &wait->pt;
886 	ktime_t expire, *to = NULL;
887 	int timed_out = 0, count = 0;
888 	u64 slack = 0;
889 	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
890 	unsigned long busy_start = 0;
891 
892 	/* Optimise the no-wait case */
893 	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
894 		pt->_qproc = NULL;
895 		timed_out = 1;
896 	}
897 
898 	if (end_time && !timed_out)
899 		slack = select_estimate_accuracy(end_time);
900 
901 	for (;;) {
902 		struct poll_list *walk;
903 		bool can_busy_loop = false;
904 
905 		for (walk = list; walk != NULL; walk = walk->next) {
906 			struct pollfd * pfd, * pfd_end;
907 
908 			pfd = walk->entries;
909 			pfd_end = pfd + walk->len;
910 			for (; pfd != pfd_end; pfd++) {
911 				/*
912 				 * Fish for events. If we found one, record it
913 				 * and kill poll_table->_qproc, so we don't
914 				 * needlessly register any other waiters after
915 				 * this. They'll get immediately deregistered
916 				 * when we break out and return.
917 				 */
918 				if (do_pollfd(pfd, pt, &can_busy_loop,
919 					      busy_flag)) {
920 					count++;
921 					pt->_qproc = NULL;
922 					/* found something, stop busy polling */
923 					busy_flag = 0;
924 					can_busy_loop = false;
925 				}
926 			}
927 		}
928 		/*
929 		 * All waiters have already been registered, so don't provide
930 		 * a poll_table->_qproc to them on the next loop iteration.
931 		 */
932 		pt->_qproc = NULL;
933 		if (!count) {
934 			count = wait->error;
935 			if (signal_pending(current))
936 				count = -ERESTARTNOHAND;
937 		}
938 		if (count || timed_out)
939 			break;
940 
941 		/* only if found POLL_BUSY_LOOP sockets && not out of time */
942 		if (can_busy_loop && !need_resched()) {
943 			if (!busy_start) {
944 				busy_start = busy_loop_current_time();
945 				continue;
946 			}
947 			if (!busy_loop_timeout(busy_start))
948 				continue;
949 		}
950 		busy_flag = 0;
951 
952 		/*
953 		 * If this is the first loop and we have a timeout
954 		 * given, then we convert to ktime_t and set the to
955 		 * pointer to the expiry value.
956 		 */
957 		if (end_time && !to) {
958 			expire = timespec64_to_ktime(*end_time);
959 			to = &expire;
960 		}
961 
962 		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
963 			timed_out = 1;
964 	}
965 	return count;
966 }
967 
968 #define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
969 			sizeof(struct pollfd))
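/*
 * Example figures, assuming POLL_STACK_ALLOC is 256 with a 16-byte
 * struct poll_list header and 8-byte struct pollfd entries: N_STACK_PPS is
 * (256 - 16) / 8 = 30, so up to 30 descriptors are polled without any
 * heap allocation.
 */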
970 
971 static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
972 		struct timespec64 *end_time)
973 {
974 	struct poll_wqueues table;
975 	int err = -EFAULT, fdcount;
976 	/* Allocate small arguments on the stack to save memory and be
977 	   faster - use long to make sure the buffer is aligned properly
978 	   on 64 bit archs to avoid unaligned access */
979 	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
980 	struct poll_list *const head = (struct poll_list *)stack_pps;
981  	struct poll_list *walk = head;
982 	unsigned int todo = nfds;
983 	unsigned int len;
984 
985 	if (nfds > rlimit(RLIMIT_NOFILE))
986 		return -EINVAL;
987 
988 	len = min_t(unsigned int, nfds, N_STACK_PPS);
989 	for (;;) {
990 		walk->next = NULL;
991 		walk->len = len;
992 		if (!len)
993 			break;
994 
995 		if (copy_from_user(walk->entries, ufds + nfds-todo,
996 					sizeof(struct pollfd) * walk->len))
997 			goto out_fds;
998 
999 		if (walk->len >= todo)
1000 			break;
1001 		todo -= walk->len;
1002 
1003 		len = min(todo, POLLFD_PER_PAGE);
1004 		walk = walk->next = kmalloc(struct_size(walk, entries, len),
1005 					    GFP_KERNEL);
1006 		if (!walk) {
1007 			err = -ENOMEM;
1008 			goto out_fds;
1009 		}
1010 	}
1011 
1012 	poll_initwait(&table);
1013 	fdcount = do_poll(head, &table, end_time);
1014 	poll_freewait(&table);
1015 
1016 	if (!user_write_access_begin(ufds, nfds * sizeof(*ufds)))
1017 		goto out_fds;
1018 
1019 	for (walk = head; walk; walk = walk->next) {
1020 		struct pollfd *fds = walk->entries;
1021 		unsigned int j;
1022 
1023 		for (j = walk->len; j; fds++, ufds++, j--)
1024 			unsafe_put_user(fds->revents, &ufds->revents, Efault);
1025   	}
1026 	user_write_access_end();
1027 
1028 	err = fdcount;
1029 out_fds:
1030 	walk = head->next;
1031 	while (walk) {
1032 		struct poll_list *pos = walk;
1033 		walk = walk->next;
1034 		kfree(pos);
1035 	}
1036 
1037 	return err;
1038 
1039 Efault:
1040 	user_write_access_end();
1041 	err = -EFAULT;
1042 	goto out_fds;
1043 }
1044 
1045 static long do_restart_poll(struct restart_block *restart_block)
1046 {
1047 	struct pollfd __user *ufds = restart_block->poll.ufds;
1048 	int nfds = restart_block->poll.nfds;
1049 	struct timespec64 *to = NULL, end_time;
1050 	int ret;
1051 
1052 	if (restart_block->poll.has_timeout) {
1053 		end_time.tv_sec = restart_block->poll.tv_sec;
1054 		end_time.tv_nsec = restart_block->poll.tv_nsec;
1055 		to = &end_time;
1056 	}
1057 
1058 	ret = do_sys_poll(ufds, nfds, to);
1059 
1060 	if (ret == -ERESTARTNOHAND)
1061 		ret = set_restart_fn(restart_block, do_restart_poll);
1062 
1063 	return ret;
1064 }
1065 
1066 SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
1067 		int, timeout_msecs)
1068 {
1069 	struct timespec64 end_time, *to = NULL;
1070 	int ret;
1071 
1072 	if (timeout_msecs >= 0) {
1073 		to = &end_time;
1074 		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
1075 			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
1076 	}
1077 
1078 	ret = do_sys_poll(ufds, nfds, to);
1079 
1080 	if (ret == -ERESTARTNOHAND) {
1081 		struct restart_block *restart_block;
1082 
1083 		restart_block = &current->restart_block;
1084 		restart_block->poll.ufds = ufds;
1085 		restart_block->poll.nfds = nfds;
1086 
1087 		if (timeout_msecs >= 0) {
1088 			restart_block->poll.tv_sec = end_time.tv_sec;
1089 			restart_block->poll.tv_nsec = end_time.tv_nsec;
1090 			restart_block->poll.has_timeout = 1;
1091 		} else
1092 			restart_block->poll.has_timeout = 0;
1093 
1094 		ret = set_restart_fn(restart_block, do_restart_poll);
1095 	}
1096 	return ret;
1097 }
1098 
1099 SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
1100 		struct __kernel_timespec __user *, tsp, const sigset_t __user *, sigmask,
1101 		size_t, sigsetsize)
1102 {
1103 	struct timespec64 ts, end_time, *to = NULL;
1104 	int ret;
1105 
1106 	if (tsp) {
1107 		if (get_timespec64(&ts, tsp))
1108 			return -EFAULT;
1109 
1110 		to = &end_time;
1111 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1112 			return -EINVAL;
1113 	}
1114 
1115 	ret = set_user_sigmask(sigmask, sigsetsize);
1116 	if (ret)
1117 		return ret;
1118 
1119 	ret = do_sys_poll(ufds, nfds, to);
1120 	return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);
1121 }
1122 
1123 #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
1124 
1125 SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds,
1126 		struct old_timespec32 __user *, tsp, const sigset_t __user *, sigmask,
1127 		size_t, sigsetsize)
1128 {
1129 	struct timespec64 ts, end_time, *to = NULL;
1130 	int ret;
1131 
1132 	if (tsp) {
1133 		if (get_old_timespec32(&ts, tsp))
1134 			return -EFAULT;
1135 
1136 		to = &end_time;
1137 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1138 			return -EINVAL;
1139 	}
1140 
1141 	ret = set_user_sigmask(sigmask, sigsetsize);
1142 	if (ret)
1143 		return ret;
1144 
1145 	ret = do_sys_poll(ufds, nfds, to);
1146 	return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret);
1147 }
1148 #endif
1149 
1150 #ifdef CONFIG_COMPAT
1151 #define __COMPAT_NFDBITS       (8 * sizeof(compat_ulong_t))
1152 
1153 /*
1154  * Ooo, nasty.  Here we need to frob 32-bit unsigned longs into
1155  * 64-bit unsigned longs.
1156  */
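/*
 * On a 64-bit kernel, compat_get_bitmap()/compat_put_bitmap() assemble each
 * pair of consecutive 32-bit userspace words into one native unsigned long
 * (and split it back again on the way out), so bit n of the compat fd_set
 * maps to bit n of the kernel bitmap.
 */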
1157 static
1158 int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
1159 			unsigned long *fdset)
1160 {
1161 	if (ufdset) {
1162 		return compat_get_bitmap(fdset, ufdset, nr);
1163 	} else {
1164 		zero_fd_set(nr, fdset);
1165 		return 0;
1166 	}
1167 }
1168 
1169 static
1170 int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
1171 		      unsigned long *fdset)
1172 {
1173 	if (!ufdset)
1174 		return 0;
1175 	return compat_put_bitmap(ufdset, fdset, nr);
1176 }
1177 
1178 
1179 /*
1180  * This is a virtual copy of sys_select from fs/select.c and probably
1181  * should be compared to it from time to time
1182  */
1183 
1184 /*
1185  * We can actually return ERESTARTSYS instead of EINTR, but I'd
1186  * like to be certain this leads to no problems. So I return
1187  * EINTR just for safety.
1188  *
1189  * Update: ERESTARTSYS breaks at least the xview clock binary, so
1190  * I'm trying ERESTARTNOHAND which restarts only when you want to.
1191  */
1192 static int compat_core_sys_select(int n, compat_ulong_t __user *inp,
1193 	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
1194 	struct timespec64 *end_time)
1195 {
1196 	fd_set_bits fds;
1197 	void *bits;
1198 	int size, max_fds, ret = -EINVAL;
1199 	struct fdtable *fdt;
1200 	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
1201 
1202 	if (n < 0)
1203 		goto out_nofds;
1204 
1205 	/* max_fds can increase, so grab it once to avoid race */
1206 	rcu_read_lock();
1207 	fdt = files_fdtable(current->files);
1208 	max_fds = fdt->max_fds;
1209 	rcu_read_unlock();
1210 	if (n > max_fds)
1211 		n = max_fds;
1212 
1213 	/*
1214 	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
1215 	 * since we use fd_sets we need to allocate memory in units of
1216 	 * long-words.
1217 	 */
1218 	size = FDS_BYTES(n);
1219 	bits = stack_fds;
1220 	if (size > sizeof(stack_fds) / 6) {
1221 		bits = kmalloc_array(6, size, GFP_KERNEL);
1222 		ret = -ENOMEM;
1223 		if (!bits)
1224 			goto out_nofds;
1225 	}
1226 	fds.in      = (unsigned long *)  bits;
1227 	fds.out     = (unsigned long *) (bits +   size);
1228 	fds.ex      = (unsigned long *) (bits + 2*size);
1229 	fds.res_in  = (unsigned long *) (bits + 3*size);
1230 	fds.res_out = (unsigned long *) (bits + 4*size);
1231 	fds.res_ex  = (unsigned long *) (bits + 5*size);
1232 
1233 	if ((ret = compat_get_fd_set(n, inp, fds.in)) ||
1234 	    (ret = compat_get_fd_set(n, outp, fds.out)) ||
1235 	    (ret = compat_get_fd_set(n, exp, fds.ex)))
1236 		goto out;
1237 	zero_fd_set(n, fds.res_in);
1238 	zero_fd_set(n, fds.res_out);
1239 	zero_fd_set(n, fds.res_ex);
1240 
1241 	ret = do_select(n, &fds, end_time);
1242 
1243 	if (ret < 0)
1244 		goto out;
1245 	if (!ret) {
1246 		ret = -ERESTARTNOHAND;
1247 		if (signal_pending(current))
1248 			goto out;
1249 		ret = 0;
1250 	}
1251 
1252 	if (compat_set_fd_set(n, inp, fds.res_in) ||
1253 	    compat_set_fd_set(n, outp, fds.res_out) ||
1254 	    compat_set_fd_set(n, exp, fds.res_ex))
1255 		ret = -EFAULT;
1256 out:
1257 	if (bits != stack_fds)
1258 		kfree(bits);
1259 out_nofds:
1260 	return ret;
1261 }
1262 
1263 static int do_compat_select(int n, compat_ulong_t __user *inp,
1264 	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
1265 	struct old_timeval32 __user *tvp)
1266 {
1267 	struct timespec64 end_time, *to = NULL;
1268 	struct old_timeval32 tv;
1269 	int ret;
1270 
1271 	if (tvp) {
1272 		if (copy_from_user(&tv, tvp, sizeof(tv)))
1273 			return -EFAULT;
1274 
1275 		to = &end_time;
1276 		if (poll_select_set_timeout(to,
1277 				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
1278 				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
1279 			return -EINVAL;
1280 	}
1281 
1282 	ret = compat_core_sys_select(n, inp, outp, exp, to);
1283 	return poll_select_finish(&end_time, tvp, PT_OLD_TIMEVAL, ret);
1284 }
1285 
1286 COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
1287 	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
1288 	struct old_timeval32 __user *, tvp)
1289 {
1290 	return do_compat_select(n, inp, outp, exp, tvp);
1291 }
1292 
1293 struct compat_sel_arg_struct {
1294 	compat_ulong_t n;
1295 	compat_uptr_t inp;
1296 	compat_uptr_t outp;
1297 	compat_uptr_t exp;
1298 	compat_uptr_t tvp;
1299 };
1300 
1301 COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
1302 {
1303 	struct compat_sel_arg_struct a;
1304 
1305 	if (copy_from_user(&a, arg, sizeof(a)))
1306 		return -EFAULT;
1307 	return do_compat_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
1308 				compat_ptr(a.exp), compat_ptr(a.tvp));
1309 }
1310 
1311 static long do_compat_pselect(int n, compat_ulong_t __user *inp,
1312 	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
1313 	void __user *tsp, compat_sigset_t __user *sigmask,
1314 	compat_size_t sigsetsize, enum poll_time_type type)
1315 {
1316 	struct timespec64 ts, end_time, *to = NULL;
1317 	int ret;
1318 
1319 	if (tsp) {
1320 		switch (type) {
1321 		case PT_OLD_TIMESPEC:
1322 			if (get_old_timespec32(&ts, tsp))
1323 				return -EFAULT;
1324 			break;
1325 		case PT_TIMESPEC:
1326 			if (get_timespec64(&ts, tsp))
1327 				return -EFAULT;
1328 			break;
1329 		default:
1330 			BUG();
1331 		}
1332 
1333 		to = &end_time;
1334 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1335 			return -EINVAL;
1336 	}
1337 
1338 	ret = set_compat_user_sigmask(sigmask, sigsetsize);
1339 	if (ret)
1340 		return ret;
1341 
1342 	ret = compat_core_sys_select(n, inp, outp, exp, to);
1343 	return poll_select_finish(&end_time, tsp, type, ret);
1344 }
1345 
1346 struct compat_sigset_argpack {
1347 	compat_uptr_t p;
1348 	compat_size_t size;
1349 };
1350 static inline int get_compat_sigset_argpack(struct compat_sigset_argpack *to,
1351 					    struct compat_sigset_argpack __user *from)
1352 {
1353 	if (from) {
1354 		if (!user_read_access_begin(from, sizeof(*from)))
1355 			return -EFAULT;
1356 		unsafe_get_user(to->p, &from->p, Efault);
1357 		unsafe_get_user(to->size, &from->size, Efault);
1358 		user_read_access_end();
1359 	}
1360 	return 0;
1361 Efault:
1362 	user_access_end();
1363 	return -EFAULT;
1364 }
1365 
1366 COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp,
1367 	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
1368 	struct __kernel_timespec __user *, tsp, void __user *, sig)
1369 {
1370 	struct compat_sigset_argpack x = {0, 0};
1371 
1372 	if (get_compat_sigset_argpack(&x, sig))
1373 		return -EFAULT;
1374 
1375 	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p),
1376 				 x.size, PT_TIMESPEC);
1377 }
1378 
1379 #if defined(CONFIG_COMPAT_32BIT_TIME)
1380 
1381 COMPAT_SYSCALL_DEFINE6(pselect6_time32, int, n, compat_ulong_t __user *, inp,
1382 	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
1383 	struct old_timespec32 __user *, tsp, void __user *, sig)
1384 {
1385 	struct compat_sigset_argpack x = {0, 0};
1386 
1387 	if (get_compat_sigset_argpack(&x, sig))
1388 		return -EFAULT;
1389 
1390 	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p),
1391 				 x.size, PT_OLD_TIMESPEC);
1392 }
1393 
1394 #endif
1395 
1396 #if defined(CONFIG_COMPAT_32BIT_TIME)
1397 COMPAT_SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds,
1398 	unsigned int,  nfds, struct old_timespec32 __user *, tsp,
1399 	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
1400 {
1401 	struct timespec64 ts, end_time, *to = NULL;
1402 	int ret;
1403 
1404 	if (tsp) {
1405 		if (get_old_timespec32(&ts, tsp))
1406 			return -EFAULT;
1407 
1408 		to = &end_time;
1409 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1410 			return -EINVAL;
1411 	}
1412 
1413 	ret = set_compat_user_sigmask(sigmask, sigsetsize);
1414 	if (ret)
1415 		return ret;
1416 
1417 	ret = do_sys_poll(ufds, nfds, to);
1418 	return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret);
1419 }
1420 #endif
1421 
1422 /* New compat syscall for 64-bit time_t */
1423 COMPAT_SYSCALL_DEFINE5(ppoll_time64, struct pollfd __user *, ufds,
1424 	unsigned int,  nfds, struct __kernel_timespec __user *, tsp,
1425 	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
1426 {
1427 	struct timespec64 ts, end_time, *to = NULL;
1428 	int ret;
1429 
1430 	if (tsp) {
1431 		if (get_timespec64(&ts, tsp))
1432 			return -EFAULT;
1433 
1434 		to = &end_time;
1435 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1436 			return -EINVAL;
1437 	}
1438 
1439 	ret = set_compat_user_sigmask(sigmask, sigsetsize);
1440 	if (ret)
1441 		return ret;
1442 
1443 	ret = do_sys_poll(ufds, nfds, to);
1444 	return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);
1445 }
1446 
1447 #endif
1448