xref: /linux/include/linux/poll.h (revision 7110f24f9e33979fd704f7a4a595a9d3e9bdacb7)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_POLL_H
3 #define _LINUX_POLL_H
4 
5 
6 #include <linux/compiler.h>
7 #include <linux/ktime.h>
8 #include <linux/wait.h>
9 #include <linux/string.h>
10 #include <linux/fs.h>
11 #include <linux/uaccess.h>
12 #include <uapi/linux/poll.h>
13 #include <uapi/linux/eventpoll.h>
14 
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#define MAX_STACK_ALLOC 832
#define FRONTEND_STACK_ALLOC	256
/* Portion of the budget used by the select()/poll() frontends themselves */
#define SELECT_STACK_ALLOC	FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC	FRONTEND_STACK_ALLOC
/* Remainder of the budget holds inline poll_table_entry slots (see below) */
#define WQUEUES_STACK_ALLOC	(MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES	(WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))

/* Mask returned by vfs_poll() for files whose f_op lacks a ->poll method */
#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
25 
26 struct poll_table_struct;
27 
28 /*
29  * structures and helpers for f_op->poll implementations
30  */
31 typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
32 
33 /*
34  * Do not touch the structure directly, use the access function
35  * poll_requested_events() instead.
36  */
typedef struct poll_table_struct {
	poll_queue_proc _qproc;	/* callback invoked by poll_wait(); may be NULL (no queuing) */
	__poll_t _key;		/* requested event mask; read via poll_requested_events() */
} poll_table;
41 
/*
 * Register @filp's wait queue @wait_address with the poll table @p.
 * A NULL table, or a table with no queue callback, is a no-op.
 */
static inline void poll_wait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	if (!p || !p->_qproc)
		return;

	p->_qproc(filp, wait_address, p);
	/*
	 * This memory barrier pairs with the one in wq_has_sleeper().
	 * As explained above prepare_to_wait(), it keeps the event
	 * tests that follow in this thread from being reordered before
	 * the __add_wait_queue() done inside the _qproc() paths.
	 */
	smp_mb();
}
55 
56 /*
57  * Return the set of events that the application wants to poll for.
58  * This is useful for drivers that need to know whether a DMA transfer has
59  * to be started implicitly on poll(). You typically only want to do that
60  * if the application is actually polling for POLLIN and/or POLLOUT.
61  */
poll_requested_events(const poll_table * p)62 static inline __poll_t poll_requested_events(const poll_table *p)
63 {
64 	return p ? p->_key : ~(__poll_t)0;
65 }
66 
/*
 * Initialise a poll_table with the given queue callback; the key starts
 * out with every event enabled.
 */
static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
	pt->_key = ~(__poll_t)0;	/* all events enabled */
	pt->_qproc = qproc;
}
72 
file_can_poll(struct file * file)73 static inline bool file_can_poll(struct file *file)
74 {
75 	return file->f_op->poll;
76 }
77 
vfs_poll(struct file * file,struct poll_table_struct * pt)78 static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
79 {
80 	if (unlikely(!file->f_op->poll))
81 		return DEFAULT_POLLMASK;
82 	return file->f_op->poll(file, pt);
83 }
84 
/* One registered wait: a file and the wait queue it was added to. */
struct poll_table_entry {
	struct file *filp;		/* file being waited on */
	__poll_t key;			/* event mask in effect when queued */
	wait_queue_entry_t wait;	/* entry linked onto *wait_address */
	wait_queue_head_t *wait_address; /* head the entry was queued on */
};
91 
92 /*
93  * Structures and helpers for select/poll syscall
94  */
/* Per-call state for the select/poll syscalls. */
struct poll_wqueues {
	poll_table pt;				/* table handed to each ->poll() */
	struct poll_table_page *table;		/* overflow storage once inline_entries fills — presumably page-allocated; see fs/select.c */
	struct task_struct *polling_task;	/* NOTE(review): looks like the task to wake on events — confirm in fs/select.c */
	int triggered;
	int error;
	int inline_index;			/* next free slot in inline_entries */
	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES]; /* entries kept on-stack, avoiding allocation for small waits */
};
104 
extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern u64 select_estimate_accuracy(struct timespec64 *tv);

/* Largest whole-second timeout whose jiffies value still fits in an s64 */
#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)

extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time);

extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
				   long nsec);

/*
 * Move the bit(s) selected by "from" in v to the position of "to":
 * multiply to shift up when from < to, divide to shift down otherwise.
 * Used below to translate single event bits between the EPOLL* and
 * POLL* layouts.  Both masks must be single contiguous bit runs.
 */
#define __MAP(v, from, to) \
	(from < to ? (v & from) * (to/from) : (v & from) / (from/to))
119 
/*
 * Translate an __poll_t event mask (EPOLL* bit layout) into the 16-bit
 * POLL* mask reported to userspace, remapping each event bit
 * individually via __MAP().
 */
static inline __u16 mangle_poll(__poll_t val)
{
	__u16 v = (__force __u16)val;
	/* M(X): move bit EPOLLX of v to the position of POLLX */
#define M(X) __MAP(v, (__force __u16)EPOLL##X, POLL##X)
	return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
		M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
		M(HUP) | M(RDHUP) | M(MSG);
#undef M
}
129 
/*
 * Inverse of mangle_poll(): translate a userspace POLL* mask into the
 * internal __poll_t (EPOLL* layout), one event bit at a time.
 */
static inline __poll_t demangle_poll(u16 val)
{
	/* M(X): move bit POLLX of val to the position of EPOLLX */
#define M(X) (__force __poll_t)__MAP(val, POLL##X, (__force __u16)EPOLL##X)
	return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
		M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
		M(HUP) | M(RDHUP) | M(MSG);
#undef M
}
138 #undef __MAP
139 
140 
141 #endif /* _LINUX_POLL_H */
142