/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * 		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

/* Empty structure: exists only to "strongly type" what would otherwise be
 * a void * parameter.
 */
struct request_values {
};

struct request_sock_ops {
	int		family;
	int		obj_size;
	struct kmem_cache	*slab;
	char		*slab_name;
	int		(*rtx_syn_ack)(struct sock *sk,
				       struct request_sock *req,
				       struct request_values *rvp);
	void		(*send_ack)(struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(struct sock *sk,
				      struct sk_buff *skb);
	void		(*destructor)(struct request_sock *req);
	void		(*syn_ack_timeout)(struct sock *sk,
					   struct request_sock *req);
};
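
/* Illustrative sketch (not part of this header): a protocol instantiates one
 * request_sock_ops table and points each req's rsk_ops at it. The foo_* names
 * below are hypothetical; TCP's real table lives in net/ipv4/tcp_ipv4.c.
 *
 *	static struct request_sock_ops foo_request_sock_ops = {
 *		.family		 = PF_INET,
 *		.obj_size	 = sizeof(struct foo_request_sock),
 *		.rtx_syn_ack	 = foo_rtx_synack,
 *		.send_ack	 = foo_reqsk_send_ack,
 *		.send_reset	 = foo_send_reset,
 *		.destructor	 = foo_reqsk_destructor,
 *		.syn_ack_timeout = foo_syn_ack_timeout,
 *	};
 */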

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct request_sock		*dl_next; /* Must be first member! */
	u16				mss;
	u8				retrans;
	u8				cookie_ts; /* syncookie: encode tcpopts in timestamp */
	/* The following two fields can be easily recomputed I think -AK */
	u32				window_clamp; /* window clamp at creation time */
	u32				rcv_wnd;	  /* rcv_wnd offered first time */
	u32				ts_recent;
	unsigned long			expires;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	u32				secid;
	u32				peer_secid;
};

static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
{
	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);

	if (req != NULL)
		req->rsk_ops = ops;

	return req;
}

static inline void __reqsk_free(struct request_sock *req)
{
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	__reqsk_free(req);
}
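
/* Usage sketch (hypothetical caller, not part of this header): a SYN handler
 * would typically pair reqsk_alloc() with reqsk_free() on its failure paths;
 * the "..." steps are placeholders:
 *
 *	struct request_sock *req = reqsk_alloc(&foo_request_sock_ops);
 *	if (req == NULL)
 *		goto drop;
 *	... parse options, initialize req fields ...
 *	if (something_failed)
 *		reqsk_free(req);	// runs ops->destructor, then frees
 */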

extern int sysctl_max_syn_backlog;

/** struct listen_sock - listen state
 *
 * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
 */
struct listen_sock {
	u8			max_qlen_log;
	u8			synflood_warned;
	/* 2 bytes hole, try to use */
	int			qlen;
	int			qlen_young;
	int			clock_hand;
	u32			hash_rnd;
	u32			nr_table_entries;
	struct request_sock	*syn_table[0];
};

/*
 * For a TCP Fast Open listener -
 *	lock - protects access to the reqsks, which are co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields would be part of the "listen_sock"
 *	structure above, but there is an implementation difficulty:
 *	listen_sock is part of request_sock_queue and is therefore freed
 *	when a listener is stopped, whereas the TFO-related fields may
 *	still be accessed after the listener is closed, until its sk_refcnt
 *	drops to 0 (i.e. until no TFO reqs remain outstanding). One solution
 *	would be to keep listen_opt around until sk_refcnt drops to 0, but
 *	that raises further complications; e.g. a listener can be disabled
 *	temporarily through shutdown()->tcp_disconnect() and re-enabled
 *	later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attack.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */
};
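
/* Illustrative check (not part of this header): per the comments above and on
 * request_sock_queue below, TFO is active on a listener iff fastopenq is
 * non-NULL and its max_qlen is non-zero. foo_tfo_enabled is a hypothetical
 * name:
 *
 *	static inline bool foo_tfo_enabled(const struct request_sock_queue *q)
 *	{
 *		return q->fastopenq != NULL && q->fastopenq->max_qlen != 0;
 *	}
 */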

/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 * @syn_wait_lock - serializer
 *
 * %syn_wait_lock is necessary only to avoid the proc interface having to
 * grab the main sock lock while browsing the listening hash (otherwise it's
 * deadlock prone).
 *
 * This lock is acquired in read mode only from listening_get_next() seq_file
 * op and it's acquired in write mode _only_ from code that is actively
 * changing rskq_accept_head. Readers that already hold the master sock lock
 * don't need to grab this lock in read mode too, as rskq_accept_head writes
 * are always protected by the main sock lock.
 */
struct request_sock_queue {
	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	rwlock_t		syn_wait_lock;
	u8			rskq_defer_accept;
	/* 3 bytes hole, try to pack */
	struct listen_sock	*listen_opt;
	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
					     * enabled on this listener. Check
					     * max_qlen != 0 in fastopen_queue
					     * to determine if TFO is enabled
					     * right at this moment.
					     */
};
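
/* Locking sketch (hypothetical reader, not part of this header): per the
 * comment above, a reader that does not hold the sock lock, such as a /proc
 * walker, takes syn_wait_lock in read mode while traversing the SYN table:
 *
 *	read_lock_bh(&queue->syn_wait_lock);
 *	for (req = lopt->syn_table[bucket]; req; req = req->dl_next)
 *		... inspect req ...
 *	read_unlock_bh(&queue->syn_wait_lock);
 *
 * Writers take it in write mode, as reqsk_queue_unlink() and
 * reqsk_queue_hash_req() below do.
 */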

extern int reqsk_queue_alloc(struct request_sock_queue *queue,
			     unsigned int nr_table_entries);

extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
extern void reqsk_queue_destroy(struct request_sock_queue *queue);
extern void reqsk_fastopen_remove(struct sock *sk,
				  struct request_sock *req, bool reset);

static inline struct request_sock *
	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
{
	struct request_sock *req = queue->rskq_accept_head;

	queue->rskq_accept_head = NULL;
	return req;
}

static inline int reqsk_queue_empty(struct request_sock_queue *queue)
{
	return queue->rskq_accept_head == NULL;
}

static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
				      struct request_sock *req,
				      struct request_sock **prev_req)
{
	write_lock(&queue->syn_wait_lock);
	*prev_req = req->dl_next;
	write_unlock(&queue->syn_wait_lock);
}

static inline void reqsk_queue_add(struct request_sock_queue *queue,
				   struct request_sock *req,
				   struct sock *parent,
				   struct sock *child)
{
	req->sk = child;
	sk_acceptq_added(parent);

	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_head = req;
	else
		queue->rskq_accept_tail->dl_next = req;

	queue->rskq_accept_tail = req;
	req->dl_next = NULL;
}
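
/* Flow sketch (hypothetical, not part of this header): when the final ACK of
 * the handshake creates a child socket, the protocol unlinks the req from the
 * SYN table and appends it to the accept FIFO for accept() to pick up:
 *
 *	reqsk_queue_unlink(queue, req, prev);	// drop from SYN hash chain
 *	reqsk_queue_removed(queue, req);	// update qlen accounting
 *	reqsk_queue_add(queue, req, parent, child);
 */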

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
{
	struct request_sock *req = queue->rskq_accept_head;

	WARN_ON(req == NULL);

	queue->rskq_accept_head = req->dl_next;
	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_tail = NULL;

	return req;
}
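
/* Accept-path sketch (hypothetical caller, not part of this header): an
 * accept() implementation waits until the FIFO is non-empty, then pops the
 * head and takes the attached child socket:
 *
 *	if (reqsk_queue_empty(queue))
 *		... sleep or return -EAGAIN ...
 *	req = reqsk_queue_remove(queue);
 *	newsk = req->sk;
 *	... free the req; TFO reqs instead go through
 *	    reqsk_fastopen_remove(), per the comments above ...
 */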

/* Bookkeeping for the SYN table: qlen counts pending reqs, qlen_young those
 * whose SYN-ACK has not yet been retransmitted (retrans == 0).
 */
static inline int reqsk_queue_removed(struct request_sock_queue *queue,
				      struct request_sock *req)
{
	struct listen_sock *lopt = queue->listen_opt;

	if (req->retrans == 0)
		--lopt->qlen_young;

	return --lopt->qlen;
}

static inline int reqsk_queue_added(struct request_sock_queue *queue)
{
	struct listen_sock *lopt = queue->listen_opt;
	const int prev_qlen = lopt->qlen;

	lopt->qlen_young++;
	lopt->qlen++;
	return prev_qlen;
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return queue->listen_opt->qlen_young;
}

static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
{
	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
}
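
/* Worked example (illustrative): reqsk_queue_is_full() relies on the fact
 * that qlen >> max_qlen_log is non-zero exactly when qlen >= 2^max_qlen_log.
 * E.g. with max_qlen_log = 8 (a 256-entry limit):
 *
 *	255 >> 8 == 0	-> not full
 *	256 >> 8 == 1	-> full
 */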

static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
					u32 hash, struct request_sock *req,
					unsigned long timeout)
{
	struct listen_sock *lopt = queue->listen_opt;

	req->expires = jiffies + timeout;
	req->retrans = 0;
	req->sk = NULL;
	req->dl_next = lopt->syn_table[hash];

	write_lock(&queue->syn_wait_lock);
	lopt->syn_table[hash] = req;
	write_unlock(&queue->syn_wait_lock);
}
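
/* SYN-receive sketch (hypothetical caller, not part of this header): on an
 * incoming SYN, a protocol checks for overload, allocates a req, hashes it
 * into the SYN table, and bumps the counters:
 *
 *	if (reqsk_queue_is_full(queue))
 *		goto drop;
 *	req = reqsk_alloc(&foo_request_sock_ops);
 *	if (req == NULL)
 *		goto drop;
 *	... send SYN-ACK ...
 *	reqsk_queue_hash_req(queue, hash, req, timeout);
 *	reqsk_queue_added(queue);
 */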

#endif /* _REQUEST_SOCK_H */