/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * 		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

struct request_sock_ops {
	int		family;
	kmem_cache_t	*slab;
	int		obj_size;
	int		(*rtx_syn_ack)(struct sock *sk,
				       struct request_sock *req,
				       struct dst_entry *dst);
	void		(*send_ack)(struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(struct sk_buff *skb);
	void		(*destructor)(struct request_sock *req);
};
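
/*
 * Example (illustrative sketch, not part of this header): a protocol
 * provides one request_sock_ops table and points every request_sock at
 * it.  The myproto_* names below are hypothetical stand-ins for a
 * protocol's real handlers, in the style of TCP's tcp_request_sock_ops;
 * ops->slab is typically created with kmem_cache_create() at init time,
 * sized by obj_size:
 *
 *	static struct request_sock_ops myproto_request_sock_ops = {
 *		.family		= PF_INET,
 *		.obj_size	= sizeof(struct myproto_request_sock),
 *		.rtx_syn_ack	= myproto_rtx_synack,
 *		.send_ack	= myproto_reqsk_send_ack,
 *		.send_reset	= myproto_send_reset,
 *		.destructor	= myproto_reqsk_destructor,
 *	};
 */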

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct request_sock		*dl_next; /* Must be first member! */
	u16				mss;
	u8				retrans;
	u8				__pad;
	/* The following two fields can be easily recomputed I think -AK */
	u32				window_clamp; /* window clamp at creation time */
	u32				rcv_wnd;	  /* rcv_wnd offered first time */
	u32				ts_recent;
	unsigned long			expires;
	struct request_sock_ops		*rsk_ops;
	struct sock			*sk;
};

static inline struct request_sock *reqsk_alloc(struct request_sock_ops *ops)
{
	struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);

	if (req != NULL)
		req->rsk_ops = ops;

	return req;
}

static inline void __reqsk_free(struct request_sock *req)
{
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	__reqsk_free(req);
}
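
/*
 * Typical lifecycle (illustrative sketch, using the hypothetical ops
 * table above): reqsk_alloc() takes an object from ops->slab and stamps
 * rsk_ops, so that reqsk_free() can later find both the protocol
 * destructor and the owning cache.  Allocation is atomic, so a failure
 * path is mandatory:
 *
 *	struct request_sock *req = reqsk_alloc(&myproto_request_sock_ops);
 *
 *	if (req == NULL)
 *		goto drop;
 *	... fill in req->mss, req->rcv_wnd, etc ...
 *	reqsk_free(req);	runs ops->destructor, then frees to slab
 */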

extern int sysctl_max_syn_backlog;

/** struct listen_sock - listen state
 *
 * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
 */
struct listen_sock {
	u8			max_qlen_log;
	/* 3 bytes hole, try to use */
	int			qlen;
	int			qlen_young;
	int			clock_hand;
	u32			hash_rnd;
	struct request_sock	*syn_table[0];
};
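
/*
 * max_qlen_log is derived from the requested hash table size.  A minimal
 * sketch of the computation performed by reqsk_queue_alloc() in
 * net/core/request_sock.c (starting at 2^6, then the smallest power of
 * two that covers nr_table_entries):
 *
 *	for (lopt->max_qlen_log = 6;
 *	     (1 << lopt->max_qlen_log) < nr_table_entries;
 *	     lopt->max_qlen_log++);
 */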

/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @syn_wait_lock - serializer
 *
 * %syn_wait_lock is necessary only to avoid the proc interface having to
 * grab the main sock lock while browsing the listening hash (otherwise it's
 * deadlock prone).
 *
 * This lock is acquired in read mode only from listening_get_next() seq_file
 * op and it's acquired in write mode _only_ from code that is actively
 * changing rskq_accept_head.  All readers that are holding the master sock
 * lock don't need to grab this lock in read mode too, as rskq_accept_head
 * writes are always protected by the main sock lock.
 */
struct request_sock_queue {
	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	rwlock_t		syn_wait_lock;
	struct listen_sock	*listen_opt;
};

extern int reqsk_queue_alloc(struct request_sock_queue *queue,
			     const int nr_table_entries);

static inline struct listen_sock *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue)
{
	struct listen_sock *lopt;

	write_lock_bh(&queue->syn_wait_lock);
	lopt = queue->listen_opt;
	queue->listen_opt = NULL;
	write_unlock_bh(&queue->syn_wait_lock);

	return lopt;
}

static inline void reqsk_queue_destroy(struct request_sock_queue *queue)
{
	kfree(reqsk_queue_yank_listen_sk(queue));
}

static inline struct request_sock *
	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
{
	struct request_sock *req = queue->rskq_accept_head;

	queue->rskq_accept_head = NULL;
	return req;
}

static inline int reqsk_queue_empty(struct request_sock_queue *queue)
{
	return queue->rskq_accept_head == NULL;
}

static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
				      struct request_sock *req,
				      struct request_sock **prev_req)
{
	write_lock(&queue->syn_wait_lock);
	*prev_req = req->dl_next;
	write_unlock(&queue->syn_wait_lock);
}

static inline void reqsk_queue_add(struct request_sock_queue *queue,
				   struct request_sock *req,
				   struct sock *parent,
				   struct sock *child)
{
	req->sk = child;
	sk_acceptq_added(parent);

	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_head = req;
	else
		queue->rskq_accept_tail->dl_next = req;

	queue->rskq_accept_tail = req;
	req->dl_next = NULL;
}

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
{
	struct request_sock *req = queue->rskq_accept_head;

	BUG_TRAP(req != NULL);

	queue->rskq_accept_head = req->dl_next;
	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_tail = NULL;

	return req;
}

static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
						 struct sock *parent)
{
	struct request_sock *req = reqsk_queue_remove(queue);
	struct sock *child = req->sk;

	BUG_TRAP(child != NULL);

	sk_acceptq_removed(parent);
	__reqsk_free(req);
	return child;
}
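
/*
 * Accept path (illustrative sketch): when a handshake completes, the
 * protocol queues the established child with reqsk_queue_add(); a later
 * accept() drains it with reqsk_queue_get_child(), which also returns
 * the request_sock to its slab.  Both sides assume the caller holds the
 * listening socket's lock, which is what serializes rskq_accept_head:
 *
 *	reqsk_queue_add(queue, req, parent, child);
 *	...
 *	if (!reqsk_queue_empty(queue))
 *		child = reqsk_queue_get_child(queue, parent);
 */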

static inline int reqsk_queue_removed(struct request_sock_queue *queue,
				      struct request_sock *req)
{
	struct listen_sock *lopt = queue->listen_opt;

	if (req->retrans == 0)
		--lopt->qlen_young;

	return --lopt->qlen;
}

static inline int reqsk_queue_added(struct request_sock_queue *queue)
{
	struct listen_sock *lopt = queue->listen_opt;
	const int prev_qlen = lopt->qlen;

	lopt->qlen_young++;
	lopt->qlen++;
	return prev_qlen;
}

static inline int reqsk_queue_len(struct request_sock_queue *queue)
{
	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
}

static inline int reqsk_queue_len_young(struct request_sock_queue *queue)
{
	return queue->listen_opt->qlen_young;
}

static inline int reqsk_queue_is_full(struct request_sock_queue *queue)
{
	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
}
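
/*
 * reqsk_queue_is_full() is a power-of-two threshold test: the shift
 * yields a non-zero value exactly when qlen >= 2^max_qlen_log.  For
 * example, with max_qlen_log == 8 the queue reports full once 256
 * connection requests are pending.
 */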

static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
					u32 hash, struct request_sock *req,
					unsigned long timeout)
{
	struct listen_sock *lopt = queue->listen_opt;

	req->expires = jiffies + timeout;
	req->retrans = 0;
	req->sk = NULL;
	req->dl_next = lopt->syn_table[hash];

	write_lock(&queue->syn_wait_lock);
	lopt->syn_table[hash] = req;
	write_unlock(&queue->syn_wait_lock);
}
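
/*
 * SYN arrival (illustrative sketch, hypothetical helper name): the
 * listener hashes the peer's address and port with the listen_sock's
 * hash_rnd, parks the request until the handshake completes or times
 * out, and bumps the qlen accounting:
 *
 *	u32 h = myproto_synq_hash(saddr, sport, lopt->hash_rnd);
 *
 *	reqsk_queue_hash_req(queue, h, req, timeout);
 *	reqsk_queue_added(queue);
 */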

#endif /* _REQUEST_SOCK_H */