xref: /linux/include/net/request_sock.h (revision f6e0a4984c2e7244689ea87b62b433bed9d07e94)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * 		From code originally in include/net/tcp.h
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/refcount.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

struct request_sock_ops {
	int		family;
	unsigned int	obj_size;
	struct kmem_cache	*slab;
	char		*slab_name;
	int		(*rtx_syn_ack)(const struct sock *sk,
				       struct request_sock *req);
	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(const struct sock *sk,
				      struct sk_buff *skb);
	void		(*destructor)(struct request_sock *req);
	void		(*syn_ack_timeout)(const struct request_sock *req);
};
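
/*
 * Illustrative sketch: each protocol supplies one request_sock_ops table.
 * The handler names below are hypothetical; TCP's real IPv4 table lives in
 * net/ipv4/tcp_ipv4.c.
 *
 *	static struct request_sock_ops myproto_request_sock_ops = {
 *		.family		 = PF_INET,
 *		.obj_size	 = sizeof(struct myproto_request_sock),
 *		.rtx_syn_ack	 = myproto_rtx_syn_ack,
 *		.send_ack	 = myproto_reqsk_send_ack,
 *		.send_reset	 = myproto_send_reset,
 *		.destructor	 = myproto_reqsk_destructor,
 *		.syn_ack_timeout = myproto_syn_ack_timeout,
 *	};
 *
 * inet_rtx_syn_ack() below wraps ->rtx_syn_ack() and, on success, bumps the
 * request's num_retrans counter.
 */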

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);

struct saved_syn {
	u32 mac_hdrlen;
	u32 network_hdrlen;
	u32 tcp_hdrlen;
	u8 data[];
};
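
/*
 * Layout note (summarising how TCP uses this): data[] holds the raw bytes
 * of the original SYN's headers, stored back to back, and the three
 * *_hdrlen fields record how far each header extends into data[]. The total
 * saved length is therefore mac_hdrlen + network_hdrlen + tcp_hdrlen.
 */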

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct sock_common		__req_common;
#define rsk_refcnt			__req_common.skc_refcnt
#define rsk_hash			__req_common.skc_hash
#define rsk_listener			__req_common.skc_listener
#define rsk_window_clamp		__req_common.skc_window_clamp
#define rsk_rcv_wnd			__req_common.skc_rcv_wnd

	struct request_sock		*dl_next;
	u16				mss;
	u8				num_retrans; /* number of retransmits */
	u8				syncookie:1; /* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7; /* number of timeouts */
	u32				ts_recent;
	struct timer_list		rsk_timer;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	struct saved_syn		*saved_syn;
	u32				secid;
	u32				peer_secid;
	u32				timeout;
};

static inline struct request_sock *inet_reqsk(const struct sock *sk)
{
	return (struct request_sock *)sk;
}

static inline struct sock *req_to_sk(struct request_sock *req)
{
	return (struct sock *)req;
}
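
/*
 * The two casts above are safe only because struct request_sock and
 * struct sock both start with struct sock_common, so the shared skc_*
 * fields line up. Callers must know from context (e.g. sk_state ==
 * TCP_NEW_SYN_RECV) which object they actually hold before touching
 * anything beyond that common prefix.
 */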

/**
 * skb_steal_sock - steal a socket from an sk_buff
 * @skb: sk_buff to steal the socket from
 * @refcounted: is set to true if the socket is reference-counted
 * @prefetched: is set to true if the socket was assigned from bpf
 */
static inline struct sock *skb_steal_sock(struct sk_buff *skb,
					  bool *refcounted, bool *prefetched)
{
	struct sock *sk = skb->sk;

	if (!sk) {
		*prefetched = false;
		*refcounted = false;
		return NULL;
	}

	*prefetched = skb_sk_is_prefetched(skb);
	if (*prefetched) {
#if IS_ENABLED(CONFIG_SYN_COOKIES)
		if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) {
			struct request_sock *req = inet_reqsk(sk);

			*refcounted = false;
			sk = req->rsk_listener;
			req->rsk_listener = NULL;
			return sk;
		}
#endif
		*refcounted = sk_is_refcounted(sk);
	} else {
		*refcounted = true;
	}

	skb->destructor = NULL;
	skb->sk = NULL;
	return sk;
}
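
/*
 * Typical receive-path usage, sketched with illustrative names:
 *
 *	bool refcounted, prefetched;
 *	struct sock *sk = skb_steal_sock(skb, &refcounted, &prefetched);
 *
 *	if (sk) {
 *		... deliver skb to sk ...
 *		if (refcounted)
 *			sock_put(sk);
 *	}
 *
 * *refcounted tells the caller whether it now owns a reference that must
 * be released with sock_put() once it is done with the socket.
 */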

static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
	    bool attach_listener)
{
	struct request_sock *req;

	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!req)
		return NULL;
	req->rsk_listener = NULL;
	if (attach_listener) {
		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
			kmem_cache_free(ops->slab, req);
			return NULL;
		}
		req->rsk_listener = sk_listener;
	}
	req->rsk_ops = ops;
	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
	sk_node_init(&req_to_sk(req)->sk_node);
	sk_tx_queue_clear(req_to_sk(req));
	req->saved_syn = NULL;
	req->timeout = 0;
	req->num_timeout = 0;
	req->num_retrans = 0;
	req->sk = NULL;
	refcount_set(&req->rsk_refcnt, 0);

	return req;
}
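
/*
 * Note that reqsk_alloc() leaves rsk_refcnt at 0: the request is not live
 * until its owner finishes initialising it and sets the refcount itself.
 * A hedged sketch of the pattern (protocol-specific steps elided, names
 * illustrative):
 *
 *	struct request_sock *req;
 *
 *	req = reqsk_alloc(&myproto_request_sock_ops, listener, true);
 *	if (!req)
 *		return -ENOMEM;
 *	... fill in protocol fields, arm rsk_timer, hash the request ...
 *	refcount_set(&req->rsk_refcnt, 1);  // actual count is protocol-specific
 */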

static inline void __reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kfree(req->saved_syn);
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_free(struct request_sock *req)
{
	WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
	__reqsk_free(req);
}

static inline void reqsk_put(struct request_sock *req)
{
	if (refcount_dec_and_test(&req->rsk_refcnt))
		reqsk_free(req);
}
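
/*
 * Release paths, summarised: reqsk_put() is the normal way to drop a
 * reference once rsk_refcnt is live; it frees the request when the count
 * hits zero. reqsk_free() asserts the count is already zero, which suits
 * requests that were never published (rsk_refcnt still 0 from
 * reqsk_alloc()). __reqsk_free() does the actual teardown: the protocol
 * destructor, the listener reference, the saved SYN and the slab object.
 */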

/*
 * For a TCP Fast Open listener -
 *	lock - protects access to the reqsks, each of which is co-owned by
 *		the listener and a child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields could be made part of the
 *	"listen_sock" structure above. But there is some implementation
 *	difficulty because listen_sock is part of request_sock_queue and
 *	hence is freed when a listener is stopped. TFO-related fields,
 *	however, may continue to be accessed even after a listener is
 *	closed, until its sk_refcnt drops to 0, implying no more outstanding
 *	TFO reqs. One solution is to keep listen_opt around until sk_refcnt
 *	drops to 0. But there is some other complexity that needs to be
 *	resolved. E.g., a listener can be disabled temporarily through
 *	shutdown()->tcp_disconnect(), and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attacks.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */

	struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
};

/**
 * struct request_sock_queue - queue of request_socks
 * @rskq_lock: protects the accept FIFO below
 * @rskq_defer_accept: User waits for some data after accept()
 * @synflood_warned: rate-limits the "possible SYN flooding" warning
 * @qlen: number of pending requests
 * @young: pending requests that have not yet timed out
 * @rskq_accept_head: FIFO head of established children
 * @rskq_accept_tail: FIFO tail of established children
 * @fastopenq: TCP Fast Open state
 */
struct request_sock_queue {
	spinlock_t		rskq_lock;
	u8			rskq_defer_accept;

	u32			synflood_warned;
	atomic_t		qlen;
	atomic_t		young;

	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
					     * if TFO is enabled.
					     */
};

void reqsk_queue_alloc(struct request_sock_queue *queue);

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);

static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
	return READ_ONCE(queue->rskq_accept_head) == NULL;
}
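
/*
 * reqsk_queue_empty() is deliberately lockless: the READ_ONCE() pairs with
 * WRITE_ONCE() updates of rskq_accept_head made under rskq_lock, letting
 * hot paths (e.g. poll()) peek at the accept queue without taking the lock.
 */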

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
						      struct sock *parent)
{
	struct request_sock *req;

	spin_lock_bh(&queue->rskq_lock);
	req = queue->rskq_accept_head;
	if (req) {
		sk_acceptq_removed(parent);
		WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_tail = NULL;
	}
	spin_unlock_bh(&queue->rskq_lock);
	return req;
}
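
/*
 * Sketch of the accept() path this serves (simplified from
 * inet_csk_accept(); error handling and TFO special cases elided):
 *
 *	req = reqsk_queue_remove(&inet_csk(sk)->icsk_accept_queue, sk);
 *	newsk = req->sk;	// the established child socket
 *	...
 *	reqsk_put(req);		// drop the queue's reference
 */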

static inline void reqsk_queue_removed(struct request_sock_queue *queue,
				       const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_dec(&queue->young);
	atomic_dec(&queue->qlen);
}

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}
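
/*
 * Accounting note: a request is "young" until its first SYN-ACK timeout
 * (the request timer drops 'young' when num_timeout goes from 0 to 1),
 * which is why reqsk_queue_removed() only decrements 'young' for requests
 * with num_timeout == 0. Roughly speaking, the young count lets the
 * listener tell whether a full SYN queue holds fresh connection attempts
 * or stale, possibly spoofed ones.
 */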

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}

#endif /* _REQUEST_SOCK_H */