// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  net/dccp/minisocks.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/dccp.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/timer.h>

#include <net/sock.h>
#include <net/xfrm.h>
#include <net/inet_timewait_sock.h>
#include <net/rstreason.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
#include "feat.h"

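/*
 * Table of DCCP TIME_WAIT mini-sockets.  The number of timewait buckets is
 * bounded by sysctl_max_tw_buckets (NR_FILE * 2 here); if that limit is hit
 * or allocation fails, dccp_time_wait() falls back to closing the socket
 * without the TIME_WAIT grace period.
 */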
struct inet_timewait_death_row dccp_death_row = {
	.tw_refcount = REFCOUNT_INIT(1),
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.hashinfo	= &dccp_hashinfo,
};

EXPORT_SYMBOL_GPL(dccp_death_row);

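/*
 * Move a closing socket into TIME_WAIT (or the requested 'state'): allocate
 * a timewait mini-socket, let it take over the connection's hash entries,
 * arm its expiry timer, and finally destroy the full socket via dccp_done().
 */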
void dccp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &dccp_death_row, state);

	if (tw != NULL) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
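		/* 4 * RTO - RTO / 2 == 3.5 * RTO, used below as a lower
		 * bound on the timewait timeout.
		 */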
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == DCCP_TIME_WAIT)
			timeo = DCCP_TIMEWAIT_LEN;

		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance_schedule(tw, sk, &dccp_hashinfo, timeo);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		DCCP_WARN("time wait bucket table overflow\n");
	}

	dccp_done(sk);
}

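/*
 * Create the child (server side) socket for a connection request: clone the
 * listening socket, copy the state negotiated in the request_sock (service
 * code, timestamps, initial sequence numbers) into it, and activate the
 * agreed feature values and CCIDs.  Returns NULL if cloning or feature
 * activation fails.
 */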
struct sock *dccp_create_openreq_child(const struct sock *sk,
				       const struct request_sock *req,
				       const struct sk_buff *skb)
{
	/*
	 * Step 3: Process LISTEN state
	 *
	 *   (* Generate a new socket and switch to that socket *)
	 *   Set S := new socket for this port pair
	 */
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		struct dccp_request_sock *dreq = dccp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct dccp_sock *newdp = dccp_sk(newsk);

		newdp->dccps_role	    = DCCP_ROLE_SERVER;
		newdp->dccps_hc_rx_ackvec   = NULL;
		newdp->dccps_service_list   = NULL;
		newdp->dccps_hc_rx_ccid     = NULL;
		newdp->dccps_hc_tx_ccid     = NULL;
		newdp->dccps_service	    = dreq->dreq_service;
		newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
		newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
		newicsk->icsk_rto	    = DCCP_TIMEOUT_INIT;

		INIT_LIST_HEAD(&newdp->dccps_featneg);
		/*
		 * Step 3: Process LISTEN state
		 *
		 *    Choose S.ISS (initial seqno) or set from Init Cookies
		 *    Initialize S.GAR := S.ISS
		 *    Set S.ISR, S.GSR from packet (or Init Cookies)
		 *
		 *    Setting AWL/AWH and SWL/SWH happens as part of the feature
		 *    activation below, as these windows all depend on the local
		 *    and remote Sequence Window feature values (7.5.2).
		 */
		newdp->dccps_iss = dreq->dreq_iss;
		newdp->dccps_gss = dreq->dreq_gss;
		newdp->dccps_gar = newdp->dccps_iss;
		newdp->dccps_isr = dreq->dreq_isr;
		newdp->dccps_gsr = dreq->dreq_gsr;

		/*
		 * Activate features: initialise CCIDs, sequence windows etc.
		 */
		if (dccp_feat_activate_values(newsk, &dreq->dreq_featneg)) {
			sk_free_unlock_clone(newsk);
			return NULL;
		}
		dccp_init_xmit_timers(newsk);

		__DCCP_INC_STATS(DCCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}

EXPORT_SYMBOL_GPL(dccp_create_openreq_child);

/*
 * Process an incoming packet for RESPOND sockets represented
 * as a request_sock.
 */
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
			    struct request_sock *req)
{
	struct sock *child = NULL;
	struct dccp_request_sock *dreq = dccp_rsk(req);
	bool own_req;

	/* TCP/DCCP listeners became lockless.
	 * DCCP stores complex state in its request_sock, so that state
	 * needs its own protection now that this code runs without the
	 * parent (listener) socket lock being held.
	 */
	spin_lock_bh(&dreq->dreq_lock);

	/* Check for retransmitted REQUEST */
	if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {

		if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_gsr)) {
			dccp_pr_debug("Retransmitted REQUEST\n");
			dreq->dreq_gsr = DCCP_SKB_CB(skb)->dccpd_seq;
			/*
			 * Send another RESPONSE packet
			 * To protect against Request floods, increment retrans
			 * counter (backoff, monitored by dccp_response_timer).
			 */
			inet_rtx_syn_ack(sk, req);
		}
		/* Network Duplicate, discard packet */
		goto out;
	}

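	/* Anything other than a retransmitted Request must (Data)Ack the
	 * Response; default to Reset Code "Packet Error" for packets that
	 * fail the checks below.
	 */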
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;

	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_ACK &&
	    dccp_hdr(skb)->dccph_type != DCCP_PKT_DATAACK)
		goto drop;

	/* Invalid ACK */
	if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
				dreq->dreq_iss, dreq->dreq_gss)) {
		dccp_pr_debug("Invalid ACK number: ack_seq=%llu, "
			      "dreq_iss=%llu, dreq_gss=%llu\n",
			      (unsigned long long)
			      DCCP_SKB_CB(skb)->dccpd_ack_seq,
			      (unsigned long long) dreq->dreq_iss,
			      (unsigned long long) dreq->dreq_gss);
		goto drop;
	}

	if (dccp_parse_options(sk, dreq, skb))
		goto drop;

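	/* The handshake Ack looks valid: have the address-family specific
	 * ->syn_recv_sock() handler create and hash the child socket (it
	 * ends up in dccp_create_openreq_child() above).
	 */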
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (child) {
		child = inet_csk_complete_hashdance(sk, child, req, own_req);
		goto out;
	}

	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
		req->rsk_ops->send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);

	inet_csk_reqsk_queue_drop(sk, req);
out:
	spin_unlock_bh(&dreq->dreq_lock);
	return child;
}

EXPORT_SYMBOL_GPL(dccp_check_req);

/*
 *  Process the segment on the new (child) socket right away if the child
 *  is not currently owned by user context; otherwise queue it on the
 *  child's backlog to be handled once the socket lock is released.
 */
int dccp_child_process(struct sock *parent, struct sock *child,
		       struct sk_buff *skb)
	__releases(child)
{
	int ret = 0;
	const int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = dccp_rcv_state_process(child, skb, dccp_hdr(skb),
					     skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == DCCP_RESPOND && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do a lookup in
		 * the main socket hash table and the lock on the listening
		 * socket no longer protects us.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}

EXPORT_SYMBOL_GPL(dccp_child_process);

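/*
 * DCCP-Ack packets are never sent from the LISTEN or RESPOND states, so the
 * request_sock send_ack callback should be unreachable; flag a bug if it is
 * ever called.
 */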
void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
			 struct request_sock *rsk)
{
	DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state");
}

EXPORT_SYMBOL_GPL(dccp_reqsk_send_ack);

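/*
 * Initialise the DCCP-specific part of a request_sock from the incoming
 * REQUEST packet: ports, timestamp echo, and a copy of the listening
 * socket's feature-negotiation list.
 */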
int dccp_reqsk_init(struct request_sock *req,
		    struct dccp_sock const *dp, struct sk_buff const *skb)
{
	struct dccp_request_sock *dreq = dccp_rsk(req);

	spin_lock_init(&dreq->dreq_lock);
	inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
	inet_rsk(req)->ir_num	   = ntohs(dccp_hdr(skb)->dccph_dport);
	inet_rsk(req)->acked	   = 0;
	dreq->dreq_timestamp_echo  = 0;

	/* inherit feature negotiation options from listening socket */
	return dccp_feat_clone_list(&dp->dccps_featneg, &dreq->dreq_featneg);
}

EXPORT_SYMBOL_GPL(dccp_reqsk_init);