// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/aligned_data.h>
#include <net/rps.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <net/hotdata.h>
#include <net/xfrm.h>
#include <asm/ioctls.h>
#include "protocol.h"
#include "mib.h"

static unsigned int mptcp_inq_hint(const struct sock *sk);

#define CREATE_TRACE_POINTS
#include <trace/events/mptcp.h>

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

enum {
	MPTCP_CMSG_TS = BIT(0),
	MPTCP_CMSG_INQ = BIT(1),
};

static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp;

static void __mptcp_destroy_sock(struct sock *sk);
static void mptcp_check_send_data_fin(struct sock *sk);

DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};
static struct net_device *mptcp_napi_dev;

/* Returns end sequence number of the receiver's advertised window */
static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
{
	return READ_ONCE(msk->wnd_end);
}

static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk)
{
	unsigned short family = READ_ONCE(sk->sk_family);

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (family == AF_INET6)
		return &inet6_stream_ops;
#endif
	WARN_ON_ONCE(family != AF_INET);
	return &inet_stream_ops;
}

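/* Try to switch the msk to fallback (plain TCP) mode: not possible if
 * data is already out of sequence (non-empty OoO queue) or if infinite
 * fallback is no longer allowed. On success no additional subflows can
 * be created afterwards.
 */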
bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib)
{
	struct net *net = sock_net((struct sock *)msk);

	if (__mptcp_check_fallback(msk))
		return true;

	/* The caller possibly is not holding the msk socket lock, but
	 * in the fallback case only the current subflow is touching
	 * the OoO queue.
	 */
	if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
		return false;

	spin_lock_bh(&msk->fallback_lock);
	if (!msk->allow_infinite_fallback) {
		spin_unlock_bh(&msk->fallback_lock);
		return false;
	}

	msk->allow_subflows = false;
	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
	__MPTCP_INC_STATS(net, fb_mib);
	spin_unlock_bh(&msk->fallback_lock);
	return true;
}

static int __mptcp_socket_create(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock);
	if (err)
		return err;

	msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio;
	WRITE_ONCE(msk->first, ssock->sk);
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	sock_hold(ssock->sk);
	subflow->request_mptcp = 1;
	subflow->subflow_id = msk->subflow_id++;

	/* This is the first subflow, always with id 0 */
	WRITE_ONCE(subflow->local_id, 0);
	mptcp_sock_graft(msk->first, sk->sk_socket);
	iput(SOCK_INODE(ssock));

	return 0;
}

/* If the MPC handshake is not started, returns the first subflow,
 * allocating it if needed.
 */
struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;
	int ret;

	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		return ERR_PTR(-EINVAL);

	if (!msk->first) {
		ret = __mptcp_socket_create(msk);
		if (ret)
			return ERR_PTR(ret);
	}

	return msk->first;
}

static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_skbadd(sk, skb);
	__kfree_skb(skb);
}

static bool __mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
				 struct sk_buff *from, bool *fragstolen,
				 int *delta)
{
	int limit = READ_ONCE(sk->sk_rcvbuf);

	if (unlikely(MPTCP_SKB_CB(to)->cant_coalesce) ||
	    MPTCP_SKB_CB(from)->offset ||
	    ((to->len + from->len) > (limit >> 3)) ||
	    !skb_try_coalesce(to, from, fragstolen, delta))
		return false;

	pr_debug("coalesced seq %llx into %llx new len %d new end seq %llx\n",
		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
		 to->len, MPTCP_SKB_CB(from)->end_seq);
	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
	return true;
}

static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
			       struct sk_buff *from)
{
	bool fragstolen;
	int delta;

	if (!__mptcp_try_coalesce(sk, to, from, &fragstolen, &delta))
		return false;

	/* note the fwd memory can reach a negative value after accounting
	 * for the delta, but the later skb free will restore a non-negative
	 * one
	 */
	atomic_add(delta, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, delta);
	kfree_skb_partial(from, fragstolen);

	return true;
}

static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
				   struct sk_buff *from)
{
	if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
		return false;

	return mptcp_try_coalesce((struct sock *)msk, to, from);
}

/* "inspired" by tcp_rcvbuf_grow(), main differences:
 * - mptcp does not maintain a msk-level window clamp
 * - returns true when the receive buffer is actually updated
 */
static bool mptcp_rcvbuf_grow(struct sock *sk, u32 newval)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	const struct net *net = sock_net(sk);
	u32 rcvwin, rcvbuf, cap, oldval;
	u64 grow;

	oldval = msk->rcvq_space.space;
	msk->rcvq_space.space = newval;
	if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
	    (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
		return false;

	/* DRS is always one RTT late. */
	rcvwin = newval << 1;

	/* slow start: allow the sender to double its rate. */
	grow = (u64)rcvwin * (newval - oldval);
	do_div(grow, oldval);
	rcvwin += grow << 1;

	cap = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);

	rcvbuf = min_t(u32, mptcp_space_from_win(sk, rcvwin), cap);
	if (rcvbuf > sk->sk_rcvbuf) {
		WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
		return true;
	}
	return false;
}

/* "inspired" by tcp_data_queue_ofo(), main differences:
 * - use mptcp seqs
 * - don't cope with sacks
 */
static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)msk;
	struct rb_node **p, *parent;
	u64 seq, end_seq, max_seq;
	struct sk_buff *skb1;

	seq = MPTCP_SKB_CB(skb)->map_seq;
	end_seq = MPTCP_SKB_CB(skb)->end_seq;
	max_seq = atomic64_read(&msk->rcv_wnd_sent);

	pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
	if (after64(end_seq, max_seq)) {
		/* out of window */
		mptcp_drop(sk, skb);
		pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
			 (unsigned long long)end_seq - (unsigned long long)max_seq,
			 (unsigned long long)atomic64_read(&msk->rcv_wnd_sent));
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
		return;
	}

	p = &msk->out_of_order_queue.rb_node;
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
	if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
		rb_link_node(&skb->rbnode, NULL, p);
		rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
		msk->ooo_last_skb = skb;
		goto end;
	}

	/* with 2 subflows, adding at end of ooo queue is quite likely
	 * Use of ooo_last_skb avoids the O(log N) rbtree lookup.
	 */
	if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
		return;
	}

	/* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
	if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
		parent = &msk->ooo_last_skb->rbnode;
		p = &parent->rb_right;
		goto insert;
	}

	/* Find place to insert this segment. Handle overlaps on the way. */
	parent = NULL;
	while (*p) {
		parent = *p;
		skb1 = rb_to_skb(parent);
		if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
			p = &parent->rb_left;
			continue;
		}
		if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
			if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
				/* All the bits are present. Drop. */
				mptcp_drop(sk, skb);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
				return;
			}
			if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
				/* partial overlap:
				 *     |     skb      |
				 *  |     skb1    |
				 * continue traversing
				 */
			} else {
				/* skb's seq == skb1's seq and skb covers skb1.
				 * Replace skb1 with skb.
				 */
				rb_replace_node(&skb1->rbnode, &skb->rbnode,
						&msk->out_of_order_queue);
				mptcp_drop(sk, skb1);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
				goto merge_right;
			}
		} else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
			return;
		}
		p = &parent->rb_right;
	}

insert:
	/* Insert segment into RB tree. */
	rb_link_node(&skb->rbnode, parent, p);
	rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);

merge_right:
	/* Remove other segments covered by skb. */
	while ((skb1 = skb_rb_next(skb)) != NULL) {
		if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
			break;
		rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
		mptcp_drop(sk, skb1);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
	}
	/* If there is no skb after us, we are the last_skb! */
	if (!skb1)
		msk->ooo_last_skb = skb;

end:
	skb_condense(skb);
	skb_set_owner_r(skb, sk);
}

static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset,
			   int copy_len)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;

	/* the skb map_seq accounts for the skb offset:
	 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
	 * value
	 */
	MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
	MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
	MPTCP_SKB_CB(skb)->offset = offset;
	MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp;
	MPTCP_SKB_CB(skb)->cant_coalesce = 0;

	__skb_unlink(skb, &ssk->sk_receive_queue);

	skb_ext_reset(skb);
	skb_dst_drop(skb);
}

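/* Queue the freshly mapped skb at the msk level: in-sequence data is
 * appended to the receive queue, coalescing with the tail if possible;
 * out-of-order data goes to the OoO rbtree and old data is dropped.
 * Returns true if in-sequence data became available to the reader.
 */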
static bool __mptcp_move_skb(struct sock *sk, struct sk_buff *skb)
{
	u64 copy_len = MPTCP_SKB_CB(skb)->end_seq - MPTCP_SKB_CB(skb)->map_seq;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sk_buff *tail;

	mptcp_borrow_fwdmem(sk, skb);

	if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
		/* in sequence */
		msk->bytes_received += copy_len;
		WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (tail && mptcp_try_coalesce(sk, tail, skb))
			return true;

		skb_set_owner_r(skb, sk);
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		return true;
	} else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
		mptcp_data_queue_ofo(msk, skb);
		return false;
	}

	/* old data, keep it simple and drop the whole pkt, the sender
	 * will retransmit as needed.
	 */
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
	mptcp_drop(sk, skb);
	return false;
}

static void mptcp_stop_rtx_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->mptcp_retransmit_timer);
	mptcp_sk(sk)->timer_ival = 0;
}

static void mptcp_close_wake_up(struct sock *sk)
{
	if (sock_flag(sk, SOCK_DEAD))
		return;

	sk->sk_state_change(sk);
	if (sk->sk_shutdown == SHUTDOWN_MASK ||
	    sk->sk_state == TCP_CLOSE)
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	else
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}

static void mptcp_shutdown_subflows(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow;

		slow = lock_sock_fast(ssk);
		tcp_shutdown(ssk, SEND_SHUTDOWN);
		unlock_sock_fast(ssk, slow);
	}
}

/* called under the msk socket lock */
static bool mptcp_pending_data_fin_ack(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return ((1 << sk->sk_state) &
		(TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
	       msk->write_seq == READ_ONCE(msk->snd_una);
}

static void mptcp_check_data_fin_ack(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	/* Look for an acknowledged DATA_FIN */
	if (mptcp_pending_data_fin_ack(sk)) {
		WRITE_ONCE(msk->snd_data_fin_enable, 0);

		switch (sk->sk_state) {
		case TCP_FIN_WAIT1:
			mptcp_set_state(sk, TCP_FIN_WAIT2);
			break;
		case TCP_CLOSING:
		case TCP_LAST_ACK:
			mptcp_shutdown_subflows(msk);
			mptcp_set_state(sk, TCP_CLOSE);
			break;
		}

		mptcp_close_wake_up(sk);
	}
}

/* can be called with no lock acquired */
static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (READ_ONCE(msk->rcv_data_fin) &&
	    ((1 << inet_sk_state_load(sk)) &
	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);

		if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) {
			if (seq)
				*seq = rcv_data_fin_seq;

			return true;
		}
	}

	return false;
}

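/* Exponentially back off the DATA_FIN retransmit interval, starting
 * from TCP_RTO_MIN and capped at TCP_RTO_MAX.
 */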
static void mptcp_set_datafin_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 retransmits;

	retransmits = min_t(u32, icsk->icsk_retransmits,
			    ilog2(TCP_RTO_MAX / TCP_RTO_MIN));

	mptcp_sk(sk)->timer_ival = TCP_RTO_MIN << retransmits;
}

static void __mptcp_set_timeout(struct sock *sk, long tout)
{
	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}

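/* Time left before the subflow retransmit timer fires, or 0 for stale
 * subflows and subflows with no timer pending.
 */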
static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
{
	const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

	return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
	       tcp_timeout_expires(ssk) - jiffies : 0;
}

static void mptcp_set_timeout(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	long tout = 0;

	mptcp_for_each_subflow(mptcp_sk(sk), subflow)
		tout = max(tout, mptcp_timeout_from_subflow(subflow));
	__mptcp_set_timeout(sk, tout);
}

static inline bool tcp_can_send_ack(const struct sock *ssk)
{
	return !((1 << inet_sk_state_load(ssk)) &
	       (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
}

void __mptcp_subflow_send_ack(struct sock *ssk)
{
	if (tcp_can_send_ack(ssk))
		tcp_send_ack(ssk);
}

static void mptcp_subflow_send_ack(struct sock *ssk)
{
	bool slow;

	slow = lock_sock_fast(ssk);
	__mptcp_subflow_send_ack(ssk);
	unlock_sock_fast(ssk, slow);
}

static void mptcp_send_ack(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(msk, subflow)
		mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
}

static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
{
	bool slow;

	slow = lock_sock_fast(ssk);
	if (tcp_can_send_ack(ssk))
		tcp_cleanup_rbuf(ssk, copied);
	unlock_sock_fast(ssk, slow);
}

static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
{
	const struct inet_connection_sock *icsk = inet_csk(ssk);
	u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending);
	const struct tcp_sock *tp = tcp_sk(ssk);

	return (ack_pending & ICSK_ACK_SCHED) &&
		((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) >
		  READ_ONCE(icsk->icsk_ack.rcv_mss)) ||
		 (rx_empty && ack_pending &
			      (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
}

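/* msk-level counterpart of tcp_cleanup_rbuf(): after the reader freed
 * some receive space, let each subflow with a pending ack emit it.
 */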
static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
{
	int old_space = READ_ONCE(msk->old_wspace);
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int space = __mptcp_space(sk);
	bool cleanup, rx_empty;

	cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
	rx_empty = !sk_rmem_alloc_get(sk) && copied;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
			mptcp_subflow_cleanup_rbuf(ssk, copied);
	}
}

static void mptcp_check_data_fin(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	u64 rcv_data_fin_seq;

	/* Need to ack a DATA_FIN received from a peer while this side
	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
	 * msk->rcv_data_fin was set when parsing the incoming options
	 * at the subflow level and the msk lock was not held, so this
	 * is the first opportunity to act on the DATA_FIN and change
	 * the msk state.
	 *
	 * If we are caught up to the sequence number of the incoming
	 * DATA_FIN, send the DATA_ACK now and do state transition. If
	 * not caught up, do nothing and let the recv code send DATA_ACK
	 * when catching up.
	 */

	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
		WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
		WRITE_ONCE(msk->rcv_data_fin, 0);

		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */

		switch (sk->sk_state) {
		case TCP_ESTABLISHED:
			mptcp_set_state(sk, TCP_CLOSE_WAIT);
			break;
		case TCP_FIN_WAIT1:
			mptcp_set_state(sk, TCP_CLOSING);
			break;
		case TCP_FIN_WAIT2:
			mptcp_shutdown_subflows(msk);
			mptcp_set_state(sk, TCP_CLOSE);
			break;
		default:
			/* Other states not expected */
			WARN_ON_ONCE(1);
			break;
		}

		if (!__mptcp_check_fallback(msk))
			mptcp_send_ack(msk);
		mptcp_close_wake_up(sk);
	}
}

static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
{
	if (!mptcp_try_fallback(ssk, MPTCP_MIB_DSSCORRUPTIONFALLBACK)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET);
		mptcp_subflow_reset(ssk);
	}
}

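/* Defer the skb processing to the msk owner: append the skb to the msk
 * backlog, coalescing with the backlog tail when possible, and keep the
 * backlog memory accounting up to date.
 */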
static void __mptcp_add_backlog(struct sock *sk,
				struct mptcp_subflow_context *subflow,
				struct sk_buff *skb)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sk_buff *tail = NULL;
	struct sock *ssk = skb->sk;
	bool fragstolen;
	int delta;

	if (unlikely(sk->sk_state == TCP_CLOSE)) {
		kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
		return;
	}

	/* Try to coalesce with the last skb in our backlog */
	if (!list_empty(&msk->backlog_list))
		tail = list_last_entry(&msk->backlog_list, struct sk_buff, list);

	if (tail && MPTCP_SKB_CB(skb)->map_seq == MPTCP_SKB_CB(tail)->end_seq &&
	    ssk == tail->sk &&
	    __mptcp_try_coalesce(sk, tail, skb, &fragstolen, &delta)) {
		skb->truesize -= delta;
		kfree_skb_partial(skb, fragstolen);
		__mptcp_subflow_lend_fwdmem(subflow, delta);
		goto account;
	}

	list_add_tail(&skb->list, &msk->backlog_list);
	mptcp_subflow_lend_fwdmem(subflow, skb);
	delta = skb->truesize;

account:
	WRITE_ONCE(msk->backlog_len, msk->backlog_len + delta);

	/* Possibly not accept()ed yet, keep track of memory not CG
	 * accounted, mptcp_graft_subflows() will handle it.
	 */
	if (!mem_cgroup_from_sk(ssk))
		msk->backlog_unaccounted += delta;
}

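/* Move as much in-sequence data as possible from the subflow receive
 * queue to the msk: directly into the msk receive queue when we own the
 * msk and there is room, via the backlog otherwise. Returns true if any
 * data reached the msk receive queue.
 */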
static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					   struct sock *ssk, bool own_msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	bool more_data_avail;
	struct tcp_sock *tp;
	bool ret = false;

	pr_debug("msk=%p ssk=%p\n", msk, ssk);
	tp = tcp_sk(ssk);
	do {
		u32 map_remaining, offset;
		u32 seq = tp->copied_seq;
		struct sk_buff *skb;
		bool fin;

		/* try to move as much data as available */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);

		skb = skb_peek(&ssk->sk_receive_queue);
		if (unlikely(!skb))
			break;

		if (__mptcp_check_fallback(msk)) {
			/* Under fallback skbs have no MPTCP extension and TCP could
			 * collapse them between the dummy map creation and the
			 * current dequeue. Be sure to adjust the map size.
			 */
			map_remaining = skb->len;
			subflow->map_data_len = skb->len;
		}

		offset = seq - TCP_SKB_CB(skb)->seq;
		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
		if (fin)
			seq++;

		if (offset < skb->len) {
			size_t len = skb->len - offset;

			mptcp_init_skb(ssk, skb, offset, len);

			if (own_msk && sk_rmem_alloc_get(sk) < sk->sk_rcvbuf) {
				mptcp_subflow_lend_fwdmem(subflow, skb);
				ret |= __mptcp_move_skb(sk, skb);
			} else {
				__mptcp_add_backlog(sk, subflow, skb);
			}
			seq += len;

			if (unlikely(map_remaining < len)) {
				DEBUG_NET_WARN_ON_ONCE(1);
				mptcp_dss_corruption(msk, ssk);
			}
		} else {
			if (unlikely(!fin)) {
				DEBUG_NET_WARN_ON_ONCE(1);
				mptcp_dss_corruption(msk, ssk);
			}

			sk_eat_skb(ssk, skb);
		}

		WRITE_ONCE(tp->copied_seq, seq);
		more_data_avail = mptcp_subflow_data_available(ssk);

	} while (more_data_avail);

	if (ret)
		msk->last_data_recv = tcp_jiffies32;
	return ret;
}

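/* Splice now in-sequence skbs from the OoO rbtree into the msk receive
 * queue, trimming any overlap with already received data. Returns true
 * if msk->ack_seq has been advanced.
 */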
static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *skb, *tail;
	bool moved = false;
	struct rb_node *p;
	u64 end_seq;

	p = rb_first(&msk->out_of_order_queue);
	pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
	while (p) {
		skb = rb_to_skb(p);
		if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
			break;

		p = rb_next(p);
		rb_erase(&skb->rbnode, &msk->out_of_order_queue);

		if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq,
				      msk->ack_seq))) {
			mptcp_drop(sk, skb);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
			continue;
		}

		end_seq = MPTCP_SKB_CB(skb)->end_seq;
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;

			/* skip overlapping data, if any */
			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
				 delta);
			MPTCP_SKB_CB(skb)->offset += delta;
			MPTCP_SKB_CB(skb)->map_seq += delta;
			__skb_queue_tail(&sk->sk_receive_queue, skb);
		}
		msk->bytes_received += end_seq - msk->ack_seq;
		WRITE_ONCE(msk->ack_seq, end_seq);
		moved = true;
	}
	return moved;
}

static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk)
{
	int ssk_state;
	int err;

	/* only propagate errors on fallen-back sockets or
	 * on MPC connect
	 */
	if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk)))
		return false;

	err = sock_error(ssk);
	if (!err)
		return false;

	/* We need to propagate only transition to CLOSE state.
	 * Orphaned socket will see such state change via
	 * subflow_sched_work_if_closed() and that path will properly
	 * destroy the msk as needed.
	 */
	ssk_state = inet_sk_state_load(ssk);
	if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
		mptcp_set_state(sk, ssk_state);
	WRITE_ONCE(sk->sk_err, -err);

	/* This barrier is coupled with smp_rmb() in mptcp_poll() */
	smp_wmb();
	sk_error_report(sk);
	return true;
}

void __mptcp_error_report(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_for_each_subflow(msk, subflow)
		if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow)))
			break;
}

/* In most cases we will be able to lock the mptcp socket. If it's already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */
static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;
	bool moved;

	moved = __mptcp_move_skbs_from_subflow(msk, ssk, true);
	__mptcp_ofo_queue(msk);
	if (unlikely(ssk->sk_err))
		__mptcp_subflow_error_report(sk, ssk);

	/* If the moves have caught up with the DATA_FIN sequence number
	 * it's time to ack the DATA_FIN and change socket state, but
	 * this is not a good place to change state. Let the workqueue
	 * do it.
	 */
	if (mptcp_pending_data_fin(sk, NULL))
		mptcp_schedule_work(sk);
	return moved;
}

void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(sk);

	/* The peer can send data while we are shutting down this
	 * subflow at subflow destruction time, but we must avoid enqueuing
	 * more data to the msk receive queue
	 */
	if (unlikely(subflow->closing))
		return;

	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk)) {
		/* Wake-up the reader only for in-sequence data */
		if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
			sk->sk_data_ready(sk);
	} else {
		__mptcp_move_skbs_from_subflow(msk, ssk, false);
	}
	mptcp_data_unlock(sk);
}

static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
{
	mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
	msk->allow_infinite_fallback = false;
	mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
}

static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;

	if (sk->sk_state != TCP_ESTABLISHED)
		return false;

	spin_lock_bh(&msk->fallback_lock);
	if (!msk->allow_subflows) {
		spin_unlock_bh(&msk->fallback_lock);
		return false;
	}
	mptcp_subflow_joined(msk, ssk);
	spin_unlock_bh(&msk->fallback_lock);

	mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
	mptcp_sockopt_sync_locked(msk, ssk);
	mptcp_stop_tout_timer(sk);
	__mptcp_propagate_sndbuf(sk, ssk);
	return true;
}

static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list)
{
	struct mptcp_subflow_context *tmp, *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	list_for_each_entry_safe(subflow, tmp, join_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast(ssk);

		list_move_tail(&subflow->node, &msk->conn_list);
		if (!__mptcp_finish_join(msk, ssk))
			mptcp_subflow_reset(ssk);
		unlock_sock_fast(ssk, slow);
	}
}

static bool mptcp_rtx_timer_pending(struct sock *sk)
{
	return timer_pending(&sk->mptcp_retransmit_timer);
}

static void mptcp_reset_rtx_timer(struct sock *sk)
{
	unsigned long tout;

	/* prevent rescheduling on close */
	if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE))
		return;

	tout = mptcp_sk(sk)->timer_ival;
	sk_reset_timer(sk, &sk->mptcp_retransmit_timer, jiffies + tout);
}

bool mptcp_schedule_work(struct sock *sk)
{
	if (inet_sk_state_load(sk) == TCP_CLOSE)
		return false;

	/* Get a reference on this socket, mptcp_worker() will release it.
	 * As mptcp_worker() might complete before us, we can not avoid
	 * a sock_hold()/sock_put() if schedule_work() returns false.
	 */
	sock_hold(sk);

	if (schedule_work(&mptcp_sk(sk)->work))
		return true;

	sock_put(sk);
	return false;
}

static bool mptcp_skb_can_collapse_to(u64 write_seq,
				      const struct sk_buff *skb,
				      const struct mptcp_ext *mpext)
{
	if (!tcp_skb_can_collapse_to(skb))
		return false;

	/* can collapse only if MPTCP level sequence is in order and this
	 * mapping has not been xmitted yet
	 */
	return mpext && mpext->data_seq + mpext->data_len == write_seq &&
	       !mpext->frozen;
}

/* we can append data to the given data frag if:
 * - there is space available in the backing page_frag
 * - the data frag tail matches the current page_frag free offset
 * - the data frag end sequence number matches the current write seq
 */
static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
				       const struct page_frag *pfrag,
				       const struct mptcp_data_frag *df)
{
	return df && pfrag->page == df->page &&
		pfrag->size - pfrag->offset > 0 &&
		pfrag->offset == (df->offset + df->data_len) &&
		df->data_seq + df->data_len == msk->write_seq;
}

static void dfrag_uncharge(struct sock *sk, int len)
{
	sk_mem_uncharge(sk, len);
	sk_wmem_queued_add(sk, -len);
}

static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
{
	int len = dfrag->data_len + dfrag->overhead;

	list_del(&dfrag->list);
	dfrag_uncharge(sk, len);
	put_page(dfrag->page);
}

/* called under both the msk socket lock and the data lock */
static void __mptcp_clean_una(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;
	u64 snd_una;

	snd_una = msk->snd_una;
	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
			break;

		if (unlikely(dfrag == msk->first_pending)) {
			/* in recovery mode can see ack after the current snd head */
			if (WARN_ON_ONCE(!msk->recovery))
				break;

			msk->first_pending = mptcp_send_next(sk);
		}

		dfrag_clear(sk, dfrag);
	}

	dfrag = mptcp_rtx_head(sk);
	if (dfrag && after64(snd_una, dfrag->data_seq)) {
		u64 delta = snd_una - dfrag->data_seq;

		/* prevent wrap around in recovery mode */
		if (unlikely(delta > dfrag->already_sent)) {
			if (WARN_ON_ONCE(!msk->recovery))
				goto out;
			if (WARN_ON_ONCE(delta > dfrag->data_len))
				goto out;
			dfrag->already_sent += delta - dfrag->already_sent;
		}

		dfrag->data_seq += delta;
		dfrag->offset += delta;
		dfrag->data_len -= delta;
		dfrag->already_sent -= delta;

		dfrag_uncharge(sk, delta);
	}

	/* all retransmitted data acked, recovery completed */
	if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt))
		msk->recovery = false;

out:
	if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) {
		if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
			mptcp_stop_rtx_timer(sk);
	} else {
		mptcp_reset_rtx_timer(sk);
	}

	if (mptcp_pending_data_fin_ack(sk))
		mptcp_schedule_work(sk);
}

static void __mptcp_clean_una_wakeup(struct sock *sk)
{
	lockdep_assert_held_once(&sk->sk_lock.slock);

	__mptcp_clean_una(sk);
	mptcp_write_space(sk);
}

static void mptcp_clean_una_wakeup(struct sock *sk)
{
	mptcp_data_lock(sk);
	__mptcp_clean_una_wakeup(sk);
	mptcp_data_unlock(sk);
}

static void mptcp_enter_memory_pressure(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool first = true;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (first && !ssk->sk_bypass_prot_mem) {
			tcp_enter_memory_pressure(ssk);
			first = false;
		}

		sk_stream_moderate_sndbuf(ssk);
	}
	__mptcp_sync_sndbuf(sk);
}

/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
 * data
 */
static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
					pfrag, sk->sk_allocation)))
		return true;

	mptcp_enter_memory_pressure(sk);
	return false;
}

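/* Carve the dfrag descriptor out of the page frag itself, just ahead of
 * the data it describes; dfrag->overhead accounts for the descriptor
 * plus the alignment padding.
 */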
static struct mptcp_data_frag *
mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
		      int orig_offset)
{
	int offset = ALIGN(orig_offset, sizeof(long));
	struct mptcp_data_frag *dfrag;

	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
	dfrag->data_len = 0;
	dfrag->data_seq = msk->write_seq;
	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
	dfrag->already_sent = 0;
	dfrag->page = pfrag->page;

	return dfrag;
}

struct mptcp_sendmsg_info {
	int mss_now;
	int size_goal;
	u16 limit;
	u16 sent;
	unsigned int flags;
	bool data_lock_held;
};

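/* Clamp the amount of data that can be sent at data_seq to the
 * MPTCP-level send window, and let the subflow use the msk-level window
 * when the latter is larger than the current TCP-level one.
 */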
static size_t mptcp_check_allowed_size(const struct mptcp_sock *msk,
				       struct sock *ssk, u64 data_seq,
				       size_t avail_size)
{
	u64 window_end = mptcp_wnd_end(msk);
	u64 mptcp_snd_wnd;

	if (__mptcp_check_fallback(msk))
		return avail_size;

	mptcp_snd_wnd = window_end - data_seq;
	avail_size = min(mptcp_snd_wnd, avail_size);

	if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) {
		tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd);
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_SNDWNDSHARED);
	}

	return avail_size;
}

static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp)
{
	struct skb_ext *mpext = __skb_ext_alloc(gfp);

	if (!mpext)
		return false;
	__skb_ext_set(skb, SKB_EXT_MPTCP, mpext);
	return true;
}

static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
	if (likely(skb)) {
		if (likely(__mptcp_add_ext(skb, gfp))) {
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->ip_summed = CHECKSUM_PARTIAL;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		mptcp_enter_memory_pressure(sk);
	}
	return NULL;
}

static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = __mptcp_do_alloc_tx_skb(sk, gfp);
	if (!skb)
		return NULL;

	if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
		tcp_skb_entail(ssk, skb);
		return skb;
	}
	tcp_skb_tsorted_anchor_cleanup(skb);
	kfree_skb(skb);
	return NULL;
}

static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
{
	gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;

	return __mptcp_alloc_tx_skb(sk, ssk, gfp);
}

/* note: this always recomputes the csum on the whole skb, even
 * if we just appended a single frag. More status info needed
 */
static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
{
	struct mptcp_ext *mpext = mptcp_get_ext(skb);
	__wsum csum = ~csum_unfold(mpext->csum);
	int offset = skb->len - added;

	mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset));
}

static void mptcp_update_infinite_map(struct mptcp_sock *msk,
				      struct sock *ssk,
				      struct mptcp_ext *mpext)
{
	if (!mpext)
		return;

	mpext->infinite_map = 1;
	mpext->data_len = 0;

	if (!mptcp_try_fallback(ssk, MPTCP_MIB_INFINITEMAPTX)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_FALLBACKFAILED);
		mptcp_subflow_reset(ssk);
		return;
	}

	mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
}

#define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))

static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct mptcp_data_frag *dfrag,
			      struct mptcp_sendmsg_info *info)
{
	u64 data_seq = dfrag->data_seq + info->sent;
	int offset = dfrag->offset + info->sent;
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool zero_window_probe = false;
	struct mptcp_ext *mpext = NULL;
	bool can_coalesce = false;
	bool reuse_skb = true;
	struct sk_buff *skb;
	size_t copy;
	int i;

	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);

	if (WARN_ON_ONCE(info->sent > info->limit ||
			 info->limit > dfrag->data_len))
		return 0;

	if (unlikely(!__tcp_can_send(ssk)))
		return -EAGAIN;

	/* compute send limit */
	if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE))
		ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE;
	info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
	copy = info->size_goal;

	skb = tcp_write_queue_tail(ssk);
	if (skb && copy > skb->len) {
		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most one new skb.
		 * Explicitly tells TCP internals to avoid collapsing on later
		 * queue management operation, to avoid breaking the ext <->
		 * SSN association set here
		 */
		mpext = mptcp_get_ext(skb);
		if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
			TCP_SKB_CB(skb)->eor = 1;
			tcp_mark_push(tcp_sk(ssk), skb);
			goto alloc_skb;
		}

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
		if (!can_coalesce && i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {
			tcp_mark_push(tcp_sk(ssk), skb);
			goto alloc_skb;
		}

		copy -= skb->len;
	} else {
alloc_skb:
		skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held);
		if (!skb)
			return -ENOMEM;

		i = skb_shinfo(skb)->nr_frags;
		reuse_skb = false;
		mpext = mptcp_get_ext(skb);
	}

	/* Zero window and all data acked? Probe. */
	copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy);
	if (copy == 0) {
		u64 snd_una = READ_ONCE(msk->snd_una);

		/* No need for zero probe if there are any data pending
		 * either at the msk or ssk level; skb is the current write
		 * queue tail and can be empty at this point.
		 */
		if (snd_una != msk->snd_nxt || skb->len ||
		    skb != tcp_send_head(ssk)) {
			tcp_remove_empty_skb(ssk);
			return 0;
		}

		zero_window_probe = true;
		data_seq = snd_una - 1;
		copy = 1;
	}

	copy = min_t(size_t, copy, info->limit - info->sent);
	if (!sk_wmem_schedule(ssk, copy)) {
		tcp_remove_empty_skb(ssk);
		return -ENOMEM;
	}

	if (can_coalesce) {
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
	} else {
		get_page(dfrag->page);
		skb_fill_page_desc(skb, i, dfrag->page, offset, copy);
	}

	skb->len += copy;
	skb->data_len += copy;
	skb->truesize += copy;
	sk_wmem_queued_add(ssk, copy);
	sk_mem_charge(ssk, copy);
	WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy);
	TCP_SKB_CB(skb)->end_seq += copy;
	tcp_skb_pcount_set(skb, 0);

	/* on skb reuse we just need to update the DSS len */
	if (reuse_skb) {
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
		mpext->data_len += copy;
		goto out;
	}

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = data_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = copy;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

	if (zero_window_probe) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_WINPROBE);
		mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
		mpext->frozen = 1;
		if (READ_ONCE(msk->csum_enabled))
			mptcp_update_data_checksum(skb, copy);
		tcp_push_pending_frames(ssk);
		return 0;
	}
out:
	if (READ_ONCE(msk->csum_enabled))
		mptcp_update_data_checksum(skb, copy);
	if (mptcp_subflow_ctx(ssk)->send_infinite_map)
		mptcp_update_infinite_map(msk, ssk, mpext);
	trace_mptcp_sendmsg_frag(mpext);
	mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
	return copy;
}

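/* Largest burst handed to a single subflow: the biggest 64K DSS mapping
 * minus the worst case TCP/IPv6 header and option overhead.
 */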
#define MPTCP_SEND_BURST_SIZE		((1 << 16) - \
					 sizeof(struct tcphdr) - \
					 MAX_TCP_OPTION_SPACE - \
					 sizeof(struct ipv6hdr) - \
					 sizeof(struct frag_hdr))

struct subflow_send_info {
	struct sock *ssk;
	u64 linger_time;
};

void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow)
{
	if (!subflow->stale)
		return;

	subflow->stale = 0;
	MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER);
}

bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
	if (unlikely(subflow->stale)) {
		u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp);

		if (subflow->stale_rcv_tstamp == rcv_tstamp)
			return false;

		mptcp_subflow_set_active(subflow);
	}
	return __mptcp_subflow_active(subflow);
}

#define SSK_MODE_ACTIVE	0
#define SSK_MODE_BACKUP	1
#define SSK_MODE_MAX	2

/* implement the mptcp packet scheduler;
 * returns the subflow that will transmit the next DSS
 * additionally updates the rtx timeout
 */
struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
{
	struct subflow_send_info send_info[SSK_MODE_MAX];
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	u32 pace, burst, wmem;
	int i, nr_active = 0;
	struct sock *ssk;
	u64 linger_time;
	long tout = 0;

	/* pick the subflow with the lower wmem/wspace ratio */
	for (i = 0; i < SSK_MODE_MAX; ++i) {
		send_info[i].ssk = NULL;
		send_info[i].linger_time = -1;
	}

	mptcp_for_each_subflow(msk, subflow) {
		bool backup = subflow->backup || subflow->request_bkup;

		trace_mptcp_subflow_get_send(subflow);
		ssk = mptcp_subflow_tcp_sock(subflow);
		if (!mptcp_subflow_active(subflow))
			continue;

		tout = max(tout, mptcp_timeout_from_subflow(subflow));
		nr_active += !backup;
		pace = subflow->avg_pacing_rate;
		if (unlikely(!pace)) {
			/* init pacing rate from socket */
			subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate);
			pace = subflow->avg_pacing_rate;
			if (!pace)
				continue;
		}

		linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
		if (linger_time < send_info[backup].linger_time) {
			send_info[backup].ssk = ssk;
			send_info[backup].linger_time = linger_time;
		}
	}
	__mptcp_set_timeout(sk, tout);

	/* pick the best backup if no other subflow is active */
	if (!nr_active)
		send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk;

	/* According to the blest algorithm, to avoid HoL blocking for the
	 * faster flow, we need to:
	 * - estimate the faster flow linger time
	 * - use the above to estimate the amount of bytes transferred
	 *   by the faster flow
	 * - check that the amount of queued data is greater than the above,
	 *   otherwise do not use the picked, slower, subflow
	 * We select the subflow with the shorter estimated time to flush
	 * the queued mem, which basically ensures the above. We just need
	 * to check that subflow has a non-empty cwin.
	 */
	ssk = send_info[SSK_MODE_ACTIVE].ssk;
	if (!ssk || !sk_stream_memory_free(ssk))
		return NULL;

	burst = min(MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt);
	wmem = READ_ONCE(ssk->sk_wmem_queued);
	if (!burst)
		return ssk;

	subflow = mptcp_subflow_ctx(ssk);
	subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem +
					   READ_ONCE(ssk->sk_pacing_rate) * burst,
					   burst + wmem);
	msk->snd_burst = burst;
	return ssk;
}

static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info)
{
	tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
	release_sock(ssk);
}

static void mptcp_update_post_push(struct mptcp_sock *msk,
				   struct mptcp_data_frag *dfrag,
				   u32 sent)
{
	u64 snd_nxt_new = dfrag->data_seq;

	dfrag->already_sent += sent;

	msk->snd_burst -= sent;

	snd_nxt_new += dfrag->already_sent;

	/* snd_nxt_new can be smaller than snd_nxt in case mptcp
	 * is recovering after a failover. In that event, this re-sends
	 * old segments.
	 *
	 * Thus compute snd_nxt_new candidate based on
	 * the dfrag->data_seq that was sent and the data
	 * that has been handed to the subflow for transmission
	 * and skip update in case it was old dfrag.
	 */
	if (likely(after64(snd_nxt_new, msk->snd_nxt))) {
		msk->bytes_sent += snd_nxt_new - msk->snd_nxt;
		WRITE_ONCE(msk->snd_nxt, snd_nxt_new);
	}
}

void mptcp_check_and_set_pending(struct sock *sk)
{
	if (mptcp_send_head(sk)) {
		mptcp_data_lock(sk);
		mptcp_sk(sk)->cb_flags |= BIT(MPTCP_PUSH_PENDING);
		mptcp_data_unlock(sk);
	}
}

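/* Push as many pending dfrags as possible onto the given subflow.
 * Returns the number of bytes spooled or, when nothing was sent, the
 * (zero or negative) last mptcp_sendmsg_frag() return value.
 */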
static int __subflow_push_pending(struct sock *sk, struct sock *ssk,
				  struct mptcp_sendmsg_info *info)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dfrag;
	int len, copied = 0, err = 0;

	while ((dfrag = mptcp_send_head(sk))) {
		info->sent = dfrag->already_sent;
		info->limit = dfrag->data_len;
		len = dfrag->data_len - dfrag->already_sent;
		while (len > 0) {
			int ret = 0;

			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, info);
			if (ret <= 0) {
				err = copied ? : ret;
				goto out;
			}

			info->sent += ret;
			copied += ret;
			len -= ret;

			mptcp_update_post_push(msk, dfrag, ret);
		}
		msk->first_pending = mptcp_send_next(sk);

		if (msk->snd_burst <= 0 ||
		    !sk_stream_memory_free(ssk) ||
		    !mptcp_subflow_active(mptcp_subflow_ctx(ssk))) {
			err = copied;
			goto out;
		}
		mptcp_set_timeout(sk);
	}
	err = copied;

out:
	if (err > 0)
		msk->last_data_sent = tcp_jiffies32;
	return err;
}

void __mptcp_push_pending(struct sock *sk, unsigned int flags)
{
	struct sock *prev_ssk = NULL, *ssk = NULL;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_sendmsg_info info = {
		.flags = flags,
	};
	bool copied = false;
	int push_count = 1;

	while (mptcp_send_head(sk) && (push_count > 0)) {
		struct mptcp_subflow_context *subflow;
		int ret = 0;

		if (mptcp_sched_get_send(msk))
			break;

		push_count = 0;

		mptcp_for_each_subflow(msk, subflow) {
			if (READ_ONCE(subflow->scheduled)) {
				mptcp_subflow_set_scheduled(subflow, false);

				prev_ssk = ssk;
				ssk = mptcp_subflow_tcp_sock(subflow);
				if (ssk != prev_ssk) {
					/* First check. If the ssk has changed since
					 * the last round, release prev_ssk
					 */
					if (prev_ssk)
						mptcp_push_release(prev_ssk, &info);

					/* Need to lock the new subflow only if different
					 * from the previous one, otherwise we are still
					 * holding the relevant lock
					 */
					lock_sock(ssk);
				}

				push_count++;

				ret = __subflow_push_pending(sk, ssk, &info);
				if (ret <= 0) {
					if (ret != -EAGAIN ||
					    (1 << ssk->sk_state) &
					     (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSE))
						push_count--;
					continue;
				}
				copied = true;
			}
		}
	}

	/* at this point we held the socket lock for the last subflow we used */
	if (ssk)
		mptcp_push_release(ssk, &info);

	/* Avoid scheduling the rtx timer if no data has been pushed; the timer
	 * will be updated on positive acks by __mptcp_clean_una().
	 */
	if (copied) {
		if (!mptcp_rtx_timer_pending(sk))
			mptcp_reset_rtx_timer(sk);
		mptcp_check_send_data_fin(sk);
	}
}

static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_sendmsg_info info = {
		.data_lock_held = true,
	};
	bool keep_pushing = true;
	struct sock *xmit_ssk;
	int copied = 0;

	info.flags = 0;
	while (mptcp_send_head(sk) && keep_pushing) {
		struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
		int ret = 0;

		/* check for a different subflow usage only after
		 * spooling the first chunk of data
		 */
		if (first) {
			mptcp_subflow_set_scheduled(subflow, false);
			ret = __subflow_push_pending(sk, ssk, &info);
			first = false;
			if (ret <= 0)
				break;
			copied += ret;
			continue;
		}

		if (mptcp_sched_get_send(msk))
			goto out;

		if (READ_ONCE(subflow->scheduled)) {
			mptcp_subflow_set_scheduled(subflow, false);
			ret = __subflow_push_pending(sk, ssk, &info);
			if (ret <= 0)
				keep_pushing = false;
			copied += ret;
		}

		mptcp_for_each_subflow(msk, subflow) {
			if (READ_ONCE(subflow->scheduled)) {
				xmit_ssk = mptcp_subflow_tcp_sock(subflow);
				if (xmit_ssk != ssk) {
					mptcp_subflow_delegate(subflow,
							       MPTCP_DELEGATE_SEND);
					keep_pushing = false;
				}
			}
		}
	}

out:
	/* __mptcp_alloc_tx_skb could have released some wmem and we are
	 * not going to flush it via release_sock()
	 */
	if (copied) {
		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
			 info.size_goal);
		if (!mptcp_rtx_timer_pending(sk))
			mptcp_reset_rtx_timer(sk);

		if (msk->snd_data_fin_enable &&
		    msk->snd_nxt + 1 == msk->write_seq)
			mptcp_schedule_work(sk);
	}
}

1752 static int mptcp_disconnect(struct sock *sk, int flags);
1753
mptcp_sendmsg_fastopen(struct sock * sk,struct msghdr * msg,size_t len,int * copied_syn)1754 static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1755 size_t len, int *copied_syn)
1756 {
1757 unsigned int saved_flags = msg->msg_flags;
1758 struct mptcp_sock *msk = mptcp_sk(sk);
1759 struct sock *ssk;
1760 int ret;
1761
1762 /* on flags based fastopen the mptcp is supposed to create the
1763 * first subflow right now. Otherwise we are in the defer_connect
1764 * path, and the first subflow must be already present.
1765 * Since the defer_connect flag is cleared after the first succsful
1766 * fastopen attempt, no need to check for additional subflow status.
1767 */
1768 if (msg->msg_flags & MSG_FASTOPEN) {
1769 ssk = __mptcp_nmpc_sk(msk);
1770 if (IS_ERR(ssk))
1771 return PTR_ERR(ssk);
1772 }
1773 if (!msk->first)
1774 return -EINVAL;
1775
1776 ssk = msk->first;
1777
1778 lock_sock(ssk);
1779 msg->msg_flags |= MSG_DONTWAIT;
1780 msk->fastopening = 1;
1781 ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL);
1782 msk->fastopening = 0;
1783 msg->msg_flags = saved_flags;
1784 release_sock(ssk);
1785
1786 /* do the blocking bits of inet_stream_connect outside the ssk socket lock */
1787 if (ret == -EINPROGRESS && !(msg->msg_flags & MSG_DONTWAIT)) {
1788 ret = __inet_stream_connect(sk->sk_socket, msg->msg_name,
1789 msg->msg_namelen, msg->msg_flags, 1);
1790
1791 /* Keep the same behaviour of plain TCP: zero the copied bytes in
1792 * case of any error, except timeout or signal
1793 */
1794 if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR)
1795 *copied_syn = 0;
1796 } else if (ret && ret != -EINPROGRESS) {
1797 /* The disconnect() op called by tcp_sendmsg_fastopen()/
1798 * __inet_stream_connect() can fail due to a locking check,
1799 * see mptcp_disconnect().
1800 * Attempt it again outside the problematic scope.
1801 */
1802 if (!mptcp_disconnect(sk, 0)) {
1803 sk->sk_disconnects++;
1804 sk->sk_socket->state = SS_UNCONNECTED;
1805 }
1806 }
1807 inet_clear_bit(DEFER_CONNECT, sk);
1808
1809 return ret;
1810 }
1811
1812 static int do_copy_data_nocache(struct sock *sk, int copy,
1813 struct iov_iter *from, char *to)
1814 {
1815 if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
1816 if (!copy_from_iter_full_nocache(to, copy, from))
1817 return -EFAULT;
1818 } else if (!copy_from_iter_full(to, copy, from)) {
1819 return -EFAULT;
1820 }
1821 return 0;
1822 }
1823
1824 /* open-code sk_stream_memory_free() plus sent limit computation to
1825 * avoid indirect calls in fast-path.
1826 * Called under the msk socket lock, so we can avoid a bunch of ONCE
1827 * annotations.
1828 */
1829 static u32 mptcp_send_limit(const struct sock *sk)
1830 {
1831 const struct mptcp_sock *msk = mptcp_sk(sk);
1832 u32 limit, not_sent;
1833
1834 if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf))
1835 return 0;
1836
1837 limit = mptcp_notsent_lowat(sk);
1838 if (limit == UINT_MAX)
1839 return UINT_MAX;
1840
1841 not_sent = msk->write_seq - msk->snd_nxt;
1842 if (not_sent >= limit)
1843 return 0;
1844
1845 return limit - not_sent;
1846 }
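/* Worked example (illustrative figures): with notsent_lowat = 131072
 * (128 KiB) and write_seq - snd_nxt = 98304 (96 KiB) queued but not yet
 * pushed to any subflow, the function above returns 32768, so the
 * current sendmsg() may queue at most 32 KiB more before hitting the
 * wait_for_memory path.
 */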
1847
1848 static void mptcp_rps_record_subflows(const struct mptcp_sock *msk)
1849 {
1850 struct mptcp_subflow_context *subflow;
1851
1852 if (!rfs_is_needed())
1853 return;
1854
1855 mptcp_for_each_subflow(msk, subflow) {
1856 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1857
1858 sock_rps_record_flow(ssk);
1859 }
1860 }
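/* Note: Receive Flow Steering operates on the underlying TCP flows,
 * not on the msk itself, hence the per-subflow
 * sock_rps_record_flow() calls above.
 */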
1861
1862 static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1863 {
1864 struct mptcp_sock *msk = mptcp_sk(sk);
1865 struct page_frag *pfrag;
1866 size_t copied = 0;
1867 int ret = 0;
1868 long timeo;
1869
1870 /* silently ignore everything else */
1871 msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_FASTOPEN;
1872
1873 lock_sock(sk);
1874
1875 mptcp_rps_record_subflows(msk);
1876
1877 if (unlikely(inet_test_bit(DEFER_CONNECT, sk) ||
1878 msg->msg_flags & MSG_FASTOPEN)) {
1879 int copied_syn = 0;
1880
1881 ret = mptcp_sendmsg_fastopen(sk, msg, len, &copied_syn);
1882 copied += copied_syn;
1883 if (ret == -EINPROGRESS && copied_syn > 0)
1884 goto out;
1885 else if (ret)
1886 goto do_error;
1887 }
1888
1889 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1890
1891 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
1892 ret = sk_stream_wait_connect(sk, &timeo);
1893 if (ret)
1894 goto do_error;
1895 }
1896
1897 ret = -EPIPE;
1898 if (unlikely(sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)))
1899 goto do_error;
1900
1901 pfrag = sk_page_frag(sk);
1902
1903 while (msg_data_left(msg)) {
1904 int total_ts, frag_truesize = 0;
1905 struct mptcp_data_frag *dfrag;
1906 bool dfrag_collapsed;
1907 size_t psize, offset;
1908 u32 copy_limit;
1909
1910 /* ensure fitting the notsent_lowat() constraint */
1911 copy_limit = mptcp_send_limit(sk);
1912 if (!copy_limit)
1913 goto wait_for_memory;
1914
1915 /* reuse tail pfrag, if possible, or carve a new one from the
1916 * page allocator
1917 */
1918 dfrag = mptcp_pending_tail(sk);
1919 dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
1920 if (!dfrag_collapsed) {
1921 if (!mptcp_page_frag_refill(sk, pfrag))
1922 goto wait_for_memory;
1923
1924 dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset);
1925 frag_truesize = dfrag->overhead;
1926 }
1927
1928 /* we do not bound vs wspace, to allow a single packet.
1929 * memory accounting will prevent excessive memory usage
1930 * anyway
1931 */
1932 offset = dfrag->offset + dfrag->data_len;
1933 psize = pfrag->size - offset;
1934 psize = min_t(size_t, psize, msg_data_left(msg));
1935 psize = min_t(size_t, psize, copy_limit);
1936 total_ts = psize + frag_truesize;
1937
1938 if (!sk_wmem_schedule(sk, total_ts))
1939 goto wait_for_memory;
1940
1941 ret = do_copy_data_nocache(sk, psize, &msg->msg_iter,
1942 page_address(dfrag->page) + offset);
1943 if (ret)
1944 goto do_error;
1945
1946 /* data successfully copied into the write queue */
1947 sk_forward_alloc_add(sk, -total_ts);
1948 copied += psize;
1949 dfrag->data_len += psize;
1950 frag_truesize += psize;
1951 pfrag->offset += frag_truesize;
1952 WRITE_ONCE(msk->write_seq, msk->write_seq + psize);
1953
1954 /* charge data on mptcp pending queue to the msk socket
1955 * Note: we charge such data both to sk and ssk
1956 */
1957 sk_wmem_queued_add(sk, frag_truesize);
1958 if (!dfrag_collapsed) {
1959 get_page(dfrag->page);
1960 list_add_tail(&dfrag->list, &msk->rtx_queue);
1961 if (!msk->first_pending)
1962 msk->first_pending = dfrag;
1963 }
1964 pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
1965 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
1966 !dfrag_collapsed);
1967
1968 continue;
1969
1970 wait_for_memory:
1971 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1972 __mptcp_push_pending(sk, msg->msg_flags);
1973 ret = sk_stream_wait_memory(sk, &timeo);
1974 if (ret)
1975 goto do_error;
1976 }
1977
1978 if (copied)
1979 __mptcp_push_pending(sk, msg->msg_flags);
1980
1981 out:
1982 release_sock(sk);
1983 return copied;
1984
1985 do_error:
1986 if (copied)
1987 goto out;
1988
1989 copied = sk_stream_error(sk, msg->msg_flags, ret);
1990 goto out;
1991 }
1992
1993 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
1994
1995 static void mptcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
1996 {
1997 /* avoid the indirect call, we know the destructor is sock_rfree */
1998 skb->destructor = NULL;
1999 skb->sk = NULL;
2000 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
2001 sk_mem_uncharge(sk, skb->truesize);
2002 __skb_unlink(skb, &sk->sk_receive_queue);
2003 skb_attempt_defer_free(skb);
2004 }
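/* Note: besides skipping the indirect sock_rfree() call, the helper
 * above uses skb_attempt_defer_free(), which tries to hand the skb
 * back to the CPU that allocated it for better freeing locality.
 */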
2005
2006 static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg,
2007 size_t len, int flags, int copied_total,
2008 struct scm_timestamping_internal *tss,
2009 int *cmsg_flags, struct sk_buff **last)
2010 {
2011 struct mptcp_sock *msk = mptcp_sk(sk);
2012 struct sk_buff *skb, *tmp;
2013 int total_data_len = 0;
2014 int copied = 0;
2015
2016 skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
2017 u32 delta, offset = MPTCP_SKB_CB(skb)->offset;
2018 u32 data_len = skb->len - offset;
2019 u32 count;
2020 int err;
2021
2022 if (flags & MSG_PEEK) {
2023 /* skip already peeked skbs */
2024 if (total_data_len + data_len <= copied_total) {
2025 total_data_len += data_len;
2026 *last = skb;
2027 continue;
2028 }
2029
2030 /* skip the already peeked data in the current skb */
2031 delta = copied_total - total_data_len;
2032 offset += delta;
2033 data_len -= delta;
2034 }
2035
2036 count = min_t(size_t, len - copied, data_len);
2037 if (!(flags & MSG_TRUNC)) {
2038 err = skb_copy_datagram_msg(skb, offset, msg, count);
2039 if (unlikely(err < 0)) {
2040 if (!copied)
2041 return err;
2042 break;
2043 }
2044 }
2045
2046 if (MPTCP_SKB_CB(skb)->has_rxtstamp) {
2047 tcp_update_recv_tstamps(skb, tss);
2048 *cmsg_flags |= MPTCP_CMSG_TS;
2049 }
2050
2051 copied += count;
2052
2053 if (!(flags & MSG_PEEK)) {
2054 msk->bytes_consumed += count;
2055 if (count < data_len) {
2056 MPTCP_SKB_CB(skb)->offset += count;
2057 MPTCP_SKB_CB(skb)->map_seq += count;
2058 break;
2059 }
2060
2061 mptcp_eat_recv_skb(sk, skb);
2062 } else {
2063 *last = skb;
2064 }
2065
2066 if (copied >= len)
2067 break;
2068 }
2069
2070 mptcp_rcv_space_adjust(msk, copied);
2071 return copied;
2072 }
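/* MSG_PEEK example (illustrative): if a previous peek already returned
 * 3000 bytes and the queue holds two 2000-byte skbs, the walk above
 * skips the first skb entirely (2000 <= copied_total), then skips
 * delta = 3000 - 2000 = 1000 bytes into the second before copying, so
 * peeked data is never returned twice.
 */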
2073
2074 static void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
2075 {
2076 const struct tcp_sock *tp = tcp_sk(ssk);
2077
2078 msk->rcvspace_init = 1;
2079 msk->rcvq_space.copied = 0;
2080 msk->rcvq_space.rtt_us = 0;
2081
2082 /* initial rcv_space offering made to peer */
2083 msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
2084 TCP_INIT_CWND * tp->advmss);
2085 if (msk->rcvq_space.space == 0)
2086 msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
2087 }
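/* Example (illustrative): with advmss = 1460 the initial space is
 * min(rcv_wnd, 10 * 1460), i.e. at most 14600 bytes, one initial
 * congestion window worth of data (TCP_INIT_CWND == 10).
 */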
2088
2089 /* receive buffer autotuning. See tcp_rcv_space_adjust for more information.
2090 *
2091 * Only difference: Use highest rtt estimate of the subflows in use.
2092 */
2093 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
2094 {
2095 struct mptcp_subflow_context *subflow;
2096 struct sock *sk = (struct sock *)msk;
2097 u8 scaling_ratio = U8_MAX;
2098 u32 time, advmss = 1;
2099 u64 rtt_us, mstamp;
2100
2101 msk_owned_by_me(msk);
2102
2103 if (copied <= 0)
2104 return;
2105
2106 if (!msk->rcvspace_init)
2107 mptcp_rcv_space_init(msk, msk->first);
2108
2109 msk->rcvq_space.copied += copied;
2110
2111 mstamp = mptcp_stamp();
2112 time = tcp_stamp_us_delta(mstamp, READ_ONCE(msk->rcvq_space.time));
2113
2114 rtt_us = msk->rcvq_space.rtt_us;
2115 if (rtt_us && time < (rtt_us >> 3))
2116 return;
2117
2118 rtt_us = 0;
2119 mptcp_for_each_subflow(msk, subflow) {
2120 const struct tcp_sock *tp;
2121 u64 sf_rtt_us;
2122 u32 sf_advmss;
2123
2124 tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));
2125
2126 sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
2127 sf_advmss = READ_ONCE(tp->advmss);
2128
2129 rtt_us = max(sf_rtt_us, rtt_us);
2130 advmss = max(sf_advmss, advmss);
2131 scaling_ratio = min(tp->scaling_ratio, scaling_ratio);
2132 }
2133
2134 msk->rcvq_space.rtt_us = rtt_us;
2135 msk->scaling_ratio = scaling_ratio;
2136 if (time < (rtt_us >> 3) || rtt_us == 0)
2137 return;
2138
2139 if (msk->rcvq_space.copied <= msk->rcvq_space.space)
2140 goto new_measure;
2141
2142 trace_mptcp_rcvbuf_grow(sk, time);
2143 if (mptcp_rcvbuf_grow(sk, msk->rcvq_space.copied)) {
2144 /* Make subflows follow along. If we do not do this, we
2145 * get drops at subflow level if skbs can't be moved to
2146 * the mptcp rx queue fast enough (announced rcv_win can
2147 * exceed ssk->sk_rcvbuf).
2148 */
2149 mptcp_for_each_subflow(msk, subflow) {
2150 struct sock *ssk;
2151 bool slow;
2152
2153 ssk = mptcp_subflow_tcp_sock(subflow);
2154 slow = lock_sock_fast(ssk);
2155 /* subflows can be added before tcp_init_transfer() */
2156 if (tcp_sk(ssk)->rcvq_space.space)
2157 tcp_rcvbuf_grow(ssk, msk->rcvq_space.copied);
2158 unlock_sock_fast(ssk, slow);
2159 }
2160 }
2161
2162 new_measure:
2163 msk->rcvq_space.copied = 0;
2164 msk->rcvq_space.time = mstamp;
2165 }
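/* Note: as in tcp_rcv_rtt_update(), rcv_rtt_est.rtt_us holds the RTT
 * estimate left-shifted by 3 (8x), so the 'time < (rtt_us >> 3)'
 * checks above rate-limit the adjustment to about once per RTT.
 */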
2166
2167 static bool __mptcp_move_skbs(struct sock *sk, struct list_head *skbs, u32 *delta)
2168 {
2169 struct sk_buff *skb = list_first_entry(skbs, struct sk_buff, list);
2170 struct mptcp_sock *msk = mptcp_sk(sk);
2171 bool moved = false;
2172
2173 *delta = 0;
2174 while (1) {
2175 /* If the msk recvbuf is full, stop: don't drop */
2176 if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
2177 break;
2178
2179 prefetch(skb->next);
2180 list_del(&skb->list);
2181 *delta += skb->truesize;
2182
2183 moved |= __mptcp_move_skb(sk, skb);
2184 if (list_empty(skbs))
2185 break;
2186
2187 skb = list_first_entry(skbs, struct sk_buff, list);
2188 }
2189
2190 __mptcp_ofo_queue(msk);
2191 if (moved)
2192 mptcp_check_data_fin((struct sock *)msk);
2193 return moved;
2194 }
2195
2196 static bool mptcp_can_spool_backlog(struct sock *sk, struct list_head *skbs)
2197 {
2198 struct mptcp_sock *msk = mptcp_sk(sk);
2199
2200 /* After CG initialization, subflows should never add skbs before
2201 * gaining the CG themselves.
2202 */
2203 DEBUG_NET_WARN_ON_ONCE(msk->backlog_unaccounted && sk->sk_socket &&
2204 mem_cgroup_from_sk(sk));
2205
2206 /* Don't spool the backlog if the rcvbuf is full. */
2207 if (list_empty(&msk->backlog_list) ||
2208 sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
2209 return false;
2210
2211 INIT_LIST_HEAD(skbs);
2212 list_splice_init(&msk->backlog_list, skbs);
2213 return true;
2214 }
2215
2216 static void mptcp_backlog_spooled(struct sock *sk, u32 moved,
2217 struct list_head *skbs)
2218 {
2219 struct mptcp_sock *msk = mptcp_sk(sk);
2220
2221 WRITE_ONCE(msk->backlog_len, msk->backlog_len - moved);
2222 list_splice(skbs, &msk->backlog_list);
2223 }
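/* Note: __mptcp_move_skbs() can stop early when the msk rcvbuf fills;
 * the list_splice() above returns the unprocessed remainder to the
 * head of the backlog, preserving the original skb ordering.
 */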
2224
2225 static bool mptcp_move_skbs(struct sock *sk)
2226 {
2227 struct list_head skbs;
2228 bool enqueued = false;
2229 u32 moved;
2230
2231 mptcp_data_lock(sk);
2232 while (mptcp_can_spool_backlog(sk, &skbs)) {
2233 mptcp_data_unlock(sk);
2234 enqueued |= __mptcp_move_skbs(sk, &skbs, &moved);
2235
2236 mptcp_data_lock(sk);
2237 mptcp_backlog_spooled(sk, moved, &skbs);
2238 }
2239 mptcp_data_unlock(sk);
2240 return enqueued;
2241 }
2242
2243 static unsigned int mptcp_inq_hint(const struct sock *sk)
2244 {
2245 const struct mptcp_sock *msk = mptcp_sk(sk);
2246 const struct sk_buff *skb;
2247
2248 skb = skb_peek(&sk->sk_receive_queue);
2249 if (skb) {
2250 u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq;
2251
2252 if (hint_val >= INT_MAX)
2253 return INT_MAX;
2254
2255 return (unsigned int)hint_val;
2256 }
2257
2258 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
2259 return 1;
2260
2261 return 0;
2262 }
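/* Example (illustrative): with ack_seq = 1000164 and the queue head
 * mapped at data sequence 1000100, the hint above reports 64 unread
 * in-sequence bytes; an empty queue on a closed or shutdown socket
 * reports 1 so that readers notice the pending EOF.
 */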
2263
2264 static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
2265 int flags, int *addr_len)
2266 {
2267 struct mptcp_sock *msk = mptcp_sk(sk);
2268 struct scm_timestamping_internal tss;
2269 int copied = 0, cmsg_flags = 0;
2270 int target;
2271 long timeo;
2272
2273 /* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */
2274 if (unlikely(flags & MSG_ERRQUEUE))
2275 return inet_recv_error(sk, msg, len, addr_len);
2276
2277 lock_sock(sk);
2278 if (unlikely(sk->sk_state == TCP_LISTEN)) {
2279 copied = -ENOTCONN;
2280 goto out_err;
2281 }
2282
2283 mptcp_rps_record_subflows(msk);
2284
2285 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2286
2287 len = min_t(size_t, len, INT_MAX);
2288 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2289
2290 if (unlikely(msk->recvmsg_inq))
2291 cmsg_flags = MPTCP_CMSG_INQ;
2292
2293 while (copied < len) {
2294 struct sk_buff *last = NULL;
2295 int err, bytes_read;
2296
2297 bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags,
2298 copied, &tss, &cmsg_flags,
2299 &last);
2300 if (unlikely(bytes_read < 0)) {
2301 if (!copied)
2302 copied = bytes_read;
2303 goto out_err;
2304 }
2305
2306 copied += bytes_read;
2307
2308 if (!list_empty(&msk->backlog_list) && mptcp_move_skbs(sk))
2309 continue;
2310
2311 /* only the MPTCP socket status is relevant here. The exit
2312 * conditions closely mirror tcp_recvmsg()
2313 */
2314 if (copied >= target)
2315 break;
2316
2317 if (copied) {
2318 if (sk->sk_err ||
2319 sk->sk_state == TCP_CLOSE ||
2320 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2321 !timeo ||
2322 signal_pending(current))
2323 break;
2324 } else {
2325 if (sk->sk_err) {
2326 copied = sock_error(sk);
2327 break;
2328 }
2329
2330 if (sk->sk_shutdown & RCV_SHUTDOWN)
2331 break;
2332
2333 if (sk->sk_state == TCP_CLOSE) {
2334 copied = -ENOTCONN;
2335 break;
2336 }
2337
2338 if (!timeo) {
2339 copied = -EAGAIN;
2340 break;
2341 }
2342
2343 if (signal_pending(current)) {
2344 copied = sock_intr_errno(timeo);
2345 break;
2346 }
2347 }
2348
2349 pr_debug("block timeout %ld\n", timeo);
2350 mptcp_cleanup_rbuf(msk, copied);
2351 err = sk_wait_data(sk, &timeo, last);
2352 if (err < 0) {
2353 err = copied ? : err;
2354 goto out_err;
2355 }
2356 }
2357
2358 mptcp_cleanup_rbuf(msk, copied);
2359
2360 out_err:
2361 if (cmsg_flags && copied >= 0) {
2362 if (cmsg_flags & MPTCP_CMSG_TS)
2363 tcp_recv_timestamp(msg, sk, &tss);
2364
2365 if (cmsg_flags & MPTCP_CMSG_INQ) {
2366 unsigned int inq = mptcp_inq_hint(sk);
2367
2368 put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
2369 }
2370 }
2371
2372 pr_debug("msk=%p rx queue empty=%d copied=%d\n",
2373 msk, skb_queue_empty(&sk->sk_receive_queue), copied);
2374
2375 release_sock(sk);
2376 return copied;
2377 }
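/* Illustrative userspace sketch (not part of the kernel sources): how
 * an application can consume the TCP_CM_INQ hint emitted by
 * mptcp_recvmsg() above. It assumes the hint was enabled beforehand
 * with setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));
 * error handling is omitted for brevity.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

static ssize_t recv_with_inq(int fd, void *buf, size_t len,
			     unsigned int *inq)
{
	char cbuf[CMSG_SPACE(sizeof(unsigned int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;
	ssize_t ret;

	ret = recvmsg(fd, &msg, 0);
	/* scan ancillary data for the in-queue hint */
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == IPPROTO_TCP &&
		    cm->cmsg_type == TCP_CM_INQ)
			memcpy(inq, CMSG_DATA(cm), sizeof(*inq));
	return ret;
}
#endif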
2378
2379 static void mptcp_retransmit_timer(struct timer_list *t)
2380 {
2381 struct sock *sk = timer_container_of(sk, t, mptcp_retransmit_timer);
2382 struct mptcp_sock *msk = mptcp_sk(sk);
2383
2384 bh_lock_sock(sk);
2385 if (!sock_owned_by_user(sk)) {
2386 /* we need a process context to retransmit */
2387 if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags))
2388 mptcp_schedule_work(sk);
2389 } else {
2390 /* delegate our work to tcp_release_cb() */
2391 __set_bit(MPTCP_RETRANSMIT, &msk->cb_flags);
2392 }
2393 bh_unlock_sock(sk);
2394 sock_put(sk);
2395 }
2396
2397 static void mptcp_tout_timer(struct timer_list *t)
2398 {
2399 struct inet_connection_sock *icsk =
2400 timer_container_of(icsk, t, mptcp_tout_timer);
2401 struct sock *sk = &icsk->icsk_inet.sk;
2402
2403 mptcp_schedule_work(sk);
2404 sock_put(sk);
2405 }
2406
2407 /* Find an idle subflow. Return NULL if there is unacked data at tcp
2408 * level.
2409 *
2410 * A backup subflow is returned only if that is the only kind available.
2411 */
2412 struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
2413 {
2414 struct sock *backup = NULL, *pick = NULL;
2415 struct mptcp_subflow_context *subflow;
2416 int min_stale_count = INT_MAX;
2417
2418 mptcp_for_each_subflow(msk, subflow) {
2419 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2420
2421 if (!__mptcp_subflow_active(subflow))
2422 continue;
2423
2424 /* still data outstanding at TCP level? skip this */
2425 if (!tcp_rtx_and_write_queues_empty(ssk)) {
2426 mptcp_pm_subflow_chk_stale(msk, ssk);
2427 min_stale_count = min_t(int, min_stale_count, subflow->stale_count);
2428 continue;
2429 }
2430
2431 if (subflow->backup || subflow->request_bkup) {
2432 if (!backup)
2433 backup = ssk;
2434 continue;
2435 }
2436
2437 if (!pick)
2438 pick = ssk;
2439 }
2440
2441 if (pick)
2442 return pick;
2443
2444 /* use the backup only if there is no progress anywhere */
2445 return min_stale_count > 1 ? backup : NULL;
2446 }
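/* Note: when only a backup subflow is idle, it is returned only if
 * every subflow still owning TCP-level data has a stale_count above 1
 * (no progress anywhere); otherwise the NULL return makes the caller
 * wait for the in-flight data instead.
 */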
2447
2448 bool __mptcp_retransmit_pending_data(struct sock *sk)
2449 {
2450 struct mptcp_data_frag *cur, *rtx_head;
2451 struct mptcp_sock *msk = mptcp_sk(sk);
2452
2453 if (__mptcp_check_fallback(msk))
2454 return false;
2455
2456 /* the closing socket has some data untransmitted and/or unacked:
2457 * some data in the mptcp rtx queue has not really been transmitted yet.
2458 * keep it simple and re-inject the whole mptcp-level rtx queue
2459 */
2460 mptcp_data_lock(sk);
2461 __mptcp_clean_una_wakeup(sk);
2462 rtx_head = mptcp_rtx_head(sk);
2463 if (!rtx_head) {
2464 mptcp_data_unlock(sk);
2465 return false;
2466 }
2467
2468 msk->recovery_snd_nxt = msk->snd_nxt;
2469 msk->recovery = true;
2470 mptcp_data_unlock(sk);
2471
2472 msk->first_pending = rtx_head;
2473 msk->snd_burst = 0;
2474
2475 /* be sure to clear the "sent status" on all re-injected fragments */
2476 list_for_each_entry(cur, &msk->rtx_queue, list) {
2477 if (!cur->already_sent)
2478 break;
2479 cur->already_sent = 0;
2480 }
2481
2482 return true;
2483 }
2484
2485 /* flags for __mptcp_close_ssk() */
2486 #define MPTCP_CF_PUSH BIT(1)
2487
2488 /* be sure to send a reset only if the caller asked for it, also
2489 * clean completely the subflow status when the subflow reaches
2490 * TCP_CLOSE state
2491 */
2492 static void __mptcp_subflow_disconnect(struct sock *ssk,
2493 struct mptcp_subflow_context *subflow,
2494 bool fastclosing)
2495 {
2496 if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
2497 fastclosing) {
2498 /* The MPTCP code never waits on the subflow sockets, so the TCP-level
2499 * disconnect should never fail
2500 */
2501 WARN_ON_ONCE(tcp_disconnect(ssk, 0));
2502 mptcp_subflow_ctx_reset(subflow);
2503 } else {
2504 tcp_shutdown(ssk, SEND_SHUTDOWN);
2505 }
2506 }
2507
2508 /* subflow sockets can be either outgoing (connect) or incoming
2509 * (accept).
2510 *
2511 * Outgoing subflows use in-kernel sockets.
2512 * Incoming subflows do not have their own 'struct socket' allocated,
2513 * so we need to use tcp_close() after detaching them from the mptcp
2514 * parent socket.
2515 */
2516 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2517 struct mptcp_subflow_context *subflow,
2518 unsigned int flags)
2519 {
2520 struct mptcp_sock *msk = mptcp_sk(sk);
2521 bool dispose_it, need_push = false;
2522 int fwd_remaining;
2523
2524 /* Do not pass RX data to the msk, even if the subflow socket is not
2525 * going to be freed (i.e. even for the first subflow on graceful
2526 * subflow close).
2527 */
2528 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
2529 subflow->closing = 1;
2530
2531 /* Borrow the fwd allocated page left-over; fwd memory for the subflow
2532 * could be negative at this point, but will reach zero soon, when
2533 * the data allocated using such a fragment is freed.
2534 */
2535 if (subflow->lent_mem_frag) {
2536 fwd_remaining = PAGE_SIZE - subflow->lent_mem_frag;
2537 sk_forward_alloc_add(sk, fwd_remaining);
2538 sk_forward_alloc_add(ssk, -fwd_remaining);
2539 subflow->lent_mem_frag = 0;
2540 }
2541
2542 /* If the first subflow moved to a close state before accept, e.g. due
2543 * to an incoming reset or listener shutdown, the subflow socket is
2544 * already deleted by inet_child_forget() and the mptcp socket can't
2545 * survive too.
2546 */
2547 if (msk->in_accept_queue && msk->first == ssk &&
2548 (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) {
2549 /* ensure later check in mptcp_worker() will dispose the msk */
2550 sock_set_flag(sk, SOCK_DEAD);
2551 mptcp_set_close_tout(sk, tcp_jiffies32 - (mptcp_close_timeout(sk) + 1));
2552 mptcp_subflow_drop_ctx(ssk);
2553 goto out_release;
2554 }
2555
2556 dispose_it = msk->free_first || ssk != msk->first;
2557 if (dispose_it)
2558 list_del(&subflow->node);
2559
2560 if (subflow->send_fastclose && ssk->sk_state != TCP_CLOSE)
2561 tcp_set_state(ssk, TCP_CLOSE);
2562
2563 need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
2564 if (!dispose_it) {
2565 __mptcp_subflow_disconnect(ssk, subflow, msk->fastclosing);
2566 release_sock(ssk);
2567
2568 goto out;
2569 }
2570
2571 subflow->disposable = 1;
2572
2573 /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops:
2574 * the ssk has already been destroyed and we just need to release the
2575 * reference owned by the msk
2576 */
2577 if (!inet_csk(ssk)->icsk_ulp_ops) {
2578 WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
2579 kfree_rcu(subflow, rcu);
2580 } else {
2581 /* otherwise tcp will dispose of the ssk and subflow ctx */
2582 __tcp_close(ssk, 0);
2583
2584 /* close acquired an extra ref */
2585 __sock_put(ssk);
2586 }
2587
2588 out_release:
2589 __mptcp_subflow_error_report(sk, ssk);
2590 release_sock(ssk);
2591
2592 sock_put(ssk);
2593
2594 if (ssk == msk->first)
2595 WRITE_ONCE(msk->first, NULL);
2596
2597 out:
2598 __mptcp_sync_sndbuf(sk);
2599 if (need_push)
2600 __mptcp_push_pending(sk, 0);
2601
2602 /* Catch every 'all subflows closed' scenario, including peers silently
2603 * closing them, e.g. due to timeout.
2604 * For established sockets, allow an additional timeout before closing,
2605 * as the protocol can still create more subflows.
2606 */
2607 if (list_is_singular(&msk->conn_list) && msk->first &&
2608 inet_sk_state_load(msk->first) == TCP_CLOSE) {
2609 if (sk->sk_state != TCP_ESTABLISHED ||
2610 msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) {
2611 mptcp_set_state(sk, TCP_CLOSE);
2612 mptcp_close_wake_up(sk);
2613 } else {
2614 mptcp_start_tout_timer(sk);
2615 }
2616 }
2617 }
2618
2619 void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2620 struct mptcp_subflow_context *subflow)
2621 {
2622 struct mptcp_sock *msk = mptcp_sk(sk);
2623 struct sk_buff *skb;
2624
2625 /* The first subflow can already be closed or disconnected */
2626 if (subflow->close_event_done || READ_ONCE(subflow->local_id) < 0)
2627 return;
2628
2629 subflow->close_event_done = true;
2630
2631 if (sk->sk_state == TCP_ESTABLISHED)
2632 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
2633
2634 /* Remove any reference from the backlog to this ssk; backlog skbs consume
2635 * space in the msk receive queue, no need to touch sk->sk_rmem_alloc
2636 */
2637 list_for_each_entry(skb, &msk->backlog_list, list) {
2638 if (skb->sk != ssk)
2639 continue;
2640
2641 atomic_sub(skb->truesize, &skb->sk->sk_rmem_alloc);
2642 skb->sk = NULL;
2643 }
2644
2645 /* subflow aborted before reaching the fully_established status;
2646 * attempt the creation of the next subflow
2647 */
2648 mptcp_pm_subflow_check_next(mptcp_sk(sk), subflow);
2649
2650 __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH);
2651 }
2652
2653 static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
2654 {
2655 return 0;
2656 }
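/* Note: the msk never transmits packets itself; PMTU changes are
 * handled by the individual TCP subflows, so the icsk_sync_mss hook
 * above can be a no-op.
 */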
2657
2658 static void __mptcp_close_subflow(struct sock *sk)
2659 {
2660 struct mptcp_subflow_context *subflow, *tmp;
2661 struct mptcp_sock *msk = mptcp_sk(sk);
2662
2663 might_sleep();
2664
2665 mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2666 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2667 int ssk_state = inet_sk_state_load(ssk);
2668
2669 if (ssk_state != TCP_CLOSE &&
2670 (ssk_state != TCP_CLOSE_WAIT ||
2671 inet_sk_state_load(sk) != TCP_ESTABLISHED ||
2672 __mptcp_check_fallback(msk)))
2673 continue;
2674
2675 /* 'subflow_data_ready' will re-sched once rx queue is empty */
2676 if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
2677 continue;
2678
2679 mptcp_close_ssk(sk, ssk, subflow);
2680 }
2681
2682 }
2683
2684 static bool mptcp_close_tout_expired(const struct sock *sk)
2685 {
2686 if (!inet_csk(sk)->icsk_mtup.probe_timestamp ||
2687 sk->sk_state == TCP_CLOSE)
2688 return false;
2689
2690 return time_after32(tcp_jiffies32,
2691 inet_csk(sk)->icsk_mtup.probe_timestamp + mptcp_close_timeout(sk));
2692 }
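/* Note: the msk performs no MTU probing, so icsk_mtup.probe_timestamp
 * is reused above (and in mptcp_reset_tout_timer() below) to store the
 * close timeout start; see mptcp_set_close_tout().
 */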
2693
2694 static void mptcp_check_fastclose(struct mptcp_sock *msk)
2695 {
2696 struct mptcp_subflow_context *subflow, *tmp;
2697 struct sock *sk = (struct sock *)msk;
2698
2699 if (likely(!READ_ONCE(msk->rcv_fastclose)))
2700 return;
2701
2702 mptcp_token_destroy(msk);
2703
2704 mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2705 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2706 bool slow;
2707
2708 slow = lock_sock_fast(tcp_sk);
2709 if (tcp_sk->sk_state != TCP_CLOSE) {
2710 mptcp_send_active_reset_reason(tcp_sk);
2711 tcp_set_state(tcp_sk, TCP_CLOSE);
2712 }
2713 unlock_sock_fast(tcp_sk, slow);
2714 }
2715
2716 /* Mirror the tcp_reset() error propagation */
2717 switch (sk->sk_state) {
2718 case TCP_SYN_SENT:
2719 WRITE_ONCE(sk->sk_err, ECONNREFUSED);
2720 break;
2721 case TCP_CLOSE_WAIT:
2722 WRITE_ONCE(sk->sk_err, EPIPE);
2723 break;
2724 case TCP_CLOSE:
2725 return;
2726 default:
2727 WRITE_ONCE(sk->sk_err, ECONNRESET);
2728 }
2729
2730 mptcp_set_state(sk, TCP_CLOSE);
2731 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
2732 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
2733 set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
2734
2735 /* the calling mptcp_worker will properly destroy the socket */
2736 if (sock_flag(sk, SOCK_DEAD))
2737 return;
2738
2739 sk->sk_state_change(sk);
2740 sk_error_report(sk);
2741 }
2742
2743 static void __mptcp_retrans(struct sock *sk)
2744 {
2745 struct mptcp_sendmsg_info info = { .data_lock_held = true, };
2746 struct mptcp_sock *msk = mptcp_sk(sk);
2747 struct mptcp_subflow_context *subflow;
2748 struct mptcp_data_frag *dfrag;
2749 struct sock *ssk;
2750 int ret, err;
2751 u16 len = 0;
2752
2753 mptcp_clean_una_wakeup(sk);
2754
2755 /* first check ssk: need to kick "stale" logic */
2756 err = mptcp_sched_get_retrans(msk);
2757 dfrag = mptcp_rtx_head(sk);
2758 if (!dfrag) {
2759 if (mptcp_data_fin_enabled(msk)) {
2760 struct inet_connection_sock *icsk = inet_csk(sk);
2761
2762 WRITE_ONCE(icsk->icsk_retransmits,
2763 icsk->icsk_retransmits + 1);
2764 mptcp_set_datafin_timeout(sk);
2765 mptcp_send_ack(msk);
2766
2767 goto reset_timer;
2768 }
2769
2770 if (!mptcp_send_head(sk))
2771 goto clear_scheduled;
2772
2773 goto reset_timer;
2774 }
2775
2776 if (err)
2777 goto reset_timer;
2778
2779 mptcp_for_each_subflow(msk, subflow) {
2780 if (READ_ONCE(subflow->scheduled)) {
2781 u16 copied = 0;
2782
2783 mptcp_subflow_set_scheduled(subflow, false);
2784
2785 ssk = mptcp_subflow_tcp_sock(subflow);
2786
2787 lock_sock(ssk);
2788
2789 /* limit retransmission to the bytes already sent on some subflows */
2790 info.sent = 0;
2791 info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len :
2792 dfrag->already_sent;
2793
2794 /*
2795 * make the whole 'retrans decision, xmit, disallow fallback'
2796 * sequence atomic; note that we can't retransmit even
2797 * when an infinite fallback is in progress, i.e. when new
2798 * subflows are disallowed.
2799 */
2800 spin_lock_bh(&msk->fallback_lock);
2801 if (__mptcp_check_fallback(msk) ||
2802 !msk->allow_subflows) {
2803 spin_unlock_bh(&msk->fallback_lock);
2804 release_sock(ssk);
2805 goto clear_scheduled;
2806 }
2807
2808 while (info.sent < info.limit) {
2809 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
2810 if (ret <= 0)
2811 break;
2812
2813 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
2814 copied += ret;
2815 info.sent += ret;
2816 }
2817 if (copied) {
2818 len = max(copied, len);
2819 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
2820 info.size_goal);
2821 msk->allow_infinite_fallback = false;
2822 }
2823 spin_unlock_bh(&msk->fallback_lock);
2824
2825 release_sock(ssk);
2826 }
2827 }
2828
2829 msk->bytes_retrans += len;
2830 dfrag->already_sent = max(dfrag->already_sent, len);
2831
2832 reset_timer:
2833 mptcp_check_and_set_pending(sk);
2834
2835 if (!mptcp_rtx_timer_pending(sk))
2836 mptcp_reset_rtx_timer(sk);
2837
2838 clear_scheduled:
2839 /* If no rtx data was available or in case of fallback, there
2840 * could be left-over scheduled subflows; clear them all,
2841 * or a later xmit could use bad ones
2842 */
2843 mptcp_for_each_subflow(msk, subflow)
2844 if (READ_ONCE(subflow->scheduled))
2845 mptcp_subflow_set_scheduled(subflow, false);
2846 }
2847
2848 /* schedule the timeout timer for the relevant event: either close timeout
2849 * or mp_fail timeout. The close timeout takes precedence over the mp_fail one
2850 */
2851 void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
2852 {
2853 struct sock *sk = (struct sock *)msk;
2854 unsigned long timeout, close_timeout;
2855
2856 if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp)
2857 return;
2858
2859 close_timeout = (unsigned long)inet_csk(sk)->icsk_mtup.probe_timestamp -
2860 tcp_jiffies32 + jiffies + mptcp_close_timeout(sk);
2861
2862 /* the close timeout takes precedence over the fail one, and here at least one of
2863 * them is active
2864 */
2865 timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout;
2866
2867 sk_reset_timer(sk, &inet_csk(sk)->mptcp_tout_timer, timeout);
2868 }
2869
2870 static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
2871 {
2872 struct sock *ssk = msk->first;
2873 bool slow;
2874
2875 if (!ssk)
2876 return;
2877
2878 pr_debug("MP_FAIL doesn't respond, reset the subflow\n");
2879
2880 slow = lock_sock_fast(ssk);
2881 mptcp_subflow_reset(ssk);
2882 WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
2883 unlock_sock_fast(ssk, slow);
2884 }
2885
2886 static void mptcp_backlog_purge(struct sock *sk)
2887 {
2888 struct mptcp_sock *msk = mptcp_sk(sk);
2889 struct sk_buff *tmp, *skb;
2890 LIST_HEAD(backlog);
2891
2892 mptcp_data_lock(sk);
2893 list_splice_init(&msk->backlog_list, &backlog);
2894 msk->backlog_len = 0;
2895 mptcp_data_unlock(sk);
2896
2897 list_for_each_entry_safe(skb, tmp, &backlog, list) {
2898 mptcp_borrow_fwdmem(sk, skb);
2899 kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
2900 }
2901 sk_mem_reclaim(sk);
2902 }
2903
2904 static void mptcp_do_fastclose(struct sock *sk)
2905 {
2906 struct mptcp_subflow_context *subflow, *tmp;
2907 struct mptcp_sock *msk = mptcp_sk(sk);
2908
2909 mptcp_set_state(sk, TCP_CLOSE);
2910 mptcp_backlog_purge(sk);
2911 msk->fastclosing = 1;
2912
2913 /* Explicitly send the fastclose reset as needed */
2914 if (__mptcp_check_fallback(msk))
2915 return;
2916
2917 mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2918 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2919
2920 lock_sock(ssk);
2921
2922 /* Some subflow socket states don't allow/need a reset. */
2923 if ((1 << ssk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
2924 goto unlock;
2925
2926 subflow->send_fastclose = 1;
2927
2928 /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
2929 * issue in __tcp_select_window(), see tcp_disconnect().
2930 */
2931 inet_csk(ssk)->icsk_ack.rcv_mss = TCP_MIN_MSS;
2932
2933 tcp_send_active_reset(ssk, ssk->sk_allocation,
2934 SK_RST_REASON_TCP_ABORT_ON_CLOSE);
2935 unlock:
2936 release_sock(ssk);
2937 }
2938 }
2939
2940 static void mptcp_worker(struct work_struct *work)
2941 {
2942 struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
2943 struct sock *sk = (struct sock *)msk;
2944 unsigned long fail_tout;
2945 int state;
2946
2947 lock_sock(sk);
2948 state = sk->sk_state;
2949 if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN)))
2950 goto unlock;
2951
2952 mptcp_check_fastclose(msk);
2953
2954 mptcp_pm_worker(msk);
2955
2956 mptcp_check_send_data_fin(sk);
2957 mptcp_check_data_fin_ack(sk);
2958 mptcp_check_data_fin(sk);
2959
2960 if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
2961 __mptcp_close_subflow(sk);
2962
2963 if (mptcp_close_tout_expired(sk)) {
2964 struct mptcp_subflow_context *subflow, *tmp;
2965
2966 mptcp_do_fastclose(sk);
2967 mptcp_for_each_subflow_safe(msk, subflow, tmp)
2968 __mptcp_close_ssk(sk, subflow->tcp_sock, subflow, 0);
2969 mptcp_close_wake_up(sk);
2970 }
2971
2972 if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) {
2973 __mptcp_destroy_sock(sk);
2974 goto unlock;
2975 }
2976
2977 if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
2978 __mptcp_retrans(sk);
2979
2980 fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0;
2981 if (fail_tout && time_after(jiffies, fail_tout))
2982 mptcp_mp_fail_no_response(msk);
2983
2984 unlock:
2985 release_sock(sk);
2986 sock_put(sk);
2987 }
2988
2989 static void __mptcp_init_sock(struct sock *sk)
2990 {
2991 struct mptcp_sock *msk = mptcp_sk(sk);
2992
2993 INIT_LIST_HEAD(&msk->conn_list);
2994 INIT_LIST_HEAD(&msk->join_list);
2995 INIT_LIST_HEAD(&msk->rtx_queue);
2996 INIT_LIST_HEAD(&msk->backlog_list);
2997 INIT_WORK(&msk->work, mptcp_worker);
2998 msk->out_of_order_queue = RB_ROOT;
2999 msk->first_pending = NULL;
3000 msk->timer_ival = TCP_RTO_MIN;
3001 msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
3002 msk->backlog_len = 0;
3003
3004 WRITE_ONCE(msk->first, NULL);
3005 inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
3006 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
3007 msk->allow_infinite_fallback = true;
3008 msk->allow_subflows = true;
3009 msk->recovery = false;
3010 msk->subflow_id = 1;
3011 msk->last_data_sent = tcp_jiffies32;
3012 msk->last_data_recv = tcp_jiffies32;
3013 msk->last_ack_recv = tcp_jiffies32;
3014
3015 mptcp_pm_data_init(msk);
3016 spin_lock_init(&msk->fallback_lock);
3017
3018 /* re-use the csk retrans timer for MPTCP-level retrans */
3019 timer_setup(&sk->mptcp_retransmit_timer, mptcp_retransmit_timer, 0);
3020 timer_setup(&msk->sk.mptcp_tout_timer, mptcp_tout_timer, 0);
3021 }
3022
3023 static void mptcp_ca_reset(struct sock *sk)
3024 {
3025 struct inet_connection_sock *icsk = inet_csk(sk);
3026
3027 tcp_assign_congestion_control(sk);
3028 strscpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name,
3029 sizeof(mptcp_sk(sk)->ca_name));
3030
3031 /* no need to keep a reference to the ops, the name will suffice */
3032 tcp_cleanup_congestion_control(sk);
3033 icsk->icsk_ca_ops = NULL;
3034 }
3035
3036 static int mptcp_init_sock(struct sock *sk)
3037 {
3038 struct net *net = sock_net(sk);
3039 int ret;
3040
3041 __mptcp_init_sock(sk);
3042
3043 if (!mptcp_is_enabled(net))
3044 return -ENOPROTOOPT;
3045
3046 if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
3047 return -ENOMEM;
3048
3049 rcu_read_lock();
3050 ret = mptcp_init_sched(mptcp_sk(sk),
3051 mptcp_sched_find(mptcp_get_scheduler(net)));
3052 rcu_read_unlock();
3053 if (ret)
3054 return ret;
3055
3056 set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
3057
3058 /* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
3059 * propagate the correct value
3060 */
3061 mptcp_ca_reset(sk);
3062
3063 sk_sockets_allocated_inc(sk);
3064 sk->sk_rcvbuf = READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]);
3065 sk->sk_sndbuf = READ_ONCE(net->ipv4.sysctl_tcp_wmem[1]);
3066 sk->sk_write_space = sk_stream_write_space;
3067
3068 return 0;
3069 }
3070
3071 static void __mptcp_clear_xmit(struct sock *sk)
3072 {
3073 struct mptcp_sock *msk = mptcp_sk(sk);
3074 struct mptcp_data_frag *dtmp, *dfrag;
3075
3076 msk->first_pending = NULL;
3077 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
3078 dfrag_clear(sk, dfrag);
3079 }
3080
3081 void mptcp_cancel_work(struct sock *sk)
3082 {
3083 struct mptcp_sock *msk = mptcp_sk(sk);
3084
3085 if (cancel_work_sync(&msk->work))
3086 __sock_put(sk);
3087 }
3088
3089 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
3090 {
3091 lock_sock(ssk);
3092
3093 switch (ssk->sk_state) {
3094 case TCP_LISTEN:
3095 if (!(how & RCV_SHUTDOWN))
3096 break;
3097 fallthrough;
3098 case TCP_SYN_SENT:
3099 WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK));
3100 break;
3101 default:
3102 if (__mptcp_check_fallback(mptcp_sk(sk))) {
3103 pr_debug("Fallback\n");
3104 ssk->sk_shutdown |= how;
3105 tcp_shutdown(ssk, how);
3106
3107 /* simulate the data_fin ack reception to let the state
3108 * machine move forward
3109 */
3110 WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
3111 mptcp_schedule_work(sk);
3112 } else {
3113 pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
3114 tcp_send_ack(ssk);
3115 if (!mptcp_rtx_timer_pending(sk))
3116 mptcp_reset_rtx_timer(sk);
3117 }
3118 break;
3119 }
3120
3121 release_sock(ssk);
3122 }
3123
3124 void mptcp_set_state(struct sock *sk, int state)
3125 {
3126 int oldstate = sk->sk_state;
3127
3128 switch (state) {
3129 case TCP_ESTABLISHED:
3130 if (oldstate != TCP_ESTABLISHED)
3131 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
3132 break;
3133 case TCP_CLOSE_WAIT:
3134 /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state:
3135 * MPTCP "accepted" sockets will be created later on. So no
3136 * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT.
3137 */
3138 break;
3139 default:
3140 if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
3141 MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
3142 }
3143
3144 inet_sk_state_store(sk, state);
3145 }
3146
3147 static const unsigned char new_state[16] = {
3148 /* current state: new state: action: */
3149 [0 /* (Invalid) */] = TCP_CLOSE,
3150 [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
3151 [TCP_SYN_SENT] = TCP_CLOSE,
3152 [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
3153 [TCP_FIN_WAIT1] = TCP_FIN_WAIT1,
3154 [TCP_FIN_WAIT2] = TCP_FIN_WAIT2,
3155 [TCP_TIME_WAIT] = TCP_CLOSE, /* should not happen ! */
3156 [TCP_CLOSE] = TCP_CLOSE,
3157 [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN,
3158 [TCP_LAST_ACK] = TCP_LAST_ACK,
3159 [TCP_LISTEN] = TCP_CLOSE,
3160 [TCP_CLOSING] = TCP_CLOSING,
3161 [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */
3162 };
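/* Worked example: closing an established msk looks up
 * new_state[TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, so
 * mptcp_close_state() below moves the socket to TCP_FIN_WAIT1 and
 * returns non-zero, telling the caller to send a DATA_FIN.
 */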
3163
3164 static int mptcp_close_state(struct sock *sk)
3165 {
3166 int next = (int)new_state[sk->sk_state];
3167 int ns = next & TCP_STATE_MASK;
3168
3169 mptcp_set_state(sk, ns);
3170
3171 return next & TCP_ACTION_FIN;
3172 }
3173
3174 static void mptcp_check_send_data_fin(struct sock *sk)
3175 {
3176 struct mptcp_subflow_context *subflow;
3177 struct mptcp_sock *msk = mptcp_sk(sk);
3178
3179 pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n",
3180 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
3181 msk->snd_nxt, msk->write_seq);
3182
3183 /* we still need to push data to the subflows, or we are not really
3184 * shutting down yet: skip this
3185 */
3186 if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq ||
3187 mptcp_send_head(sk))
3188 return;
3189
3190 WRITE_ONCE(msk->snd_nxt, msk->write_seq);
3191
3192 mptcp_for_each_subflow(msk, subflow) {
3193 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
3194
3195 mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN);
3196 }
3197 }
3198
3199 static void __mptcp_wr_shutdown(struct sock *sk)
3200 {
3201 struct mptcp_sock *msk = mptcp_sk(sk);
3202
3203 pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n",
3204 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
3205 !!mptcp_send_head(sk));
3206
3207 /* will be ignored by fallback sockets */
3208 WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
3209 WRITE_ONCE(msk->snd_data_fin_enable, 1);
3210
3211 mptcp_check_send_data_fin(sk);
3212 }
3213
3214 static void __mptcp_destroy_sock(struct sock *sk)
3215 {
3216 struct mptcp_sock *msk = mptcp_sk(sk);
3217
3218 pr_debug("msk=%p\n", msk);
3219
3220 might_sleep();
3221
3222 mptcp_stop_rtx_timer(sk);
3223 sk_stop_timer(sk, &inet_csk(sk)->mptcp_tout_timer);
3224 msk->pm.status = 0;
3225 mptcp_release_sched(msk);
3226
3227 sk->sk_prot->destroy(sk);
3228
3229 sk_stream_kill_queues(sk);
3230 xfrm_sk_free_policy(sk);
3231
3232 sock_put(sk);
3233 }
3234
3235 void __mptcp_unaccepted_force_close(struct sock *sk)
3236 {
3237 sock_set_flag(sk, SOCK_DEAD);
3238 mptcp_do_fastclose(sk);
3239 __mptcp_destroy_sock(sk);
3240 }
3241
3242 static __poll_t mptcp_check_readable(struct sock *sk)
3243 {
3244 return mptcp_epollin_ready(sk) ? EPOLLIN | EPOLLRDNORM : 0;
3245 }
3246
3247 static void mptcp_check_listen_stop(struct sock *sk)
3248 {
3249 struct sock *ssk;
3250
3251 if (inet_sk_state_load(sk) != TCP_LISTEN)
3252 return;
3253
3254 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
3255 ssk = mptcp_sk(sk)->first;
3256 if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN))
3257 return;
3258
3259 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
3260 tcp_set_state(ssk, TCP_CLOSE);
3261 mptcp_subflow_queue_clean(sk, ssk);
3262 inet_csk_listen_stop(ssk);
3263 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
3264 release_sock(ssk);
3265 }
3266
3267 bool __mptcp_close(struct sock *sk, long timeout)
3268 {
3269 struct mptcp_subflow_context *subflow;
3270 struct mptcp_sock *msk = mptcp_sk(sk);
3271 bool do_cancel_work = false;
3272 int subflows_alive = 0;
3273
3274 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
3275
3276 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
3277 mptcp_check_listen_stop(sk);
3278 mptcp_set_state(sk, TCP_CLOSE);
3279 goto cleanup;
3280 }
3281
3282 if (mptcp_data_avail(msk) || timeout < 0) {
3283 /* If the msk has unread data pending, or the caller explicitly asks for
3284 * it, do the MPTCP equivalent of a TCP reset, aka MPTCP fastclose
3285 */
3286 mptcp_do_fastclose(sk);
3287 timeout = 0;
3288 } else if (mptcp_close_state(sk)) {
3289 __mptcp_wr_shutdown(sk);
3290 }
3291
3292 sk_stream_wait_close(sk, timeout);
3293
3294 cleanup:
3295 /* orphan all the subflows */
3296 mptcp_for_each_subflow(msk, subflow) {
3297 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
3298 bool slow = lock_sock_fast_nested(ssk);
3299
3300 subflows_alive += ssk->sk_state != TCP_CLOSE;
3301
3302 /* since the close timeout takes precedence over the fail one,
3303 * cancel the latter
3304 */
3305 if (ssk == msk->first)
3306 subflow->fail_tout = 0;
3307
3308 /* detach from the parent socket, but allow data_ready to
3309 * push incoming data into the mptcp stack, to properly ack it
3310 */
3311 ssk->sk_socket = NULL;
3312 ssk->sk_wq = NULL;
3313 unlock_sock_fast(ssk, slow);
3314 }
3315 sock_orphan(sk);
3316
3317 /* all the subflows are closed, only timeout can change the msk
3318 * state, let's not keep resources busy for no reason
3319 */
3320 if (subflows_alive == 0)
3321 mptcp_set_state(sk, TCP_CLOSE);
3322
3323 sock_hold(sk);
3324 pr_debug("msk=%p state=%d\n", sk, sk->sk_state);
3325 mptcp_pm_connection_closed(msk);
3326
3327 if (sk->sk_state == TCP_CLOSE) {
3328 __mptcp_destroy_sock(sk);
3329 do_cancel_work = true;
3330 } else {
3331 mptcp_start_tout_timer(sk);
3332 }
3333
3334 return do_cancel_work;
3335 }
3336
3337 static void mptcp_close(struct sock *sk, long timeout)
3338 {
3339 bool do_cancel_work;
3340
3341 lock_sock(sk);
3342
3343 do_cancel_work = __mptcp_close(sk, timeout);
3344 release_sock(sk);
3345 if (do_cancel_work)
3346 mptcp_cancel_work(sk);
3347
3348 sock_put(sk);
3349 }
3350
3351 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
3352 {
3353 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3354 const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
3355 struct ipv6_pinfo *msk6 = inet6_sk(msk);
3356
3357 msk->sk_v6_daddr = ssk->sk_v6_daddr;
3358 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;
3359
3360 if (msk6 && ssk6) {
3361 msk6->saddr = ssk6->saddr;
3362 msk6->flow_label = ssk6->flow_label;
3363 }
3364 #endif
3365
3366 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
3367 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
3368 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
3369 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
3370 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
3371 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
3372 }
3373
3374 static void mptcp_destroy_common(struct mptcp_sock *msk)
3375 {
3376 struct mptcp_subflow_context *subflow, *tmp;
3377 struct sock *sk = (struct sock *)msk;
3378
3379 __mptcp_clear_xmit(sk);
3380 mptcp_backlog_purge(sk);
3381
3382 /* join list will be eventually flushed (with rst) at sock lock release time */
3383 mptcp_for_each_subflow_safe(msk, subflow, tmp)
3384 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, 0);
3385
3386 __skb_queue_purge(&sk->sk_receive_queue);
3387 skb_rbtree_purge(&msk->out_of_order_queue);
3388
3389 /* move all the rx fwd alloc into the sk_mem_reclaim_final in
3390 * inet_sock_destruct(), which will dispose of it
3391 */
3392 mptcp_token_destroy(msk);
3393 mptcp_pm_destroy(msk);
3394 }
3395
3396 static int mptcp_disconnect(struct sock *sk, int flags)
3397 {
3398 struct mptcp_sock *msk = mptcp_sk(sk);
3399
3400 /* We are on the fastopen error path. We can't call straight into the
3401 * subflows cleanup code due to lock nesting (we are already under
3402 * msk->first socket lock).
3403 */
3404 if (msk->fastopening)
3405 return -EBUSY;
3406
3407 mptcp_check_listen_stop(sk);
3408 mptcp_set_state(sk, TCP_CLOSE);
3409
3410 mptcp_stop_rtx_timer(sk);
3411 mptcp_stop_tout_timer(sk);
3412
3413 mptcp_pm_connection_closed(msk);
3414
3415 /* msk->subflow is still intact, the following will not free the first
3416 * subflow
3417 */
3418 mptcp_do_fastclose(sk);
3419 mptcp_destroy_common(msk);
3420
3421 /* The first subflow is already in TCP_CLOSE status, the following
3422 * can't overlap with a fallback anymore
3423 */
3424 spin_lock_bh(&msk->fallback_lock);
3425 msk->allow_subflows = true;
3426 msk->allow_infinite_fallback = true;
3427 WRITE_ONCE(msk->flags, 0);
3428 spin_unlock_bh(&msk->fallback_lock);
3429
3430 msk->cb_flags = 0;
3431 msk->recovery = false;
3432 WRITE_ONCE(msk->can_ack, false);
3433 WRITE_ONCE(msk->fully_established, false);
3434 WRITE_ONCE(msk->rcv_data_fin, false);
3435 WRITE_ONCE(msk->snd_data_fin_enable, false);
3436 WRITE_ONCE(msk->rcv_fastclose, false);
3437 WRITE_ONCE(msk->use_64bit_ack, false);
3438 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
3439 mptcp_pm_data_reset(msk);
3440 mptcp_ca_reset(sk);
3441 msk->bytes_consumed = 0;
3442 msk->bytes_acked = 0;
3443 msk->bytes_received = 0;
3444 msk->bytes_sent = 0;
3445 msk->bytes_retrans = 0;
3446 msk->rcvspace_init = 0;
3447 msk->fastclosing = 0;
3448
3449 /* for fallback's sake */
3450 WRITE_ONCE(msk->ack_seq, 0);
3451
3452 WRITE_ONCE(sk->sk_shutdown, 0);
3453 sk_error_report(sk);
3454 return 0;
3455 }
3456
3457 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3458 static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
3459 {
3460 struct mptcp6_sock *msk6 = container_of(mptcp_sk(sk), struct mptcp6_sock, msk);
3461
3462 return &msk6->np;
3463 }
3464
3465 static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk)
3466 {
3467 const struct ipv6_pinfo *np = inet6_sk(sk);
3468 struct ipv6_txoptions *opt;
3469 struct ipv6_pinfo *newnp;
3470
3471 newnp = inet6_sk(newsk);
3472
3473 rcu_read_lock();
3474 opt = rcu_dereference(np->opt);
3475 if (opt) {
3476 opt = ipv6_dup_options(newsk, opt);
3477 if (!opt)
3478 net_warn_ratelimited("%s: Failed to copy ip6 options\n", __func__);
3479 }
3480 RCU_INIT_POINTER(newnp->opt, opt);
3481 rcu_read_unlock();
3482 }
3483 #endif
3484
3485 static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk)
3486 {
3487 struct ip_options_rcu *inet_opt, *newopt = NULL;
3488 const struct inet_sock *inet = inet_sk(sk);
3489 struct inet_sock *newinet;
3490
3491 newinet = inet_sk(newsk);
3492
3493 rcu_read_lock();
3494 inet_opt = rcu_dereference(inet->inet_opt);
3495 if (inet_opt) {
3496 newopt = sock_kmemdup(newsk, inet_opt, sizeof(*inet_opt) +
3497 inet_opt->opt.optlen, GFP_ATOMIC);
3498 if (!newopt)
3499 net_warn_ratelimited("%s: Failed to copy ip options\n", __func__);
3500 }
3501 RCU_INIT_POINTER(newinet->inet_opt, newopt);
3502 rcu_read_unlock();
3503 }
3504
3505 struct sock *mptcp_sk_clone_init(const struct sock *sk,
3506 const struct mptcp_options_received *mp_opt,
3507 struct sock *ssk,
3508 struct request_sock *req)
3509 {
3510 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
3511 struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
3512 struct mptcp_subflow_context *subflow;
3513 struct mptcp_sock *msk;
3514
3515 if (!nsk)
3516 return NULL;
3517
3518 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3519 if (nsk->sk_family == AF_INET6)
3520 inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
3521 #endif
3522
3523 __mptcp_init_sock(nsk);
3524
3525 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3526 if (nsk->sk_family == AF_INET6)
3527 mptcp_copy_ip6_options(nsk, sk);
3528 else
3529 #endif
3530 mptcp_copy_ip_options(nsk, sk);
3531
3532 msk = mptcp_sk(nsk);
3533 WRITE_ONCE(msk->local_key, subflow_req->local_key);
3534 WRITE_ONCE(msk->token, subflow_req->token);
3535 msk->in_accept_queue = 1;
3536 WRITE_ONCE(msk->fully_established, false);
3537 if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
3538 WRITE_ONCE(msk->csum_enabled, true);
3539
3540 WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1);
3541 WRITE_ONCE(msk->snd_nxt, msk->write_seq);
3542 WRITE_ONCE(msk->snd_una, msk->write_seq);
3543 WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
3544 msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
3545 mptcp_init_sched(msk, mptcp_sk(sk)->sched);
3546
3547 /* passive msk is created after the first/MPC subflow */
3548 msk->subflow_id = 2;
3549
3550 sock_reset_flag(nsk, SOCK_RCU_FREE);
3551 security_inet_csk_clone(nsk, req);
3552
3553 /* this can't race with mptcp_close(), as the msk is
3554 * not yet exposed to user-space
3555 */
3556 mptcp_set_state(nsk, TCP_ESTABLISHED);
3557
3558 /* The msk maintains a ref to each subflow in the connections list */
3559 WRITE_ONCE(msk->first, ssk);
3560 subflow = mptcp_subflow_ctx(ssk);
3561 list_add(&subflow->node, &msk->conn_list);
3562 sock_hold(ssk);
3563
3564 /* new mpc subflow takes ownership of the newly
3565 * created mptcp socket
3566 */
3567 mptcp_token_accept(subflow_req, msk);
3568
3569 /* set msk addresses early to ensure mptcp_pm_get_local_id()
3570 * uses the correct data
3571 */
3572 mptcp_copy_inaddrs(nsk, ssk);
3573 __mptcp_propagate_sndbuf(nsk, ssk);
3574
3575 mptcp_rcv_space_init(msk, ssk);
3576 msk->rcvq_space.time = mptcp_stamp();
3577
3578 if (mp_opt->suboptions & OPTION_MPTCP_MPC_ACK)
3579 __mptcp_subflow_fully_established(msk, subflow, mp_opt);
3580 bh_unlock_sock(nsk);
3581
3582 /* note: the newly allocated socket refcount is 2 now */
3583 return nsk;
3584 }
3585
3586 static void mptcp_destroy(struct sock *sk)
3587 {
3588 struct mptcp_sock *msk = mptcp_sk(sk);
3589
3590 /* allow the following to close even the initial subflow */
3591 msk->free_first = 1;
3592 mptcp_destroy_common(msk);
3593 sk_sockets_allocated_dec(sk);
3594 }
3595
3596 void __mptcp_data_acked(struct sock *sk)
3597 {
3598 if (!sock_owned_by_user(sk))
3599 __mptcp_clean_una(sk);
3600 else
3601 __set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags);
3602 }
3603
3604 void __mptcp_check_push(struct sock *sk, struct sock *ssk)
3605 {
3606 if (!sock_owned_by_user(sk))
3607 __mptcp_subflow_push_pending(sk, ssk, false);
3608 else
3609 __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
3610 }
3611
3612 #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
3613 BIT(MPTCP_RETRANSMIT) | \
3614 BIT(MPTCP_FLUSH_JOIN_LIST))
3615
3616 /* processes deferred events and flushes wmem */
3617 static void mptcp_release_cb(struct sock *sk)
3618 __must_hold(&sk->sk_lock.slock)
3619 {
3620 struct mptcp_sock *msk = mptcp_sk(sk);
3621
3622 for (;;) {
3623 unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED);
3624 struct list_head join_list, skbs;
3625 bool spool_bl;
3626 u32 moved;
3627
3628 spool_bl = mptcp_can_spool_backlog(sk, &skbs);
3629 if (!flags && !spool_bl)
3630 break;
3631
3632 INIT_LIST_HEAD(&join_list);
3633 list_splice_init(&msk->join_list, &join_list);
3634
3635 /* the following actions acquire the subflow socket lock
3636 *
3637 * 1) can't be invoked in atomic scope
3638 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
3639 * datapath acquires the msk socket spinlock while holding
3640 * the subflow socket lock
3641 */
3642 msk->cb_flags &= ~flags;
3643 spin_unlock_bh(&sk->sk_lock.slock);
3644
3645 if (flags & BIT(MPTCP_FLUSH_JOIN_LIST))
3646 __mptcp_flush_join_list(sk, &join_list);
3647 if (flags & BIT(MPTCP_PUSH_PENDING))
3648 __mptcp_push_pending(sk, 0);
3649 if (flags & BIT(MPTCP_RETRANSMIT))
3650 __mptcp_retrans(sk);
3651 if (spool_bl && __mptcp_move_skbs(sk, &skbs, &moved)) {
3652 /* notify ack seq update */
3653 mptcp_cleanup_rbuf(msk, 0);
3654 sk->sk_data_ready(sk);
3655 }
3656
3657 cond_resched();
3658 spin_lock_bh(&sk->sk_lock.slock);
3659 if (spool_bl)
3660 mptcp_backlog_spooled(sk, moved, &skbs);
3661 }
3662
3663 if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
3664 __mptcp_clean_una_wakeup(sk);
3665 if (unlikely(msk->cb_flags)) {
3666 /* be sure to sync the msk state before taking actions
3667 * depending on sk_state (MPTCP_ERROR_REPORT)
3668 * On sk release avoid actions depending on the first subflow
3669 */
3670 if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first)
3671 __mptcp_sync_state(sk, msk->pending_state);
3672 if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
3673 __mptcp_error_report(sk);
3674 if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags))
3675 __mptcp_sync_sndbuf(sk);
3676 }
3677 }
3678
3679 /* MP_JOIN client subflow must wait for 4th ack before sending any data:
3680 * TCP can't schedule delack timer before the subflow is fully established.
3681 * MPTCP uses the delack timer to do 3rd ack retransmissions
3682 */
schedule_3rdack_retransmission(struct sock * ssk)3683 static void schedule_3rdack_retransmission(struct sock *ssk)
3684 {
3685 struct inet_connection_sock *icsk = inet_csk(ssk);
3686 struct tcp_sock *tp = tcp_sk(ssk);
3687 unsigned long timeout;
3688
3689 if (READ_ONCE(mptcp_subflow_ctx(ssk)->fully_established))
3690 return;
3691
3692 /* reschedule with a timeout above RTT, as we must look only for drop */
3693 if (tp->srtt_us)
3694 timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
3695 else
3696 timeout = TCP_TIMEOUT_INIT;
3697 timeout += jiffies;
3698
3699 WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
3700 smp_store_release(&icsk->icsk_ack.pending,
3701 icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER);
3702 sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
3703 }
3704
mptcp_subflow_process_delegated(struct sock * ssk,long status)3705 void mptcp_subflow_process_delegated(struct sock *ssk, long status)
3706 {
3707 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3708 struct sock *sk = subflow->conn;
3709
3710 if (status & BIT(MPTCP_DELEGATE_SEND)) {
3711 mptcp_data_lock(sk);
3712 if (!sock_owned_by_user(sk))
3713 __mptcp_subflow_push_pending(sk, ssk, true);
3714 else
3715 __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
3716 mptcp_data_unlock(sk);
3717 }
3718 if (status & BIT(MPTCP_DELEGATE_SNDBUF)) {
3719 mptcp_data_lock(sk);
3720 if (!sock_owned_by_user(sk))
3721 __mptcp_sync_sndbuf(sk);
3722 else
3723 __set_bit(MPTCP_SYNC_SNDBUF, &mptcp_sk(sk)->cb_flags);
3724 mptcp_data_unlock(sk);
3725 }
3726 if (status & BIT(MPTCP_DELEGATE_ACK))
3727 schedule_3rdack_retransmission(ssk);
3728 }
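
/* The handler above runs with the subflow socket lock held - see
 * mptcp_napi_poll() below and the TCP release_cb override - and maps
 * each delegated status bit to the corresponding action, again
 * deferring to mptcp_release_cb() when the msk is owned by user context.
 */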

static int mptcp_hash(struct sock *sk)
{
	/* should never be called,
	 * we hash the TCP subflows, not the MPTCP socket
	 */
	WARN_ON_ONCE(1);
	return 0;
}

static void mptcp_unhash(struct sock *sk)
{
	/* called from sk_common_release(), but nothing to do here */
}

static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	pr_debug("msk=%p, ssk=%p\n", msk, msk->first);
	if (WARN_ON_ONCE(!msk->first))
		return -EINVAL;

	return inet_csk_get_port(msk->first, snum);
}

void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;

	subflow = mptcp_subflow_ctx(ssk);
	sk = subflow->conn;
	msk = mptcp_sk(sk);

	pr_debug("msk=%p, token=%u\n", sk, subflow->token);

	subflow->map_seq = subflow->iasn;
	subflow->map_subflow_seq = 1;

	/* the socket is not connected yet, no msk/subflow ops can race
	 * while accessing the fields below
	 */
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->rcvq_space.time, mptcp_stamp());

	mptcp_pm_new_connection(msk, ssk, 0);
}

void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

/* Can be called without holding the msk socket lock; use the callback lock
 * to avoid {READ_,WRITE_}ONCE annotations on sk_socket.
 */
static void mptcp_sock_check_graft(struct sock *sk, struct sock *ssk)
{
	struct socket *sock;

	write_lock_bh(&sk->sk_callback_lock);
	sock = sk->sk_socket;
	write_unlock_bh(&sk->sk_callback_lock);
	if (sock) {
		mptcp_sock_graft(ssk, sock);
		__mptcp_inherit_cgrp_data(sk, ssk);
		__mptcp_inherit_memcg(sk, ssk, GFP_ATOMIC);
	}
}
bool mptcp_finish_join(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct sock *parent = (void *)msk;
	bool ret = true;

	pr_debug("msk=%p, subflow=%p\n", msk, subflow);

	/* mptcp socket already closing? */
	if (!mptcp_is_fully_established(parent)) {
		subflow->reset_reason = MPTCP_RST_EMPTCP;
		return false;
	}

	/* An active subflow is already present inside the conn_list; it is
	 * grafted either by __mptcp_subflow_connect() or by accept.
	 */
	if (!list_empty(&subflow->node)) {
		spin_lock_bh(&msk->fallback_lock);
		if (!msk->allow_subflows) {
			spin_unlock_bh(&msk->fallback_lock);
			return false;
		}
		mptcp_subflow_joined(msk, ssk);
		spin_unlock_bh(&msk->fallback_lock);
		mptcp_propagate_sndbuf(parent, ssk);
		return true;
	}

	if (!mptcp_pm_allow_new_subflow(msk)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_JOINREJECTED);
		goto err_prohibited;
	}

	/* If we can't acquire msk socket lock here, let the release callback
	 * handle it
	 */
	mptcp_data_lock(parent);
	if (!sock_owned_by_user(parent)) {
		ret = __mptcp_finish_join(msk, ssk);
		if (ret) {
			sock_hold(ssk);
			list_add_tail(&subflow->node, &msk->conn_list);
			mptcp_sock_check_graft(parent, ssk);
		}
	} else {
		sock_hold(ssk);
		list_add_tail(&subflow->node, &msk->join_list);
		__set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags);

		/* In case of later failures, __mptcp_flush_join_list() will
		 * properly orphan the ssk via mptcp_close_ssk().
		 */
		mptcp_sock_check_graft(parent, ssk);
	}
	mptcp_data_unlock(parent);

	if (!ret) {
err_prohibited:
		subflow->reset_reason = MPTCP_RST_EPROHIBIT;
		return false;
	}

	return true;
}

static void mptcp_shutdown(struct sock *sk, int how)
{
	pr_debug("sk=%p, how=%d\n", sk, how);

	if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
		__mptcp_wr_shutdown(sk);
}

static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
{
	const struct sock *sk = (void *)msk;
	u64 delta;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
		return 0;

	delta = msk->write_seq - v;
	if (__mptcp_check_fallback(msk) && msk->first) {
		struct tcp_sock *tp = tcp_sk(msk->first);

		/* the first subflow is disconnected after close - see
		 * __mptcp_close_ssk(). tcp_disconnect() moves the write_seq
		 * so ignore that status, too.
		 */
		if (!((1 << msk->first->sk_state) &
		      (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)))
			delta += READ_ONCE(tp->write_seq) - tp->snd_una;
	}
	if (delta > INT_MAX)
		delta = INT_MAX;

	return (int)delta;
}

static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if (mptcp_move_skbs(sk))
			mptcp_cleanup_rbuf(msk, 0);
		*karg = mptcp_inq_hint(sk);
		release_sock(sk);
		break;
	case SIOCOUTQ:
		slow = lock_sock_fast(sk);
		*karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una));
		unlock_sock_fast(sk, slow);
		break;
	case SIOCOUTQNSD:
		slow = lock_sock_fast(sk);
		*karg = mptcp_ioctl_outq(msk, msk->snd_nxt);
		unlock_sock_fast(sk, slow);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
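
/* Illustrative userspace sketch, not part of the kernel build: the
 * ioctls handled above keep the plain TCP semantics, so an application
 * can query queue occupancy on a connected MPTCP socket the usual way:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *	int unread, unacked;
 *
 *	... connect() and exchange data, then:
 *	if (!ioctl(fd, SIOCINQ, &unread) && !ioctl(fd, SIOCOUTQ, &unacked))
 *		printf("unread=%d unacked=%d\n", unread, unacked);
 */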

static int mptcp_connect(struct sock *sk, struct sockaddr_unsized *uaddr,
			 int addr_len)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);
	int err = -EINVAL;
	struct sock *ssk;

	ssk = __mptcp_nmpc_sk(msk);
	if (IS_ERR(ssk))
		return PTR_ERR(ssk);

	mptcp_set_state(sk, TCP_SYN_SENT);
	subflow = mptcp_subflow_ctx(ssk);
#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info))
		mptcp_early_fallback(msk, subflow, MPTCP_MIB_MD5SIGFALLBACK);
#endif
	if (subflow->request_mptcp) {
		if (mptcp_active_should_disable(sk))
			mptcp_early_fallback(msk, subflow,
					     MPTCP_MIB_MPCAPABLEACTIVEDISABLED);
		else if (mptcp_token_new_connect(ssk) < 0)
			mptcp_early_fallback(msk, subflow,
					     MPTCP_MIB_TOKENFALLBACKINIT);
	}

	WRITE_ONCE(msk->write_seq, subflow->idsn);
	WRITE_ONCE(msk->snd_nxt, subflow->idsn);
	WRITE_ONCE(msk->snd_una, subflow->idsn);
	if (likely(!__mptcp_check_fallback(msk)))
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);

	/* if reaching here via the fastopen/sendmsg path, the caller already
	 * acquired the subflow socket lock, too.
	 */
	if (!msk->fastopening)
		lock_sock(ssk);

	/* the following mirrors closely a very small chunk of code from
	 * __inet_stream_connect()
	 */
	if (ssk->sk_state != TCP_CLOSE)
		goto out;

	if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk)) {
		err = ssk->sk_prot->pre_connect(ssk, uaddr, addr_len);
		if (err)
			goto out;
	}

	err = ssk->sk_prot->connect(ssk, uaddr, addr_len);
	if (err < 0)
		goto out;

	inet_assign_bit(DEFER_CONNECT, sk, inet_test_bit(DEFER_CONNECT, ssk));

out:
	if (!msk->fastopening)
		release_sock(ssk);

	/* on successful connect, the msk state will be moved to established by
	 * subflow_finish_connect()
	 */
	if (unlikely(err)) {
		/* avoid leaving a dangling token in an unconnected socket */
		mptcp_token_destroy(msk);
		mptcp_set_state(sk, TCP_CLOSE);
		return err;
	}

	mptcp_copy_inaddrs(sk, ssk);
	return 0;
}
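
/* Illustrative userspace sketch, not part of the kernel build: an MPTCP
 * connection is requested exactly like a TCP one, just with
 * IPPROTO_MPTCP; the early-fallback paths above let connect() degrade
 * transparently to plain TCP when MPTCP cannot be used:
 *
 *	struct sockaddr_in addr = { .sin_family = AF_INET };
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *
 *	... fill in addr, then:
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * On success the connection is established, possibly as a TCP fallback.
 */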

static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.connect	= mptcp_connect,
	.disconnect	= mptcp_disconnect,
	.close		= mptcp_close,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= mptcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.ioctl		= mptcp_ioctl,
	.recvmsg	= mptcp_recvmsg,
	.release_cb	= mptcp_release_cb,
	.hash		= mptcp_hash,
	.unhash		= mptcp_unhash,
	.get_port	= mptcp_get_port,
	.stream_memory_free	= mptcp_stream_memory_free,
	.sockets_allocated	= &mptcp_sockets_allocated,

	.memory_allocated	= &net_aligned_data.tcp_memory_allocated,
	.per_cpu_fw_alloc	= &tcp_memory_per_cpu_fw_alloc,

	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.sysctl_mem	= sysctl_tcp_mem,
	.obj_size	= sizeof(struct mptcp_sock),
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
	.no_autobind	= true,
};

static int mptcp_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct sock *ssk, *sk = sock->sk;
	int err = -EINVAL;

	lock_sock(sk);
	ssk = __mptcp_nmpc_sk(msk);
	if (IS_ERR(ssk)) {
		err = PTR_ERR(ssk);
		goto unlock;
	}

	if (sk->sk_family == AF_INET)
		err = inet_bind_sk(ssk, uaddr, addr_len);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (sk->sk_family == AF_INET6)
		err = inet6_bind_sk(ssk, uaddr, addr_len);
#endif
	if (!err)
		mptcp_copy_inaddrs(sk, ssk);

unlock:
	release_sock(sk);
	return err;
}

static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sock *ssk;
	int err;

	pr_debug("msk=%p\n", msk);

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
		goto unlock;

	ssk = __mptcp_nmpc_sk(msk);
	if (IS_ERR(ssk)) {
		err = PTR_ERR(ssk);
		goto unlock;
	}

	mptcp_set_state(sk, TCP_LISTEN);
	sock_set_flag(sk, SOCK_RCU_FREE);

	lock_sock(ssk);
	err = __inet_listen_sk(ssk, backlog);
	release_sock(ssk);
	mptcp_set_state(sk, inet_sk_state_load(ssk));

	if (!err) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		mptcp_copy_inaddrs(sk, ssk);
		mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
	}

unlock:
	release_sock(sk);
	return err;
}

static void mptcp_graft_subflows(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (mem_cgroup_sockets_enabled) {
		LIST_HEAD(join_list);

		/* Subflows joining after __inet_accept() will get the
		 * mem CG properly initialized at mptcp_finish_join() time,
		 * but subflows pending in join_list need explicit
		 * initialization before flushing `backlog_unaccounted`,
		 * or MPTCP can later unexpectedly observe unaccounted memory.
		 */
		mptcp_data_lock(sk);
		list_splice_init(&msk->join_list, &join_list);
		mptcp_data_unlock(sk);

		__mptcp_flush_join_list(sk, &join_list);
	}

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		lock_sock(ssk);

		/* Set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		if (!ssk->sk_socket)
			mptcp_sock_graft(ssk, sk->sk_socket);

		if (!mem_cgroup_sk_enabled(sk))
			goto unlock;

		__mptcp_inherit_cgrp_data(sk, ssk);
		__mptcp_inherit_memcg(sk, ssk, GFP_KERNEL);

unlock:
		release_sock(ssk);
	}

	if (mem_cgroup_sk_enabled(sk)) {
		gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
		int amt;

		/* Account the backlog memory; prior accept() is aware of
		 * fwd and rmem only.
		 */
		mptcp_data_lock(sk);
		amt = sk_mem_pages(sk->sk_forward_alloc +
				   msk->backlog_unaccounted +
				   atomic_read(&sk->sk_rmem_alloc)) -
		      sk_mem_pages(sk->sk_forward_alloc +
				   atomic_read(&sk->sk_rmem_alloc));
		msk->backlog_unaccounted = 0;
		mptcp_data_unlock(sk);

		if (amt)
			mem_cgroup_sk_charge(sk, amt, gfp);
	}
}

static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       struct proto_accept_arg *arg)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct sock *ssk, *newsk;

	pr_debug("msk=%p\n", msk);

	/* Buggy applications can call accept on socket states other than LISTEN
	 * but no need to allocate the first subflow just to error out.
	 */
	ssk = READ_ONCE(msk->first);
	if (!ssk)
		return -EINVAL;

	pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk));
	newsk = inet_csk_accept(ssk, arg);
	if (!newsk)
		return arg->err;

	pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk));
	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;

		subflow = mptcp_subflow_ctx(newsk);
		new_mptcp_sock = subflow->conn;

		/* is_mptcp should be false if subflow->conn is missing, see
		 * subflow_syn_recv_sock()
		 */
		if (WARN_ON_ONCE(!new_mptcp_sock)) {
			tcp_sk(newsk)->is_mptcp = 0;
			goto tcpfallback;
		}

		newsk = new_mptcp_sock;
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK);

		newsk->sk_kern_sock = arg->kern;
		lock_sock(newsk);
		__inet_accept(sock, newsock, newsk);

		set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
		msk = mptcp_sk(newsk);
		msk->in_accept_queue = 0;

		mptcp_graft_subflows(newsk);
		mptcp_rps_record_subflows(msk);

		/* Do late cleanup for the first subflow as necessary. Also
		 * deal with bad peers not doing a complete shutdown.
		 */
		if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
			if (unlikely(list_is_singular(&msk->conn_list)))
				mptcp_set_state(newsk, TCP_CLOSE);
			mptcp_close_ssk(newsk, msk->first,
					mptcp_subflow_ctx(msk->first));
		}
	} else {
tcpfallback:
		newsk->sk_kern_sock = arg->kern;
		lock_sock(newsk);
		__inet_accept(sock, newsock, newsk);
		/* we are being invoked after accepting a non-mp-capable
		 * flow: sk is a tcp_sk, not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		WRITE_ONCE(newsock->sk->sk_socket->ops,
			   mptcp_fallback_tcp_ops(newsock->sk));
	}
	release_sock(newsk);

	return 0;
}
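
/* Illustrative userspace sketch, not part of the kernel build: a
 * listener created with IPPROTO_MPTCP serves MP_CAPABLE and plain TCP
 * peers alike; either way, the accept() path above hands userspace a
 * single ordinary file descriptor:
 *
 *	int lfd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *
 *	bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(lfd, SOMAXCONN);
 *	int cfd = accept(lfd, NULL, NULL);
 */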

static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;

	if (__mptcp_stream_is_writeable(sk, 1))
		return EPOLLOUT | EPOLLWRNORM;

	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
	smp_mb__after_atomic(); /* NOSPACE is changed by mptcp_write_space() */
	if (__mptcp_stream_is_writeable(sk, 1))
		return EPOLLOUT | EPOLLWRNORM;

	return 0;
}

static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	struct sock *sk = sock->sk;
	struct mptcp_sock *msk;
	__poll_t mask = 0;
	u8 shutdown;
	int state;

	msk = mptcp_sk(sk);
	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
	if (state == TCP_LISTEN) {
		struct sock *ssk = READ_ONCE(msk->first);

		if (WARN_ON_ONCE(!ssk))
			return 0;

		return inet_csk_listen_poll(ssk);
	}

	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
		mask |= mptcp_check_readable(sk);
		if (shutdown & SEND_SHUTDOWN)
			mask |= EPOLLOUT | EPOLLWRNORM;
		else
			mask |= mptcp_check_writeable(msk);
	} else if (state == TCP_SYN_SENT &&
		   inet_test_bit(DEFER_CONNECT, sk)) {
		/* cf tcp_poll() note about TFO */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}

	/* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
	smp_rmb();
	if (READ_ONCE(sk->sk_err))
		mask |= EPOLLERR;

	return mask;
}

static struct sk_buff *mptcp_recv_skb(struct sock *sk, u32 *off)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sk_buff *skb;
	u32 offset;

	if (!list_empty(&msk->backlog_list))
		mptcp_move_skbs(sk);

	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		offset = MPTCP_SKB_CB(skb)->offset;
		if (offset < skb->len) {
			*off = offset;
			return skb;
		}
		mptcp_eat_recv_skb(sk, skb);
	}
	return NULL;
}

/*
 * Note:
 * - It is assumed that the socket was locked by the caller.
 */
static int __mptcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			     sk_read_actor_t recv_actor, bool noack)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sk_buff *skb;
	int copied = 0;
	u32 offset;

	msk_owned_by_me(msk);

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = mptcp_recv_skb(sk, &offset)) != NULL) {
		u32 data_len = skb->len - offset;
		int count;
		u32 size;

		size = min_t(size_t, data_len, INT_MAX);
		count = recv_actor(desc, skb, offset, size);
		if (count <= 0) {
			if (!copied)
				copied = count;
			break;
		}

		copied += count;

		msk->bytes_consumed += count;
		if (count < data_len) {
			MPTCP_SKB_CB(skb)->offset += count;
			MPTCP_SKB_CB(skb)->map_seq += count;
			break;
		}

		mptcp_eat_recv_skb(sk, skb);
	}

	if (noack)
		goto out;

	mptcp_rcv_space_adjust(msk, copied);

	if (copied > 0) {
		mptcp_recv_skb(sk, &offset);
		mptcp_cleanup_rbuf(msk, copied);
	}
out:
	return copied;
}

static int mptcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			   sk_read_actor_t recv_actor)
{
	return __mptcp_read_sock(sk, desc, recv_actor, false);
}

static int __mptcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return mptcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 * mptcp_splice_read - splice data from MPTCP socket to a pipe
 * @sock: socket to splice from
 * @ppos: position (not valid)
 * @pipe: pipe to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 * Return:
 *    Amount of bytes that have been spliced.
 *
 **/
static ssize_t mptcp_splice_read(struct socket *sock, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	struct sock *sk = sock->sk;
	ssize_t spliced = 0;
	int ret = 0;
	long timeo;

	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	lock_sock(sk);

	mptcp_rps_record_subflows(mptcp_sk(sk));

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __mptcp_splice_read(sk, &tss);
		if (ret < 0) {
			break;
		} else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when the user tries to read
				 * from a never-connected socket.
				 */
				ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			/* if __mptcp_splice_read() got nothing while we have
			 * an skb in receive queue, we do not want to loop.
			 * This might happen with URG data.
			 */
			if (!skb_queue_empty(&sk->sk_receive_queue))
				break;
			ret = sk_wait_data(sk, &timeo, NULL);
			if (ret < 0)
				break;
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!tss.len || !timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
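
/* Illustrative userspace sketch, not part of the kernel build:
 * splice(2) on an MPTCP socket behaves as on TCP, e.g. moving received
 * data into a pipe without a round trip through user memory:
 *
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	splice(fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 */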

static const struct proto_ops mptcp_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = mptcp_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.set_rcvlowat	   = mptcp_set_rcvlowat,
	.read_sock	   = mptcp_read_sock,
	.splice_read	   = mptcp_splice_read,
};

static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

static int mptcp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mptcp_delegated_action *delegated;
	struct mptcp_subflow_context *subflow;
	int work_done = 0;

	delegated = container_of(napi, struct mptcp_delegated_action, napi);
	while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		bh_lock_sock_nested(ssk);
		if (!sock_owned_by_user(ssk)) {
			mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0));
		} else {
			/* tcp_release_cb_override has already processed the
			 * action or will do so at the next release_sock().
			 * In both cases we must dequeue the subflow here - on
			 * the same CPU that scheduled it.
			 */
			smp_wmb();
			clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status);
		}
		bh_unlock_sock(ssk);
		sock_put(ssk);

		if (++work_done == budget)
			return budget;
	}

	/* always provide a 0 'work_done' argument, so that napi_complete_done
	 * will not try accessing the NULL napi->dev ptr
	 */
	napi_complete_done(napi, 0);
	return work_done;
}

void __init mptcp_proto_init(void)
{
	struct mptcp_delegated_action *delegated;
	int cpu;

	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
		panic("Failed to allocate MPTCP pcpu counter\n");

	mptcp_napi_dev = alloc_netdev_dummy(0);
	if (!mptcp_napi_dev)
		panic("Failed to allocate MPTCP dummy netdev\n");
	for_each_possible_cpu(cpu) {
		delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
		INIT_LIST_HEAD(&delegated->head);
		netif_napi_add_tx(mptcp_napi_dev, &delegated->napi,
				  mptcp_napi_poll);
		napi_enable(&delegated->napi);
	}

	mptcp_subflow_init();
	mptcp_pm_init();
	mptcp_sched_init();
	mptcp_token_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);

	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = mptcp_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet6_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet6_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet6_sendmsg,
	.recvmsg	   = inet6_recvmsg,
	.mmap		   = sock_no_mmap,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet6_compat_ioctl,
#endif
	.set_rcvlowat	   = mptcp_set_rcvlowat,
	.read_sock	   = mptcp_read_sock,
	.splice_read	   = mptcp_splice_read,
};

static struct proto mptcp_v6_prot;

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

int __init mptcp_proto_v6_init(void)
{
	int err;

	mptcp_subflow_v6_init();

	mptcp_v6_prot = mptcp_prot;
	strscpy(mptcp_v6_prot.name, "MPTCPv6", sizeof(mptcp_v6_prot.name));
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
	mptcp_v6_prot.ipv6_pinfo_offset = offsetof(struct mptcp6_sock, np);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif