1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3 *
4 * Copyright (c) 2017 - 2019, Intel Corporation.
5 */
6
7 #define pr_fmt(fmt) "MPTCP: " fmt
8
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/sched/signal.h>
13 #include <linux/atomic.h>
14 #include <net/aligned_data.h>
15 #include <net/rps.h>
16 #include <net/sock.h>
17 #include <net/inet_common.h>
18 #include <net/inet_hashtables.h>
19 #include <net/protocol.h>
20 #include <net/tcp_states.h>
21 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
22 #include <net/transp_v6.h>
23 #endif
24 #include <net/mptcp.h>
25 #include <net/hotdata.h>
26 #include <net/xfrm.h>
27 #include <asm/ioctls.h>
28 #include "protocol.h"
29 #include "mib.h"
30
31 static unsigned int mptcp_inq_hint(const struct sock *sk);
32
33 #define CREATE_TRACE_POINTS
34 #include <trace/events/mptcp.h>
35
36 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
37 struct mptcp6_sock {
38 struct mptcp_sock msk;
39 struct ipv6_pinfo np;
40 };
41 #endif
42
43 enum {
44 MPTCP_CMSG_TS = BIT(0),
45 MPTCP_CMSG_INQ = BIT(1),
46 };
47
48 static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp;
49
50 static void __mptcp_destroy_sock(struct sock *sk);
51 static void mptcp_check_send_data_fin(struct sock *sk);
52
53 DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions) = {
54 .bh_lock = INIT_LOCAL_LOCK(bh_lock),
55 };
56 static struct net_device *mptcp_napi_dev;
57
58 /* Returns end sequence number of the receiver's advertised window */
59 static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
60 {
61 return READ_ONCE(msk->wnd_end);
62 }
63
64 static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk)
65 {
66 unsigned short family = READ_ONCE(sk->sk_family);
67
68 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
69 if (family == AF_INET6)
70 return &inet6_stream_ops;
71 #endif
72 WARN_ON_ONCE(family != AF_INET);
73 return &inet_stream_ops;
74 }
75
76 bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib)
77 {
78 struct net *net = sock_net((struct sock *)msk);
79
80 if (__mptcp_check_fallback(msk))
81 return true;
82
83 /* The caller may not hold the msk socket lock, but
84 * in the fallback case only the current subflow is touching
85 * the OoO queue.
86 */
87 if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
88 return false;
89
90 spin_lock_bh(&msk->fallback_lock);
91 if (!msk->allow_infinite_fallback) {
92 spin_unlock_bh(&msk->fallback_lock);
93 return false;
94 }
95
96 msk->allow_subflows = false;
97 set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
98 __MPTCP_INC_STATS(net, fb_mib);
99 spin_unlock_bh(&msk->fallback_lock);
100 return true;
101 }
102
103 static int __mptcp_socket_create(struct mptcp_sock *msk)
104 {
105 struct mptcp_subflow_context *subflow;
106 struct sock *sk = (struct sock *)msk;
107 struct socket *ssock;
108 int err;
109
110 err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock);
111 if (err)
112 return err;
113
114 msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio;
115 WRITE_ONCE(msk->first, ssock->sk);
116 subflow = mptcp_subflow_ctx(ssock->sk);
117 list_add(&subflow->node, &msk->conn_list);
118 sock_hold(ssock->sk);
119 subflow->request_mptcp = 1;
120 subflow->subflow_id = msk->subflow_id++;
121
122 /* This is the first subflow, always with id 0 */
123 WRITE_ONCE(subflow->local_id, 0);
124 mptcp_sock_graft(msk->first, sk->sk_socket);
125 iput(SOCK_INODE(ssock));
126
127 return 0;
128 }
129
130 /* If the MPC handshake has not started yet, returns the first subflow,
131 * allocating it if needed.
132 */
133 struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk)
134 {
135 struct sock *sk = (struct sock *)msk;
136 int ret;
137
138 if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
139 return ERR_PTR(-EINVAL);
140
141 if (!msk->first) {
142 ret = __mptcp_socket_create(msk);
143 if (ret)
144 return ERR_PTR(ret);
145 }
146
147 return msk->first;
148 }
149
150 static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
151 {
152 sk_drops_skbadd(sk, skb);
153 __kfree_skb(skb);
154 }
155
156 static bool __mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
157 struct sk_buff *from, bool *fragstolen,
158 int *delta)
159 {
160 int limit = READ_ONCE(sk->sk_rcvbuf);
161
162 if (unlikely(MPTCP_SKB_CB(to)->cant_coalesce) ||
163 MPTCP_SKB_CB(from)->offset ||
164 ((to->len + from->len) > (limit >> 3)) ||
165 !skb_try_coalesce(to, from, fragstolen, delta))
166 return false;
167
168 pr_debug("coalesced seq %llx into %llx new len %d new end seq %llx\n",
169 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
170 to->len, MPTCP_SKB_CB(from)->end_seq);
171 MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
172 return true;
173 }
174
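/* Note on the split above (added, illustrative): __mptcp_try_coalesce() only
 * attempts the merge and reports the truesize delta; memory accounting is
 * left to the callers: mptcp_try_coalesce() below charges the delta to the
 * msk receive memory, while __mptcp_add_backlog() lends it from the subflow
 * instead.
 */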
175 static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
176 struct sk_buff *from)
177 {
178 bool fragstolen;
179 int delta;
180
181 if (!__mptcp_try_coalesce(sk, to, from, &fragstolen, &delta))
182 return false;
183
184 /* note the fwd memory can reach a negative value after accounting
185 * for the delta, but the later skb free will restore a
186 * non-negative one
187 */
188 atomic_add(delta, &sk->sk_rmem_alloc);
189 sk_mem_charge(sk, delta);
190 kfree_skb_partial(from, fragstolen);
191
192 return true;
193 }
194
195 static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
196 struct sk_buff *from)
197 {
198 if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
199 return false;
200
201 return mptcp_try_coalesce((struct sock *)msk, to, from);
202 }
203
204 /* "inspired" by tcp_rcvbuf_grow(), main differences:
205 * - mptcp does not maintain a msk-level window clamp
206 * - returns true when the receive buffer is actually updated
207 */
208 static bool mptcp_rcvbuf_grow(struct sock *sk, u32 newval)
209 {
210 struct mptcp_sock *msk = mptcp_sk(sk);
211 const struct net *net = sock_net(sk);
212 u32 rcvwin, rcvbuf, cap, oldval;
213 u64 grow;
214
215 oldval = msk->rcvq_space.space;
216 msk->rcvq_space.space = newval;
217 if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
218 (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
219 return false;
220
221 /* DRS is always one RTT late. */
222 rcvwin = newval << 1;
223
224 /* slow start: allow the sender to double its rate. */
225 grow = (u64)rcvwin * (newval - oldval);
226 do_div(grow, oldval);
227 rcvwin += grow << 1;
228
229 cap = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);
230
231 rcvbuf = min_t(u32, mptcp_space_from_win(sk, rcvwin), cap);
232 if (rcvbuf > sk->sk_rcvbuf) {
233 WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
234 return true;
235 }
236 return false;
237 }
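/* Worked example for mptcp_rcvbuf_grow() above (purely illustrative): with
 * oldval = 64KB measured in the previous round and newval = 96KB now,
 * rcvwin starts at 2 * 96KB = 192KB and the slow-start term adds
 * 2 * 192KB * (96 - 64) / 64 = 192KB, for a 384KB target window that is
 * then converted via mptcp_space_from_win() and clamped to tcp_rmem[2].
 * Note that sk_rcvbuf only ever grows here; it is never shrunk.
 */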
238
239 /* "inspired" by tcp_data_queue_ofo(), main differences:
240 * - use mptcp seqs
241 * - don't cope with sacks
242 */
243 static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
244 {
245 struct sock *sk = (struct sock *)msk;
246 struct rb_node **p, *parent;
247 u64 seq, end_seq, max_seq;
248 struct sk_buff *skb1;
249
250 seq = MPTCP_SKB_CB(skb)->map_seq;
251 end_seq = MPTCP_SKB_CB(skb)->end_seq;
252 max_seq = atomic64_read(&msk->rcv_wnd_sent);
253
254 pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
255 RB_EMPTY_ROOT(&msk->out_of_order_queue));
256 if (after64(end_seq, max_seq)) {
257 /* out of window */
258 mptcp_drop(sk, skb);
259 pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
260 (unsigned long long)end_seq - (unsigned long long)max_seq,
261 (unsigned long long)atomic64_read(&msk->rcv_wnd_sent));
262 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
263 return;
264 }
265
266 p = &msk->out_of_order_queue.rb_node;
267 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
268 if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
269 rb_link_node(&skb->rbnode, NULL, p);
270 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
271 msk->ooo_last_skb = skb;
272 goto end;
273 }
274
275 /* with 2 subflows, adding at the end of the ooo queue is quite likely.
276 * Use of ooo_last_skb avoids the O(log N) rbtree lookup.
277 */
278 if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
279 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
280 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
281 return;
282 }
283
284 /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
285 if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
286 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
287 parent = &msk->ooo_last_skb->rbnode;
288 p = &parent->rb_right;
289 goto insert;
290 }
291
292 /* Find place to insert this segment. Handle overlaps on the way. */
293 parent = NULL;
294 while (*p) {
295 parent = *p;
296 skb1 = rb_to_skb(parent);
297 if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
298 p = &parent->rb_left;
299 continue;
300 }
301 if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
302 if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
303 /* All the bits are present. Drop. */
304 mptcp_drop(sk, skb);
305 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
306 return;
307 }
308 if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
309 /* partial overlap:
310 * | skb |
311 * | skb1 |
312 * continue traversing
313 */
314 } else {
315 /* skb's seq == skb1's seq and skb covers skb1.
316 * Replace skb1 with skb.
317 */
318 rb_replace_node(&skb1->rbnode, &skb->rbnode,
319 &msk->out_of_order_queue);
320 mptcp_drop(sk, skb1);
321 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
322 goto merge_right;
323 }
324 } else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
325 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
326 return;
327 }
328 p = &parent->rb_right;
329 }
330
331 insert:
332 /* Insert segment into RB tree. */
333 rb_link_node(&skb->rbnode, parent, p);
334 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
335
336 merge_right:
337 /* Remove other segments covered by skb. */
338 while ((skb1 = skb_rb_next(skb)) != NULL) {
339 if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
340 break;
341 rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
342 mptcp_drop(sk, skb1);
343 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
344 }
345 /* If there is no skb after us, we are the last_skb! */
346 if (!skb1)
347 msk->ooo_last_skb = skb;
348
349 end:
350 skb_condense(skb);
351 skb_set_owner_r(skb, sk);
352 }
353
354 static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset,
355 int copy_len)
356 {
357 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
358 bool has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
359
360 /* the skb map_seq accounts for the skb offset:
361 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
362 * value
363 */
364 MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
365 MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
366 MPTCP_SKB_CB(skb)->offset = offset;
367 MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp;
368 MPTCP_SKB_CB(skb)->cant_coalesce = 0;
369
370 __skb_unlink(skb, &ssk->sk_receive_queue);
371
372 skb_ext_reset(skb);
373 skb_dst_drop(skb);
374 }
375
376 static bool __mptcp_move_skb(struct sock *sk, struct sk_buff *skb)
377 {
378 u64 copy_len = MPTCP_SKB_CB(skb)->end_seq - MPTCP_SKB_CB(skb)->map_seq;
379 struct mptcp_sock *msk = mptcp_sk(sk);
380 struct sk_buff *tail;
381
382 mptcp_borrow_fwdmem(sk, skb);
383
384 if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
385 /* in sequence */
386 msk->bytes_received += copy_len;
387 WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
388 tail = skb_peek_tail(&sk->sk_receive_queue);
389 if (tail && mptcp_try_coalesce(sk, tail, skb))
390 return true;
391
392 skb_set_owner_r(skb, sk);
393 __skb_queue_tail(&sk->sk_receive_queue, skb);
394 return true;
395 } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
396 mptcp_data_queue_ofo(msk, skb);
397 return false;
398 }
399
400 /* old data, keep it simple and drop the whole pkt, the sender
401 * will retransmit it if needed.
402 */
403 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
404 mptcp_drop(sk, skb);
405 return false;
406 }
407
408 static void mptcp_stop_rtx_timer(struct sock *sk)
409 {
410 sk_stop_timer(sk, &sk->mptcp_retransmit_timer);
411 mptcp_sk(sk)->timer_ival = 0;
412 }
413
414 static void mptcp_close_wake_up(struct sock *sk)
415 {
416 if (sock_flag(sk, SOCK_DEAD))
417 return;
418
419 sk->sk_state_change(sk);
420 if (sk->sk_shutdown == SHUTDOWN_MASK ||
421 sk->sk_state == TCP_CLOSE)
422 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
423 else
424 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
425 }
426
427 static void mptcp_shutdown_subflows(struct mptcp_sock *msk)
428 {
429 struct mptcp_subflow_context *subflow;
430
431 mptcp_for_each_subflow(msk, subflow) {
432 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
433 bool slow;
434
435 slow = lock_sock_fast(ssk);
436 tcp_shutdown(ssk, SEND_SHUTDOWN);
437 unlock_sock_fast(ssk, slow);
438 }
439 }
440
441 /* called under the msk socket lock */
442 static bool mptcp_pending_data_fin_ack(struct sock *sk)
443 {
444 struct mptcp_sock *msk = mptcp_sk(sk);
445
446 return ((1 << sk->sk_state) &
447 (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
448 msk->write_seq == READ_ONCE(msk->snd_una);
449 }
450
451 static void mptcp_check_data_fin_ack(struct sock *sk)
452 {
453 struct mptcp_sock *msk = mptcp_sk(sk);
454
455 /* Look for an acknowledged DATA_FIN */
456 if (mptcp_pending_data_fin_ack(sk)) {
457 WRITE_ONCE(msk->snd_data_fin_enable, 0);
458
459 switch (sk->sk_state) {
460 case TCP_FIN_WAIT1:
461 mptcp_set_state(sk, TCP_FIN_WAIT2);
462 break;
463 case TCP_CLOSING:
464 case TCP_LAST_ACK:
465 mptcp_shutdown_subflows(msk);
466 mptcp_set_state(sk, TCP_CLOSE);
467 break;
468 }
469
470 mptcp_close_wake_up(sk);
471 }
472 }
473
474 /* can be called with no lock acquired */
475 static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
476 {
477 struct mptcp_sock *msk = mptcp_sk(sk);
478
479 if (READ_ONCE(msk->rcv_data_fin) &&
480 ((1 << inet_sk_state_load(sk)) &
481 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
482 u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);
483
484 if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) {
485 if (seq)
486 *seq = rcv_data_fin_seq;
487
488 return true;
489 }
490 }
491
492 return false;
493 }
494
495 static void mptcp_set_datafin_timeout(struct sock *sk)
496 {
497 struct inet_connection_sock *icsk = inet_csk(sk);
498 u32 retransmits;
499
500 retransmits = min_t(u32, icsk->icsk_retransmits,
501 ilog2(TCP_RTO_MAX / TCP_RTO_MIN));
502
503 mptcp_sk(sk)->timer_ival = TCP_RTO_MIN << retransmits;
504 }
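/* Added note: mptcp_set_datafin_timeout() mirrors the TCP retransmit backoff:
 * the DATA_FIN timeout starts at TCP_RTO_MIN and doubles with every
 * retransmission, with the shift clamped to ilog2(TCP_RTO_MAX / TCP_RTO_MIN)
 * so the resulting interval never exceeds TCP_RTO_MAX.
 */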
505
506 static void __mptcp_set_timeout(struct sock *sk, long tout)
507 {
508 mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
509 }
510
511 static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
512 {
513 const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
514
515 return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
516 tcp_timeout_expires(ssk) - jiffies : 0;
517 }
518
519 static void mptcp_set_timeout(struct sock *sk)
520 {
521 struct mptcp_subflow_context *subflow;
522 long tout = 0;
523
524 mptcp_for_each_subflow(mptcp_sk(sk), subflow)
525 tout = max(tout, mptcp_timeout_from_subflow(subflow));
526 __mptcp_set_timeout(sk, tout);
527 }
528
529 static inline bool tcp_can_send_ack(const struct sock *ssk)
530 {
531 return !((1 << inet_sk_state_load(ssk)) &
532 (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
533 }
534
535 void __mptcp_subflow_send_ack(struct sock *ssk)
536 {
537 if (tcp_can_send_ack(ssk))
538 tcp_send_ack(ssk);
539 }
540
541 static void mptcp_subflow_send_ack(struct sock *ssk)
542 {
543 bool slow;
544
545 slow = lock_sock_fast(ssk);
546 __mptcp_subflow_send_ack(ssk);
547 unlock_sock_fast(ssk, slow);
548 }
549
550 static void mptcp_send_ack(struct mptcp_sock *msk)
551 {
552 struct mptcp_subflow_context *subflow;
553
554 mptcp_for_each_subflow(msk, subflow)
555 mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
556 }
557
558 static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
559 {
560 bool slow;
561
562 slow = lock_sock_fast(ssk);
563 if (tcp_can_send_ack(ssk))
564 tcp_cleanup_rbuf(ssk, copied);
565 unlock_sock_fast(ssk, slow);
566 }
567
568 static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
569 {
570 const struct inet_connection_sock *icsk = inet_csk(ssk);
571 u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending);
572 const struct tcp_sock *tp = tcp_sk(ssk);
573
574 return (ack_pending & ICSK_ACK_SCHED) &&
575 ((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) >
576 READ_ONCE(icsk->icsk_ack.rcv_mss)) ||
577 (rx_empty && ack_pending &
578 (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
579 }
580
581 static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
582 {
583 int old_space = READ_ONCE(msk->old_wspace);
584 struct mptcp_subflow_context *subflow;
585 struct sock *sk = (struct sock *)msk;
586 int space = __mptcp_space(sk);
587 bool cleanup, rx_empty;
588
589 cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
590 rx_empty = !sk_rmem_alloc_get(sk) && copied;
591
592 mptcp_for_each_subflow(msk, subflow) {
593 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
594
595 if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
596 mptcp_subflow_cleanup_rbuf(ssk, copied);
597 }
598 }
599
600 static void mptcp_check_data_fin(struct sock *sk)
601 {
602 struct mptcp_sock *msk = mptcp_sk(sk);
603 u64 rcv_data_fin_seq;
604
605 /* Need to ack a DATA_FIN received from a peer while this side
606 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
607 * msk->rcv_data_fin was set when parsing the incoming options
608 * at the subflow level and the msk lock was not held, so this
609 * is the first opportunity to act on the DATA_FIN and change
610 * the msk state.
611 *
612 * If we are caught up to the sequence number of the incoming
613 * DATA_FIN, send the DATA_ACK now and do state transition. If
614 * not caught up, do nothing and let the recv code send DATA_ACK
615 * when catching up.
616 */
617
618 if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
619 WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
620 WRITE_ONCE(msk->rcv_data_fin, 0);
621
622 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
623 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
624
625 switch (sk->sk_state) {
626 case TCP_ESTABLISHED:
627 mptcp_set_state(sk, TCP_CLOSE_WAIT);
628 break;
629 case TCP_FIN_WAIT1:
630 mptcp_set_state(sk, TCP_CLOSING);
631 break;
632 case TCP_FIN_WAIT2:
633 mptcp_shutdown_subflows(msk);
634 mptcp_set_state(sk, TCP_CLOSE);
635 break;
636 default:
637 /* Other states not expected */
638 WARN_ON_ONCE(1);
639 break;
640 }
641
642 if (!__mptcp_check_fallback(msk))
643 mptcp_send_ack(msk);
644 mptcp_close_wake_up(sk);
645 }
646 }
647
648 static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
649 {
650 if (!mptcp_try_fallback(ssk, MPTCP_MIB_DSSCORRUPTIONFALLBACK)) {
651 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET);
652 mptcp_subflow_reset(ssk);
653 }
654 }
655
656 static void __mptcp_add_backlog(struct sock *sk,
657 struct mptcp_subflow_context *subflow,
658 struct sk_buff *skb)
659 {
660 struct mptcp_sock *msk = mptcp_sk(sk);
661 struct sk_buff *tail = NULL;
662 struct sock *ssk = skb->sk;
663 bool fragstolen;
664 int delta;
665
666 if (unlikely(sk->sk_state == TCP_CLOSE)) {
667 kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
668 return;
669 }
670
671 /* Try to coalesce with the last skb in our backlog */
672 if (!list_empty(&msk->backlog_list))
673 tail = list_last_entry(&msk->backlog_list, struct sk_buff, list);
674
675 if (tail && MPTCP_SKB_CB(skb)->map_seq == MPTCP_SKB_CB(tail)->end_seq &&
676 ssk == tail->sk &&
677 __mptcp_try_coalesce(sk, tail, skb, &fragstolen, &delta)) {
678 skb->truesize -= delta;
679 kfree_skb_partial(skb, fragstolen);
680 __mptcp_subflow_lend_fwdmem(subflow, delta);
681 goto account;
682 }
683
684 list_add_tail(&skb->list, &msk->backlog_list);
685 mptcp_subflow_lend_fwdmem(subflow, skb);
686 delta = skb->truesize;
687
688 account:
689 WRITE_ONCE(msk->backlog_len, msk->backlog_len + delta);
690
691 /* Possibly not accept()ed yet, keep track of memory not CG
692 * accounted, mptcp_graft_subflows() will handle it.
693 */
694 if (!mem_cgroup_from_sk(ssk))
695 msk->backlog_unaccounted += delta;
696 }
697
698 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
699 struct sock *ssk, bool own_msk)
700 {
701 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
702 struct sock *sk = (struct sock *)msk;
703 bool more_data_avail;
704 struct tcp_sock *tp;
705 bool ret = false;
706
707 pr_debug("msk=%p ssk=%p\n", msk, ssk);
708 tp = tcp_sk(ssk);
709 do {
710 u32 map_remaining, offset;
711 u32 seq = tp->copied_seq;
712 struct sk_buff *skb;
713 bool fin;
714
715 /* try to move as much data as available */
716 map_remaining = subflow->map_data_len -
717 mptcp_subflow_get_map_offset(subflow);
718
719 skb = skb_peek(&ssk->sk_receive_queue);
720 if (unlikely(!skb))
721 break;
722
723 if (__mptcp_check_fallback(msk)) {
724 /* Under fallback skbs have no MPTCP extension and TCP could
725 * collapse them between the dummy map creation and the
726 * current dequeue. Be sure to adjust the map size.
727 */
728 map_remaining = skb->len;
729 subflow->map_data_len = skb->len;
730 }
731
732 offset = seq - TCP_SKB_CB(skb)->seq;
733 fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
734 if (fin)
735 seq++;
736
737 if (offset < skb->len) {
738 size_t len = skb->len - offset;
739
740 mptcp_init_skb(ssk, skb, offset, len);
741
742 if (own_msk && sk_rmem_alloc_get(sk) < sk->sk_rcvbuf) {
743 mptcp_subflow_lend_fwdmem(subflow, skb);
744 ret |= __mptcp_move_skb(sk, skb);
745 } else {
746 __mptcp_add_backlog(sk, subflow, skb);
747 }
748 seq += len;
749
750 if (unlikely(map_remaining < len)) {
751 DEBUG_NET_WARN_ON_ONCE(1);
752 mptcp_dss_corruption(msk, ssk);
753 }
754 } else {
755 if (unlikely(!fin)) {
756 DEBUG_NET_WARN_ON_ONCE(1);
757 mptcp_dss_corruption(msk, ssk);
758 }
759
760 sk_eat_skb(ssk, skb);
761 }
762
763 WRITE_ONCE(tp->copied_seq, seq);
764 more_data_avail = mptcp_subflow_data_available(ssk);
765
766 } while (more_data_avail);
767
768 if (ret)
769 msk->last_data_recv = tcp_jiffies32;
770 return ret;
771 }
772
773 static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
774 {
775 struct sock *sk = (struct sock *)msk;
776 struct sk_buff *skb, *tail;
777 bool moved = false;
778 struct rb_node *p;
779 u64 end_seq;
780
781 p = rb_first(&msk->out_of_order_queue);
782 pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
783 while (p) {
784 skb = rb_to_skb(p);
785 if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
786 break;
787
788 p = rb_next(p);
789 rb_erase(&skb->rbnode, &msk->out_of_order_queue);
790
791 if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq,
792 msk->ack_seq))) {
793 mptcp_drop(sk, skb);
794 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
795 continue;
796 }
797
798 end_seq = MPTCP_SKB_CB(skb)->end_seq;
799 tail = skb_peek_tail(&sk->sk_receive_queue);
800 if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
801 int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
802
803 /* skip overlapping data, if any */
804 pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
805 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
806 delta);
807 MPTCP_SKB_CB(skb)->offset += delta;
808 MPTCP_SKB_CB(skb)->map_seq += delta;
809 __skb_queue_tail(&sk->sk_receive_queue, skb);
810 }
811 msk->bytes_received += end_seq - msk->ack_seq;
812 WRITE_ONCE(msk->ack_seq, end_seq);
813 moved = true;
814 }
815 return moved;
816 }
817
818 static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk)
819 {
820 int ssk_state;
821 int err;
822
823 /* only propagate errors on fallen-back sockets or
824 * on MPC connect
825 */
826 if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk)))
827 return false;
828
829 err = sock_error(ssk);
830 if (!err)
831 return false;
832
833 /* We need to propagate only transition to CLOSE state.
834 * Orphaned socket will see such state change via
835 * subflow_sched_work_if_closed() and that path will properly
836 * destroy the msk as needed.
837 */
838 ssk_state = inet_sk_state_load(ssk);
839 if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
840 mptcp_set_state(sk, ssk_state);
841 WRITE_ONCE(sk->sk_err, -err);
842
843 /* This barrier is coupled with smp_rmb() in mptcp_poll() */
844 smp_wmb();
845 sk_error_report(sk);
846 return true;
847 }
848
849 void __mptcp_error_report(struct sock *sk)
850 {
851 struct mptcp_subflow_context *subflow;
852 struct mptcp_sock *msk = mptcp_sk(sk);
853
854 mptcp_for_each_subflow(msk, subflow)
855 if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow)))
856 break;
857 }
858
859 /* In most cases we will be able to lock the mptcp socket. If it's already
860 * owned, we need to defer to the work queue to avoid ABBA deadlock.
861 */
862 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
863 {
864 struct sock *sk = (struct sock *)msk;
865 bool moved;
866
867 moved = __mptcp_move_skbs_from_subflow(msk, ssk, true);
868 __mptcp_ofo_queue(msk);
869 if (unlikely(ssk->sk_err))
870 __mptcp_subflow_error_report(sk, ssk);
871
872 /* If the moves have caught up with the DATA_FIN sequence number
873 * it's time to ack the DATA_FIN and change socket state, but
874 * this is not a good place to change state. Let the workqueue
875 * do it.
876 */
877 if (mptcp_pending_data_fin(sk, NULL))
878 mptcp_schedule_work(sk);
879 return moved;
880 }
881
882 static void mptcp_rcv_rtt_update(struct mptcp_sock *msk,
883 struct mptcp_subflow_context *subflow)
884 {
885 const struct tcp_sock *tp = tcp_sk(subflow->tcp_sock);
886 u32 rtt_us = tp->rcv_rtt_est.rtt_us;
887 int id;
888
889 /* Update once per subflow per rcvwnd to avoid touching the msk
890 * too often.
891 */
892 if (!rtt_us || tp->rcv_rtt_est.seq == subflow->prev_rtt_seq)
893 return;
894
895 subflow->prev_rtt_seq = tp->rcv_rtt_est.seq;
896
897 /* Pairs with READ_ONCE() in mptcp_rtt_us_est(). */
898 id = msk->rcv_rtt_est.next_sample;
899 WRITE_ONCE(msk->rcv_rtt_est.samples[id], rtt_us);
900 if (++msk->rcv_rtt_est.next_sample == MPTCP_RTT_SAMPLES)
901 msk->rcv_rtt_est.next_sample = 0;
902
903 /* EWMA among the incoming subflows */
904 msk->scaling_ratio = ((msk->scaling_ratio << 3) - msk->scaling_ratio +
905 tp->scaling_ratio) >> 3;
906 }
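/* Added note: the scaling_ratio update in mptcp_rcv_rtt_update() is an EWMA
 * with weight 1/8: new = (7 * old + subflow_sample) / 8, so each incoming
 * subflow sample moves the msk-level estimate by one eighth of the
 * difference, while the raw RTT samples land in a small ring buffer later
 * consumed by mptcp_rtt_us_est().
 */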
907
908 void mptcp_data_ready(struct sock *sk, struct sock *ssk)
909 {
910 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
911 struct mptcp_sock *msk = mptcp_sk(sk);
912
913 /* The peer can send data while we are shutting down this
914 * subflow at subflow destruction time, but we must avoid enqueuing
915 * more data to the msk receive queue
916 */
917 if (unlikely(subflow->closing))
918 return;
919
920 mptcp_data_lock(sk);
921 mptcp_rcv_rtt_update(msk, subflow);
922 if (!sock_owned_by_user(sk)) {
923 /* Wake-up the reader only for in-sequence data */
924 if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
925 sk->sk_data_ready(sk);
926 } else {
927 __mptcp_move_skbs_from_subflow(msk, ssk, false);
928 }
929 mptcp_data_unlock(sk);
930 }
931
932 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
933 {
934 mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
935 msk->allow_infinite_fallback = false;
936 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
937 }
938
939 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
940 {
941 struct sock *sk = (struct sock *)msk;
942
943 if (sk->sk_state != TCP_ESTABLISHED)
944 return false;
945
946 spin_lock_bh(&msk->fallback_lock);
947 if (!msk->allow_subflows) {
948 spin_unlock_bh(&msk->fallback_lock);
949 return false;
950 }
951 mptcp_subflow_joined(msk, ssk);
952 spin_unlock_bh(&msk->fallback_lock);
953
954 mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
955 mptcp_sockopt_sync_locked(msk, ssk);
956 mptcp_stop_tout_timer(sk);
957 __mptcp_propagate_sndbuf(sk, ssk);
958 return true;
959 }
960
961 static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list)
962 {
963 struct mptcp_subflow_context *tmp, *subflow;
964 struct mptcp_sock *msk = mptcp_sk(sk);
965
966 list_for_each_entry_safe(subflow, tmp, join_list, node) {
967 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
968 bool slow = lock_sock_fast(ssk);
969
970 list_move_tail(&subflow->node, &msk->conn_list);
971 if (!__mptcp_finish_join(msk, ssk))
972 mptcp_subflow_reset(ssk);
973 unlock_sock_fast(ssk, slow);
974 }
975 }
976
977 static bool mptcp_rtx_timer_pending(struct sock *sk)
978 {
979 return timer_pending(&sk->mptcp_retransmit_timer);
980 }
981
982 static void mptcp_reset_rtx_timer(struct sock *sk)
983 {
984 unsigned long tout;
985
986 /* prevent rescheduling on close */
987 if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE))
988 return;
989
990 tout = mptcp_sk(sk)->timer_ival;
991 sk_reset_timer(sk, &sk->mptcp_retransmit_timer, jiffies + tout);
992 }
993
994 bool mptcp_schedule_work(struct sock *sk)
995 {
996 if (inet_sk_state_load(sk) == TCP_CLOSE)
997 return false;
998
999 /* Get a reference on this socket, mptcp_worker() will release it.
1000 * As mptcp_worker() might complete before us, we can not avoid
1001 * a sock_hold()/sock_put() if schedule_work() returns false.
1002 */
1003 sock_hold(sk);
1004
1005 if (schedule_work(&mptcp_sk(sk)->work))
1006 return true;
1007
1008 sock_put(sk);
1009 return false;
1010 }
1011
1012 static bool mptcp_skb_can_collapse_to(u64 write_seq,
1013 const struct sk_buff *skb,
1014 const struct mptcp_ext *mpext)
1015 {
1016 if (!tcp_skb_can_collapse_to(skb))
1017 return false;
1018
1019 /* can collapse only if MPTCP level sequence is in order and this
1020 * mapping has not been xmitted yet
1021 */
1022 return mpext && mpext->data_seq + mpext->data_len == write_seq &&
1023 !mpext->frozen;
1024 }
1025
1026 /* we can append data to the given data frag if:
1027 * - there is space available in the backing page_frag
1028 * - the data frag tail matches the current page_frag free offset
1029 * - the data frag end sequence number matches the current write seq
1030 */
1031 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
1032 const struct page_frag *pfrag,
1033 const struct mptcp_data_frag *df)
1034 {
1035 return df && !df->eor &&
1036 pfrag->page == df->page &&
1037 pfrag->size - pfrag->offset > 0 &&
1038 pfrag->offset == (df->offset + df->data_len) &&
1039 df->data_seq + df->data_len == msk->write_seq;
1040 }
1041
1042 static void dfrag_uncharge(struct sock *sk, int len)
1043 {
1044 sk_mem_uncharge(sk, len);
1045 sk_wmem_queued_add(sk, -len);
1046 }
1047
1048 static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
1049 {
1050 int len = dfrag->data_len + dfrag->overhead;
1051
1052 list_del(&dfrag->list);
1053 dfrag_uncharge(sk, len);
1054 put_page(dfrag->page);
1055 }
1056
1057 /* called under both the msk socket lock and the data lock */
1058 static void __mptcp_clean_una(struct sock *sk)
1059 {
1060 struct mptcp_sock *msk = mptcp_sk(sk);
1061 struct mptcp_data_frag *dtmp, *dfrag;
1062 u64 snd_una;
1063
1064 snd_una = msk->snd_una;
1065 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
1066 if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
1067 break;
1068
1069 if (unlikely(dfrag == msk->first_pending)) {
1070 /* in recovery mode can see ack after the current snd head */
1071 if (WARN_ON_ONCE(!msk->recovery))
1072 break;
1073
1074 msk->first_pending = mptcp_send_next(sk);
1075 }
1076
1077 dfrag_clear(sk, dfrag);
1078 }
1079
1080 dfrag = mptcp_rtx_head(sk);
1081 if (dfrag && after64(snd_una, dfrag->data_seq)) {
1082 u64 delta = snd_una - dfrag->data_seq;
1083
1084 /* prevent wrap around in recovery mode */
1085 if (unlikely(delta > dfrag->already_sent)) {
1086 if (WARN_ON_ONCE(!msk->recovery))
1087 goto out;
1088 if (WARN_ON_ONCE(delta > dfrag->data_len))
1089 goto out;
1090 dfrag->already_sent += delta - dfrag->already_sent;
1091 }
1092
1093 dfrag->data_seq += delta;
1094 dfrag->offset += delta;
1095 dfrag->data_len -= delta;
1096 dfrag->already_sent -= delta;
1097
1098 dfrag_uncharge(sk, delta);
1099 }
1100
1101 /* all retransmitted data acked, recovery completed */
1102 if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt))
1103 msk->recovery = false;
1104
1105 out:
1106 if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) {
1107 if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
1108 mptcp_stop_rtx_timer(sk);
1109 } else {
1110 mptcp_reset_rtx_timer(sk);
1111 }
1112
1113 if (mptcp_pending_data_fin_ack(sk))
1114 mptcp_schedule_work(sk);
1115 }
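/* Partial ack example for __mptcp_clean_una() (added, illustrative): if the
 * rtx head covers data_seq = 1000 with data_len = 500 and at least 200 bytes
 * already sent, an ack moving snd_una to 1200 does not free the dfrag; the
 * tail block above trims it by delta = 200, advancing data_seq/offset,
 * shrinking data_len/already_sent and uncharging 200 bytes of fwd memory.
 */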
1116
1117 static void __mptcp_clean_una_wakeup(struct sock *sk)
1118 {
1119 lockdep_assert_held_once(&sk->sk_lock.slock);
1120
1121 __mptcp_clean_una(sk);
1122 mptcp_write_space(sk);
1123 }
1124
1125 static void mptcp_clean_una_wakeup(struct sock *sk)
1126 {
1127 mptcp_data_lock(sk);
1128 __mptcp_clean_una_wakeup(sk);
1129 mptcp_data_unlock(sk);
1130 }
1131
1132 static void mptcp_enter_memory_pressure(struct sock *sk)
1133 {
1134 struct mptcp_subflow_context *subflow;
1135 struct mptcp_sock *msk = mptcp_sk(sk);
1136 bool first = true;
1137
1138 mptcp_for_each_subflow(msk, subflow) {
1139 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1140
1141 if (first && !ssk->sk_bypass_prot_mem) {
1142 tcp_enter_memory_pressure(ssk);
1143 first = false;
1144 }
1145
1146 sk_stream_moderate_sndbuf(ssk);
1147 }
1148 __mptcp_sync_sndbuf(sk);
1149 }
1150
1151 /* ensure we get enough memory for the frag hdr, beyond some minimal amount of
1152 * data
1153 */
1154 static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1155 {
1156 if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
1157 pfrag, sk->sk_allocation)))
1158 return true;
1159
1160 mptcp_enter_memory_pressure(sk);
1161 return false;
1162 }
1163
1164 static struct mptcp_data_frag *
1165 mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
1166 int orig_offset)
1167 {
1168 int offset = ALIGN(orig_offset, sizeof(long));
1169 struct mptcp_data_frag *dfrag;
1170
1171 dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
1172 dfrag->data_len = 0;
1173 dfrag->data_seq = msk->write_seq;
1174 dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
1175 dfrag->offset = offset + sizeof(struct mptcp_data_frag);
1176 dfrag->already_sent = 0;
1177 dfrag->page = pfrag->page;
1178 dfrag->eor = 0;
1179
1180 return dfrag;
1181 }
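/* Layout produced by mptcp_carve_data_frag() inside the page frag: an
 * optional alignment pad starting at orig_offset, the struct mptcp_data_frag
 * header at the long-aligned 'offset', then the user data beginning at
 * dfrag->offset; pad plus header size is tracked as dfrag->overhead.
 */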
1182
1183 struct mptcp_sendmsg_info {
1184 int mss_now;
1185 int size_goal;
1186 u16 limit;
1187 u16 sent;
1188 unsigned int flags;
1189 bool data_lock_held;
1190 };
1191
1192 static size_t mptcp_check_allowed_size(const struct mptcp_sock *msk,
1193 struct sock *ssk, u64 data_seq,
1194 size_t avail_size)
1195 {
1196 u64 window_end = mptcp_wnd_end(msk);
1197 u64 mptcp_snd_wnd;
1198
1199 if (__mptcp_check_fallback(msk))
1200 return avail_size;
1201
1202 mptcp_snd_wnd = window_end - data_seq;
1203 avail_size = min(mptcp_snd_wnd, avail_size);
1204
1205 if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) {
1206 tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd);
1207 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_SNDWNDSHARED);
1208 }
1209
1210 return avail_size;
1211 }
1212
1213 static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp)
1214 {
1215 struct skb_ext *mpext = __skb_ext_alloc(gfp);
1216
1217 if (!mpext)
1218 return false;
1219 __skb_ext_set(skb, SKB_EXT_MPTCP, mpext);
1220 return true;
1221 }
1222
1223 static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
1224 {
1225 struct sk_buff *skb;
1226
1227 skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
1228 if (likely(skb)) {
1229 if (likely(__mptcp_add_ext(skb, gfp))) {
1230 skb_reserve(skb, MAX_TCP_HEADER);
1231 skb->ip_summed = CHECKSUM_PARTIAL;
1232 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
1233 return skb;
1234 }
1235 __kfree_skb(skb);
1236 } else {
1237 mptcp_enter_memory_pressure(sk);
1238 }
1239 return NULL;
1240 }
1241
1242 static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
1243 {
1244 struct sk_buff *skb;
1245
1246 skb = __mptcp_do_alloc_tx_skb(sk, gfp);
1247 if (!skb)
1248 return NULL;
1249
1250 if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
1251 tcp_skb_entail(ssk, skb);
1252 return skb;
1253 }
1254 tcp_skb_tsorted_anchor_cleanup(skb);
1255 kfree_skb(skb);
1256 return NULL;
1257 }
1258
1259 static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
1260 {
1261 gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;
1262
1263 return __mptcp_alloc_tx_skb(sk, ssk, gfp);
1264 }
1265
1266 /* note: this always recomputes the csum on the whole skb, even
1267 * if we just appended a single frag. More state info would be needed to avoid that.
1268 */
1269 static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
1270 {
1271 struct mptcp_ext *mpext = mptcp_get_ext(skb);
1272 __wsum csum = ~csum_unfold(mpext->csum);
1273 int offset = skb->len - added;
1274
1275 mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset));
1276 }
1277
1278 static void mptcp_update_infinite_map(struct mptcp_sock *msk,
1279 struct sock *ssk,
1280 struct mptcp_ext *mpext)
1281 {
1282 if (!mpext)
1283 return;
1284
1285 mpext->infinite_map = 1;
1286 mpext->data_len = 0;
1287
1288 if (!mptcp_try_fallback(ssk, MPTCP_MIB_INFINITEMAPTX)) {
1289 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_FALLBACKFAILED);
1290 mptcp_subflow_reset(ssk);
1291 return;
1292 }
1293
1294 mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
1295 }
1296
1297 #define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
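/* Added note (interpretation, not from the original source): this caps the
 * subflow gso_max_size in mptcp_sendmsg_frag() so that the size_goal derived
 * from it by tcp_send_mss(), plus the MAX_TCP_HEADER reserve used at tx skb
 * allocation, stays within the legacy 64KB GSO skb limit.
 */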
1298
1299 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
1300 struct mptcp_data_frag *dfrag,
1301 struct mptcp_sendmsg_info *info)
1302 {
1303 u64 data_seq = dfrag->data_seq + info->sent;
1304 int offset = dfrag->offset + info->sent;
1305 struct mptcp_sock *msk = mptcp_sk(sk);
1306 bool zero_window_probe = false;
1307 struct mptcp_ext *mpext = NULL;
1308 bool can_coalesce = false;
1309 bool reuse_skb = true;
1310 struct sk_buff *skb;
1311 size_t copy;
1312 int i;
1313
1314 pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
1315 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
1316
1317 if (WARN_ON_ONCE(info->sent > info->limit ||
1318 info->limit > dfrag->data_len))
1319 return 0;
1320
1321 if (unlikely(!__tcp_can_send(ssk)))
1322 return -EAGAIN;
1323
1324 /* compute send limit */
1325 if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE))
1326 ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE;
1327 info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
1328 copy = info->size_goal;
1329
1330 skb = tcp_write_queue_tail(ssk);
1331 if (skb && copy > skb->len) {
1332 /* Limit the write to the size available in the
1333 * current skb, if any, so that we create at most one new skb.
1334 * Explicitly tell TCP internals to avoid collapsing on later
1335 * queue management operations, to avoid breaking the ext <->
1336 * SSN association set here
1337 */
1338 mpext = mptcp_get_ext(skb);
1339 if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
1340 TCP_SKB_CB(skb)->eor = 1;
1341 tcp_mark_push(tcp_sk(ssk), skb);
1342 goto alloc_skb;
1343 }
1344
1345 i = skb_shinfo(skb)->nr_frags;
1346 can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
1347 if (!can_coalesce && i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {
1348 tcp_mark_push(tcp_sk(ssk), skb);
1349 goto alloc_skb;
1350 }
1351
1352 copy -= skb->len;
1353 } else {
1354 alloc_skb:
1355 skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held);
1356 if (!skb)
1357 return -ENOMEM;
1358
1359 i = skb_shinfo(skb)->nr_frags;
1360 reuse_skb = false;
1361 mpext = mptcp_get_ext(skb);
1362 }
1363
1364 /* Zero window and all data acked? Probe. */
1365 copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy);
1366 if (copy == 0) {
1367 u64 snd_una = READ_ONCE(msk->snd_una);
1368
1369 /* No need for zero probe if there is any data pending
1370 * either at the msk or ssk level; skb is the current write
1371 * queue tail and can be empty at this point.
1372 */
1373 if (snd_una != msk->snd_nxt || skb->len ||
1374 skb != tcp_send_head(ssk)) {
1375 tcp_remove_empty_skb(ssk);
1376 return 0;
1377 }
1378
1379 zero_window_probe = true;
1380 data_seq = snd_una - 1;
1381 copy = 1;
1382 }
1383
1384 copy = min_t(size_t, copy, info->limit - info->sent);
1385 if (!sk_wmem_schedule(ssk, copy)) {
1386 tcp_remove_empty_skb(ssk);
1387 return -ENOMEM;
1388 }
1389
1390 if (can_coalesce) {
1391 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1392 } else {
1393 get_page(dfrag->page);
1394 skb_fill_page_desc(skb, i, dfrag->page, offset, copy);
1395 }
1396
1397 skb->len += copy;
1398 skb->data_len += copy;
1399 skb->truesize += copy;
1400 sk_wmem_queued_add(ssk, copy);
1401 sk_mem_charge(ssk, copy);
1402 WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy);
1403 TCP_SKB_CB(skb)->end_seq += copy;
1404 tcp_skb_pcount_set(skb, 0);
1405
1406 /* on skb reuse we just need to update the DSS len */
1407 if (reuse_skb) {
1408 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1409 mpext->data_len += copy;
1410 goto out;
1411 }
1412
1413 memset(mpext, 0, sizeof(*mpext));
1414 mpext->data_seq = data_seq;
1415 mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
1416 mpext->data_len = copy;
1417 mpext->use_map = 1;
1418 mpext->dsn64 = 1;
1419
1420 pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
1421 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
1422 mpext->dsn64);
1423
1424 if (zero_window_probe) {
1425 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_WINPROBE);
1426 mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
1427 mpext->frozen = 1;
1428 if (READ_ONCE(msk->csum_enabled))
1429 mptcp_update_data_checksum(skb, copy);
1430 tcp_push_pending_frames(ssk);
1431 return 0;
1432 }
1433 out:
1434 if (READ_ONCE(msk->csum_enabled))
1435 mptcp_update_data_checksum(skb, copy);
1436 if (mptcp_subflow_ctx(ssk)->send_infinite_map)
1437 mptcp_update_infinite_map(msk, ssk, mpext);
1438 trace_mptcp_sendmsg_frag(mpext);
1439 mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
1440
1441 /* if this is the last chunk of a dfrag with MSG_EOR set,
1442 * mark the skb to prevent coalescing with subsequent data.
1443 */
1444 if (dfrag->eor && info->sent + copy >= dfrag->data_len)
1445 TCP_SKB_CB(skb)->eor = 1;
1446
1447 return copy;
1448 }
1449
1450 #define MPTCP_SEND_BURST_SIZE ((1 << 16) - \
1451 sizeof(struct tcphdr) - \
1452 MAX_TCP_OPTION_SPACE - \
1453 sizeof(struct ipv6hdr) - \
1454 sizeof(struct frag_hdr))
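/* Added note: with the fixed 20-byte TCP header, 40 bytes of TCP option
 * space, a 40-byte IPv6 header and an 8-byte fragment header, this evaluates
 * to 65536 - 108 = 65428 bytes per scheduling burst.
 */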
1455
1456 struct subflow_send_info {
1457 struct sock *ssk;
1458 u64 linger_time;
1459 };
1460
1461 void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow)
1462 {
1463 if (!subflow->stale)
1464 return;
1465
1466 subflow->stale = 0;
1467 MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER);
1468 }
1469
1470 bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
1471 {
1472 if (unlikely(subflow->stale)) {
1473 u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp);
1474
1475 if (subflow->stale_rcv_tstamp == rcv_tstamp)
1476 return false;
1477
1478 mptcp_subflow_set_active(subflow);
1479 }
1480 return __mptcp_subflow_active(subflow);
1481 }
1482
1483 #define SSK_MODE_ACTIVE 0
1484 #define SSK_MODE_BACKUP 1
1485 #define SSK_MODE_MAX 2
1486
1487 /* implement the mptcp packet scheduler;
1488 * returns the subflow that will transmit the next DSS
1489 * additionally updates the rtx timeout
1490 */
1491 struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
1492 {
1493 struct subflow_send_info send_info[SSK_MODE_MAX];
1494 struct mptcp_subflow_context *subflow;
1495 struct sock *sk = (struct sock *)msk;
1496 u32 pace, burst, wmem;
1497 int i, nr_active = 0;
1498 struct sock *ssk;
1499 u64 linger_time;
1500 long tout = 0;
1501
1502 /* pick the subflow with the lower wmem/wspace ratio */
1503 for (i = 0; i < SSK_MODE_MAX; ++i) {
1504 send_info[i].ssk = NULL;
1505 send_info[i].linger_time = -1;
1506 }
1507
1508 mptcp_for_each_subflow(msk, subflow) {
1509 bool backup = subflow->backup || subflow->request_bkup;
1510
1511 trace_mptcp_subflow_get_send(subflow);
1512 ssk = mptcp_subflow_tcp_sock(subflow);
1513 if (!mptcp_subflow_active(subflow))
1514 continue;
1515
1516 tout = max(tout, mptcp_timeout_from_subflow(subflow));
1517 nr_active += !backup;
1518 pace = subflow->avg_pacing_rate;
1519 if (unlikely(!pace)) {
1520 /* init pacing rate from socket */
1521 subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate);
1522 pace = subflow->avg_pacing_rate;
1523 if (!pace)
1524 continue;
1525 }
1526
1527 linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
1528 if (linger_time < send_info[backup].linger_time) {
1529 send_info[backup].ssk = ssk;
1530 send_info[backup].linger_time = linger_time;
1531 }
1532 }
1533 __mptcp_set_timeout(sk, tout);
1534
1535 /* pick the best backup if no other subflow is active */
1536 if (!nr_active)
1537 send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk;
1538
1539 /* According to the blest algorithm, to avoid HoL blocking for the
1540 * faster flow, we need to:
1541 * - estimate the faster flow linger time
1542 * - use the above to estimate the amount of bytes transferred
1543 * by the faster flow
1544 * - check that the amount of queued data is greater than the above,
1545 * otherwise do not use the picked, slower, subflow
1546 * We select the subflow with the shorter estimated time to flush
1547 * the queued mem, which basically ensures the above. We just need
1548 * to check that the subflow has a non-empty cwin.
1549 */
1550 ssk = send_info[SSK_MODE_ACTIVE].ssk;
1551 if (!ssk || !sk_stream_memory_free(ssk))
1552 return NULL;
1553
1554 burst = min(MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt);
1555 wmem = READ_ONCE(ssk->sk_wmem_queued);
1556 if (!burst)
1557 return ssk;
1558
1559 subflow = mptcp_subflow_ctx(ssk);
1560 subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem +
1561 READ_ONCE(ssk->sk_pacing_rate) * burst,
1562 burst + wmem);
1563 msk->snd_burst = burst;
1564 return ssk;
1565 }
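/* Pacing blend example for mptcp_subflow_get_send() (added, illustrative):
 * with avg_pacing_rate = 10MB/s, wmem = 64KB already queued on the picked
 * subflow, sk_pacing_rate = 20MB/s and a 64KB burst, the new average is
 * (10 * 64K + 20 * 64K) / (64K + 64K) = 15MB/s, i.e. a queue-weighted blend
 * of the previous estimate and the instantaneous TCP pacing rate.
 */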
1566
1567 static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info)
1568 {
1569 tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
1570 release_sock(ssk);
1571 }
1572
1573 static void mptcp_update_post_push(struct mptcp_sock *msk,
1574 struct mptcp_data_frag *dfrag,
1575 u32 sent)
1576 {
1577 u64 snd_nxt_new = dfrag->data_seq;
1578
1579 dfrag->already_sent += sent;
1580
1581 msk->snd_burst -= sent;
1582
1583 snd_nxt_new += dfrag->already_sent;
1584
1585 /* snd_nxt_new can be smaller than snd_nxt in case mptcp
1586 * is recovering after a failover. In that event, this re-sends
1587 * old segments.
1588 *
1589 * Thus compute snd_nxt_new candidate based on
1590 * the dfrag->data_seq that was sent and the data
1591 * that has been handed to the subflow for transmission
1592 * and skip the update in case it was an old dfrag.
1593 */
1594 if (likely(after64(snd_nxt_new, msk->snd_nxt))) {
1595 msk->bytes_sent += snd_nxt_new - msk->snd_nxt;
1596 WRITE_ONCE(msk->snd_nxt, snd_nxt_new);
1597 }
1598 }
1599
1600 void mptcp_check_and_set_pending(struct sock *sk)
1601 {
1602 if (mptcp_send_head(sk)) {
1603 mptcp_data_lock(sk);
1604 mptcp_sk(sk)->cb_flags |= BIT(MPTCP_PUSH_PENDING);
1605 mptcp_data_unlock(sk);
1606 }
1607 }
1608
1609 static int __subflow_push_pending(struct sock *sk, struct sock *ssk,
1610 struct mptcp_sendmsg_info *info)
1611 {
1612 struct mptcp_sock *msk = mptcp_sk(sk);
1613 struct mptcp_data_frag *dfrag;
1614 int len, copied = 0, err = 0;
1615
1616 while ((dfrag = mptcp_send_head(sk))) {
1617 info->sent = dfrag->already_sent;
1618 info->limit = dfrag->data_len;
1619 len = dfrag->data_len - dfrag->already_sent;
1620 while (len > 0) {
1621 int ret = 0;
1622
1623 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, info);
1624 if (ret <= 0) {
1625 err = copied ? : ret;
1626 goto out;
1627 }
1628
1629 info->sent += ret;
1630 copied += ret;
1631 len -= ret;
1632
1633 mptcp_update_post_push(msk, dfrag, ret);
1634 }
1635 msk->first_pending = mptcp_send_next(sk);
1636
1637 if (msk->snd_burst <= 0 ||
1638 !sk_stream_memory_free(ssk) ||
1639 !mptcp_subflow_active(mptcp_subflow_ctx(ssk))) {
1640 err = copied;
1641 goto out;
1642 }
1643 mptcp_set_timeout(sk);
1644 }
1645 err = copied;
1646
1647 out:
1648 if (err > 0)
1649 msk->last_data_sent = tcp_jiffies32;
1650 return err;
1651 }
1652
1653 void __mptcp_push_pending(struct sock *sk, unsigned int flags)
1654 {
1655 struct sock *prev_ssk = NULL, *ssk = NULL;
1656 struct mptcp_sock *msk = mptcp_sk(sk);
1657 struct mptcp_sendmsg_info info = {
1658 .flags = flags,
1659 };
1660 bool copied = false;
1661 int push_count = 1;
1662
1663 while (mptcp_send_head(sk) && (push_count > 0)) {
1664 struct mptcp_subflow_context *subflow;
1665 int ret = 0;
1666
1667 if (mptcp_sched_get_send(msk))
1668 break;
1669
1670 push_count = 0;
1671
1672 mptcp_for_each_subflow(msk, subflow) {
1673 if (READ_ONCE(subflow->scheduled)) {
1674 mptcp_subflow_set_scheduled(subflow, false);
1675
1676 prev_ssk = ssk;
1677 ssk = mptcp_subflow_tcp_sock(subflow);
1678 if (ssk != prev_ssk) {
1679 /* First check. If the ssk has changed since
1680 * the last round, release prev_ssk
1681 */
1682 if (prev_ssk)
1683 mptcp_push_release(prev_ssk, &info);
1684
1685 /* Need to lock the new subflow only if different
1686 * from the previous one, otherwise we are still
1687 * holding the relevant lock
1688 */
1689 lock_sock(ssk);
1690 }
1691
1692 push_count++;
1693
1694 ret = __subflow_push_pending(sk, ssk, &info);
1695 if (ret <= 0) {
1696 if (ret != -EAGAIN ||
1697 (1 << ssk->sk_state) &
1698 (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSE))
1699 push_count--;
1700 continue;
1701 }
1702 copied = true;
1703 }
1704 }
1705 }
1706
1707 /* at this point we still hold the socket lock for the last subflow we used */
1708 if (ssk)
1709 mptcp_push_release(ssk, &info);
1710
1711 /* Avoid scheduling the rtx timer if no data has been pushed; the timer
1712 * will be updated on positive acks by __mptcp_clean_una().
1713 */
1714 if (copied) {
1715 if (!mptcp_rtx_timer_pending(sk))
1716 mptcp_reset_rtx_timer(sk);
1717 mptcp_check_send_data_fin(sk);
1718 }
1719 }
1720
1721 static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first)
1722 {
1723 struct mptcp_sock *msk = mptcp_sk(sk);
1724 struct mptcp_sendmsg_info info = {
1725 .data_lock_held = true,
1726 };
1727 bool keep_pushing = true;
1728 struct sock *xmit_ssk;
1729 int copied = 0;
1730
1731 info.flags = 0;
1732 while (mptcp_send_head(sk) && keep_pushing) {
1733 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1734 int ret = 0;
1735
1736 /* check for a different subflow usage only after
1737 * spooling the first chunk of data
1738 */
1739 if (first) {
1740 mptcp_subflow_set_scheduled(subflow, false);
1741 ret = __subflow_push_pending(sk, ssk, &info);
1742 first = false;
1743 if (ret <= 0)
1744 break;
1745 copied += ret;
1746 continue;
1747 }
1748
1749 if (mptcp_sched_get_send(msk))
1750 goto out;
1751
1752 if (READ_ONCE(subflow->scheduled)) {
1753 mptcp_subflow_set_scheduled(subflow, false);
1754 ret = __subflow_push_pending(sk, ssk, &info);
1755 if (ret <= 0)
1756 keep_pushing = false;
1757 copied += ret;
1758 }
1759
1760 mptcp_for_each_subflow(msk, subflow) {
1761 if (READ_ONCE(subflow->scheduled)) {
1762 xmit_ssk = mptcp_subflow_tcp_sock(subflow);
1763 if (xmit_ssk != ssk) {
1764 mptcp_subflow_delegate(subflow,
1765 MPTCP_DELEGATE_SEND);
1766 keep_pushing = false;
1767 }
1768 }
1769 }
1770 }
1771
1772 out:
1773 /* __mptcp_alloc_tx_skb could have released some wmem and we are
1774 * not going to flush it via release_sock()
1775 */
1776 if (copied) {
1777 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
1778 info.size_goal);
1779 if (!mptcp_rtx_timer_pending(sk))
1780 mptcp_reset_rtx_timer(sk);
1781
1782 if (msk->snd_data_fin_enable &&
1783 msk->snd_nxt + 1 == msk->write_seq)
1784 mptcp_schedule_work(sk);
1785 }
1786 }
1787
1788 static int mptcp_disconnect(struct sock *sk, int flags);
1789
1790 static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1791 size_t len, int *copied_syn)
1792 {
1793 unsigned int saved_flags = msg->msg_flags;
1794 struct mptcp_sock *msk = mptcp_sk(sk);
1795 struct sock *ssk;
1796 int ret;
1797
1798 /* on flags-based fastopen MPTCP is supposed to create the
1799 * first subflow right now. Otherwise we are in the defer_connect
1800 * path, and the first subflow must already be present.
1801 * Since the defer_connect flag is cleared after the first successful
1802 * fastopen attempt, no need to check for additional subflow status.
1803 */
1804 if (msg->msg_flags & MSG_FASTOPEN) {
1805 ssk = __mptcp_nmpc_sk(msk);
1806 if (IS_ERR(ssk))
1807 return PTR_ERR(ssk);
1808 }
1809 if (!msk->first)
1810 return -EINVAL;
1811
1812 ssk = msk->first;
1813
1814 lock_sock(ssk);
1815 msg->msg_flags |= MSG_DONTWAIT;
1816 msk->fastopening = 1;
1817 ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL);
1818 msk->fastopening = 0;
1819 msg->msg_flags = saved_flags;
1820 release_sock(ssk);
1821
1822 /* do the blocking bits of inet_stream_connect outside the ssk socket lock */
1823 if (ret == -EINPROGRESS && !(msg->msg_flags & MSG_DONTWAIT)) {
1824 ret = __inet_stream_connect(sk->sk_socket, msg->msg_name,
1825 msg->msg_namelen, msg->msg_flags, 1);
1826
1827 /* Keep the same behaviour of plain TCP: zero the copied bytes in
1828 * case of any error, except timeout or signal
1829 */
1830 if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR)
1831 *copied_syn = 0;
1832 } else if (ret && ret != -EINPROGRESS) {
1833 /* The disconnect() op called by tcp_sendmsg_fastopen()/
1834 * __inet_stream_connect() can fail due to the locking check,
1835 * see mptcp_disconnect().
1836 * Attempt it again outside the problematic scope.
1837 */
1838 if (!mptcp_disconnect(sk, 0)) {
1839 sk->sk_disconnects++;
1840 sk->sk_socket->state = SS_UNCONNECTED;
1841 }
1842 }
1843 inet_clear_bit(DEFER_CONNECT, sk);
1844
1845 return ret;
1846 }
1847
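/* Copy data from the user iov into the kernel buffer, using the
 * cache-bypassing variant when the route supports NETIF_F_NOCACHE_COPY;
 * return -EFAULT on a short copy.
 */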
1848 static int do_copy_data_nocache(struct sock *sk, int copy,
1849 struct iov_iter *from, char *to)
1850 {
1851 if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
1852 if (!copy_from_iter_full_nocache(to, copy, from))
1853 return -EFAULT;
1854 } else if (!copy_from_iter_full(to, copy, from)) {
1855 return -EFAULT;
1856 }
1857 return 0;
1858 }
1859
1860 /* open-code sk_stream_memory_free() plus sent limit computation to
1861 * avoid indirect calls in the fast path.
1862 * Called under the msk socket lock, so we can avoid a bunch of ONCE
1863 * annotations.
1864 */
1865 static u32 mptcp_send_limit(const struct sock *sk)
1866 {
1867 const struct mptcp_sock *msk = mptcp_sk(sk);
1868 u32 limit, not_sent;
1869
1870 if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf))
1871 return 0;
1872
1873 limit = mptcp_notsent_lowat(sk);
1874 if (limit == UINT_MAX)
1875 return UINT_MAX;
1876
1877 not_sent = msk->write_seq - msk->snd_nxt;
1878 if (not_sent >= limit)
1879 return 0;
1880
1881 return limit - not_sent;
1882 }
1883
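/* Record an RFS flow hint for every subflow, so that receive flow
 * steering follows the underlying TCP flows rather than the msk.
 */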
1884 static void mptcp_rps_record_subflows(const struct mptcp_sock *msk)
1885 {
1886 struct mptcp_subflow_context *subflow;
1887
1888 if (!rfs_is_needed())
1889 return;
1890
1891 mptcp_for_each_subflow(msk, subflow) {
1892 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1893
1894 sock_rps_record_flow(ssk);
1895 }
1896 }
1897
1898 static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1899 {
1900 struct mptcp_sock *msk = mptcp_sk(sk);
1901 struct page_frag *pfrag;
1902 size_t copied = 0;
1903 int ret = 0;
1904 long timeo;
1905
1906 /* silently ignore everything else */
1907 msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1908 MSG_FASTOPEN | MSG_EOR;
1909
1910 lock_sock(sk);
1911
1912 mptcp_rps_record_subflows(msk);
1913
1914 if (unlikely(inet_test_bit(DEFER_CONNECT, sk) ||
1915 msg->msg_flags & MSG_FASTOPEN)) {
1916 int copied_syn = 0;
1917
1918 ret = mptcp_sendmsg_fastopen(sk, msg, len, &copied_syn);
1919 copied += copied_syn;
1920 if (ret == -EINPROGRESS && copied_syn > 0)
1921 goto out;
1922 else if (ret)
1923 goto do_error;
1924 }
1925
1926 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1927
1928 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
1929 ret = sk_stream_wait_connect(sk, &timeo);
1930 if (ret)
1931 goto do_error;
1932 }
1933
1934 ret = -EPIPE;
1935 if (unlikely(sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)))
1936 goto do_error;
1937
1938 pfrag = sk_page_frag(sk);
1939
1940 while (msg_data_left(msg)) {
1941 int total_ts, frag_truesize = 0;
1942 struct mptcp_data_frag *dfrag;
1943 bool dfrag_collapsed;
1944 size_t psize, offset;
1945 u32 copy_limit;
1946
1947 /* ensure fitting the notsent_lowat() constraint */
1948 copy_limit = mptcp_send_limit(sk);
1949 if (!copy_limit)
1950 goto wait_for_memory;
1951
1952 /* reuse tail pfrag, if possible, or carve a new one from the
1953 * page allocator
1954 */
1955 dfrag = mptcp_pending_tail(sk);
1956 dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
1957 if (!dfrag_collapsed) {
1958 if (!mptcp_page_frag_refill(sk, pfrag))
1959 goto wait_for_memory;
1960
1961 dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset);
1962 frag_truesize = dfrag->overhead;
1963 }
1964
1965 /* we do not bound vs wspace, to allow a single packet.
1966 * memory accounting will prevent excessive memory usage
1967 * anyway
1968 */
1969 offset = dfrag->offset + dfrag->data_len;
1970 psize = pfrag->size - offset;
1971 psize = min_t(size_t, psize, msg_data_left(msg));
1972 psize = min_t(size_t, psize, copy_limit);
1973 total_ts = psize + frag_truesize;
1974
1975 if (!sk_wmem_schedule(sk, total_ts))
1976 goto wait_for_memory;
1977
1978 ret = do_copy_data_nocache(sk, psize, &msg->msg_iter,
1979 page_address(dfrag->page) + offset);
1980 if (ret)
1981 goto do_error;
1982
1983 /* data successfully copied into the write queue */
1984 sk_forward_alloc_add(sk, -total_ts);
1985 copied += psize;
1986 dfrag->data_len += psize;
1987 frag_truesize += psize;
1988 pfrag->offset += frag_truesize;
1989 WRITE_ONCE(msk->write_seq, msk->write_seq + psize);
1990
1991 /* charge data on mptcp pending queue to the msk socket
1992 * Note: we charge such data both to sk and ssk
1993 */
1994 sk_wmem_queued_add(sk, frag_truesize);
1995 if (!dfrag_collapsed) {
1996 get_page(dfrag->page);
1997 list_add_tail(&dfrag->list, &msk->rtx_queue);
1998 if (!msk->first_pending)
1999 msk->first_pending = dfrag;
2000 }
2001 pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
2002 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
2003 !dfrag_collapsed);
2004
2005 continue;
2006
2007 wait_for_memory:
2008 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2009 __mptcp_push_pending(sk, msg->msg_flags);
2010 ret = sk_stream_wait_memory(sk, &timeo);
2011 if (ret)
2012 goto do_error;
2013 }
2014
2015 if (copied) {
2016 /* mark the last dfrag with EOR if MSG_EOR was set */
2017 if (msg->msg_flags & MSG_EOR) {
2018 struct mptcp_data_frag *dfrag = mptcp_pending_tail(sk);
2019
2020 if (dfrag)
2021 dfrag->eor = 1;
2022 }
2023 __mptcp_push_pending(sk, msg->msg_flags);
2024 }
2025
2026 out:
2027 release_sock(sk);
2028 return copied;
2029
2030 do_error:
2031 if (copied)
2032 goto out;
2033
2034 copied = sk_stream_error(sk, msg->msg_flags, ret);
2035 goto out;
2036 }
2037
2038 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
2039
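/* Unlink a fully consumed skb from the msk receive queue, drop its
 * rmem and forward-alloc charges and hand it to the deferred free
 * infrastructure.
 */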
2040 static void mptcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
2041 {
2042 /* avoid the indirect call, we know the destructor is sock_rfree */
2043 skb->destructor = NULL;
2044 skb->sk = NULL;
2045 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
2046 sk_mem_uncharge(sk, skb->truesize);
2047 __skb_unlink(skb, &sk->sk_receive_queue);
2048 skb_attempt_defer_free(skb);
2049 }
2050
2051 static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg,
2052 size_t len, int flags, int copied_total,
2053 struct scm_timestamping_internal *tss,
2054 int *cmsg_flags, struct sk_buff **last)
2055 {
2056 struct mptcp_sock *msk = mptcp_sk(sk);
2057 struct sk_buff *skb, *tmp;
2058 int total_data_len = 0;
2059 int copied = 0;
2060
2061 skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
2062 u32 delta, offset = MPTCP_SKB_CB(skb)->offset;
2063 u32 data_len = skb->len - offset;
2064 u32 count;
2065 int err;
2066
2067 if (flags & MSG_PEEK) {
2068 /* skip already peeked skbs */
2069 if (total_data_len + data_len <= copied_total) {
2070 total_data_len += data_len;
2071 *last = skb;
2072 continue;
2073 }
2074
2075 /* skip the already peeked data in the current skb */
2076 delta = copied_total - total_data_len;
2077 offset += delta;
2078 data_len -= delta;
2079 }
2080
2081 count = min_t(size_t, len - copied, data_len);
2082 if (!(flags & MSG_TRUNC)) {
2083 err = skb_copy_datagram_msg(skb, offset, msg, count);
2084 if (unlikely(err < 0)) {
2085 if (!copied)
2086 return err;
2087 break;
2088 }
2089 }
2090
2091 if (MPTCP_SKB_CB(skb)->has_rxtstamp) {
2092 tcp_update_recv_tstamps(skb, tss);
2093 *cmsg_flags |= MPTCP_CMSG_TS;
2094 }
2095
2096 copied += count;
2097
2098 if (!(flags & MSG_PEEK)) {
2099 msk->bytes_consumed += count;
2100 if (count < data_len) {
2101 MPTCP_SKB_CB(skb)->offset += count;
2102 MPTCP_SKB_CB(skb)->map_seq += count;
2103 break;
2104 }
2105
2106 mptcp_eat_recv_skb(sk, skb);
2107 } else {
2108 *last = skb;
2109 }
2110
2111 if (copied >= len)
2112 break;
2113 }
2114
2115 mptcp_rcv_space_adjust(msk, copied);
2116 return copied;
2117 }
2118
2119 static void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
2120 {
2121 const struct tcp_sock *tp = tcp_sk(ssk);
2122
2123 msk->rcvspace_init = 1;
2124 msk->rcvq_space.copied = 0;
2125
2126 /* initial rcv_space offering made to peer */
2127 msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
2128 TCP_INIT_CWND * tp->advmss);
2129 if (msk->rcvq_space.space == 0)
2130 msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
2131 }
2132
2133 /* receive buffer autotuning. See tcp_rcv_space_adjust for more information.
2134 *
2135 * Only difference: use the lowest rtt estimate of the subflows in use, see
2136 * mptcp_rcv_rtt_update() and mptcp_rtt_us_est().
2137 */
2138 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
2139 {
2140 struct mptcp_subflow_context *subflow;
2141 struct sock *sk = (struct sock *)msk;
2142 u32 time, rtt_us;
2143 u64 mstamp;
2144
2145 msk_owned_by_me(msk);
2146
2147 if (copied <= 0)
2148 return;
2149
2150 if (!msk->rcvspace_init)
2151 mptcp_rcv_space_init(msk, msk->first);
2152
2153 msk->rcvq_space.copied += copied;
2154
2155 mstamp = mptcp_stamp();
2156 time = tcp_stamp_us_delta(mstamp, READ_ONCE(msk->rcvq_space.time));
2157
2158 rtt_us = mptcp_rtt_us_est(msk);
2159 if (rtt_us == U32_MAX || time < (rtt_us >> 3))
2160 return;
2161
2162 copied = msk->rcvq_space.copied;
2163 copied -= mptcp_inq_hint(sk);
2164 if (copied <= msk->rcvq_space.space)
2165 goto new_measure;
2166
2167 trace_mptcp_rcvbuf_grow(sk, time);
2168 if (mptcp_rcvbuf_grow(sk, copied)) {
2169 /* Make subflows follow along. If we do not do this, we
2170 * get drops at subflow level if skbs can't be moved to
2171 * the mptcp rx queue fast enough (announced rcv_win can
2172 * exceed ssk->sk_rcvbuf).
2173 */
2174 mptcp_for_each_subflow(msk, subflow) {
2175 struct sock *ssk;
2176 bool slow;
2177
2178 ssk = mptcp_subflow_tcp_sock(subflow);
2179 slow = lock_sock_fast(ssk);
2180 /* subflows can be added before tcp_init_transfer() */
2181 if (tcp_sk(ssk)->rcvq_space.space)
2182 tcp_rcvbuf_grow(ssk, copied);
2183 unlock_sock_fast(ssk, slow);
2184 }
2185 }
2186
2187 new_measure:
2188 msk->rcvq_space.copied = 0;
2189 msk->rcvq_space.time = mstamp;
2190 }
2191
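/* Move the skbs spooled from the backlog into the msk receive path,
 * accounting the processed truesize in *delta; stop early if the msk
 * receive buffer fills up.
 */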
2192 static bool __mptcp_move_skbs(struct sock *sk, struct list_head *skbs, u32 *delta)
2193 {
2194 struct sk_buff *skb = list_first_entry(skbs, struct sk_buff, list);
2195 struct mptcp_sock *msk = mptcp_sk(sk);
2196 bool moved = false;
2197
2198 *delta = 0;
2199 while (1) {
2200 /* If the msk recvbuf is full, stop; don't drop */
2201 if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
2202 break;
2203
2204 prefetch(skb->next);
2205 list_del(&skb->list);
2206 *delta += skb->truesize;
2207
2208 moved |= __mptcp_move_skb(sk, skb);
2209 if (list_empty(skbs))
2210 break;
2211
2212 skb = list_first_entry(skbs, struct sk_buff, list);
2213 }
2214
2215 __mptcp_ofo_queue(msk);
2216 if (moved)
2217 mptcp_check_data_fin((struct sock *)msk);
2218 return moved;
2219 }
2220
2221 static bool mptcp_can_spool_backlog(struct sock *sk, struct list_head *skbs)
2222 {
2223 struct mptcp_sock *msk = mptcp_sk(sk);
2224
2225 /* After CG initialization, subflows should never add skbs before
2226 * gaining the CG themselves.
2227 */
2228 DEBUG_NET_WARN_ON_ONCE(msk->backlog_unaccounted && sk->sk_socket &&
2229 mem_cgroup_from_sk(sk));
2230
2231 /* Don't spool the backlog if the rcvbuf is full. */
2232 if (list_empty(&msk->backlog_list) ||
2233 sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
2234 return false;
2235
2236 INIT_LIST_HEAD(skbs);
2237 list_splice_init(&msk->backlog_list, skbs);
2238 return true;
2239 }
2240
2241 static void mptcp_backlog_spooled(struct sock *sk, u32 moved,
2242 struct list_head *skbs)
2243 {
2244 struct mptcp_sock *msk = mptcp_sk(sk);
2245
2246 WRITE_ONCE(msk->backlog_len, msk->backlog_len - moved);
2247 list_splice(skbs, &msk->backlog_list);
2248 }
2249
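/* Spool the backlog in batches, dropping the msk data lock while the
 * skbs are actually moved and re-acquiring it only to update the
 * backlog accounting.
 */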
2250 static bool mptcp_move_skbs(struct sock *sk)
2251 {
2252 struct list_head skbs;
2253 bool enqueued = false;
2254 u32 moved;
2255
2256 mptcp_data_lock(sk);
2257 while (mptcp_can_spool_backlog(sk, &skbs)) {
2258 mptcp_data_unlock(sk);
2259 enqueued |= __mptcp_move_skbs(sk, &skbs, &moved);
2260
2261 mptcp_data_lock(sk);
2262 mptcp_backlog_spooled(sk, moved, &skbs);
2263 }
2264 mptcp_data_unlock(sk);
2265 return enqueued;
2266 }
2267
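/* Estimate the amount of in-sequence data ready to be read: the
 * distance between the first queued mapping and the current ack_seq.
 */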
2268 static unsigned int mptcp_inq_hint(const struct sock *sk)
2269 {
2270 const struct mptcp_sock *msk = mptcp_sk(sk);
2271 const struct sk_buff *skb;
2272
2273 skb = skb_peek(&sk->sk_receive_queue);
2274 if (skb) {
2275 u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq;
2276
2277 if (hint_val >= INT_MAX)
2278 return INT_MAX;
2279
2280 return (unsigned int)hint_val;
2281 }
2282
2283 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
2284 return 1;
2285
2286 return 0;
2287 }
2288
2289 static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
2290 int flags)
2291 {
2292 struct mptcp_sock *msk = mptcp_sk(sk);
2293 struct scm_timestamping_internal tss;
2294 int copied = 0, cmsg_flags = 0;
2295 int target;
2296 long timeo;
2297
2298 /* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */
2299 if (unlikely(flags & MSG_ERRQUEUE))
2300 return inet_recv_error(sk, msg, len);
2301
2302 lock_sock(sk);
2303 if (unlikely(sk->sk_state == TCP_LISTEN)) {
2304 copied = -ENOTCONN;
2305 goto out_err;
2306 }
2307
2308 mptcp_rps_record_subflows(msk);
2309
2310 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2311
2312 len = min_t(size_t, len, INT_MAX);
2313 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2314
2315 if (unlikely(msk->recvmsg_inq))
2316 cmsg_flags = MPTCP_CMSG_INQ;
2317
2318 while (copied < len) {
2319 struct sk_buff *last = NULL;
2320 int err, bytes_read;
2321
2322 bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags,
2323 copied, &tss, &cmsg_flags,
2324 &last);
2325 if (unlikely(bytes_read < 0)) {
2326 if (!copied)
2327 copied = bytes_read;
2328 goto out_err;
2329 }
2330
2331 copied += bytes_read;
2332
2333 if (!list_empty(&msk->backlog_list) && mptcp_move_skbs(sk))
2334 continue;
2335
2336 /* only the MPTCP socket status is relevant here. The exit
2337 * conditions closely mirror tcp_recvmsg()
2338 */
2339 if (copied >= target)
2340 break;
2341
2342 if (copied) {
2343 if (tcp_recv_should_stop(sk) ||
2344 !timeo)
2345 break;
2346 } else {
2347 if (sk->sk_err) {
2348 copied = sock_error(sk);
2349 break;
2350 }
2351
2352 if (sk->sk_shutdown & RCV_SHUTDOWN)
2353 break;
2354
2355 if (sk->sk_state == TCP_CLOSE) {
2356 copied = -ENOTCONN;
2357 break;
2358 }
2359
2360 if (!timeo) {
2361 copied = -EAGAIN;
2362 break;
2363 }
2364
2365 if (signal_pending(current)) {
2366 copied = sock_intr_errno(timeo);
2367 break;
2368 }
2369 }
2370
2371 pr_debug("block timeout %ld\n", timeo);
2372 mptcp_cleanup_rbuf(msk, copied);
2373 err = sk_wait_data(sk, &timeo, last);
2374 if (err < 0) {
2375 err = copied ? : err;
2376 goto out_err;
2377 }
2378 }
2379
2380 mptcp_cleanup_rbuf(msk, copied);
2381
2382 out_err:
2383 if (cmsg_flags && copied >= 0) {
2384 if (cmsg_flags & MPTCP_CMSG_TS)
2385 tcp_recv_timestamp(msg, sk, &tss);
2386
2387 if (cmsg_flags & MPTCP_CMSG_INQ) {
2388 unsigned int inq = mptcp_inq_hint(sk);
2389
2390 put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
2391 }
2392 }
2393
2394 pr_debug("msk=%p rx queue empty=%d copied=%d\n",
2395 msk, skb_queue_empty(&sk->sk_receive_queue), copied);
2396
2397 release_sock(sk);
2398 return copied;
2399 }
2400
2401 static void mptcp_retransmit_timer(struct timer_list *t)
2402 {
2403 struct sock *sk = timer_container_of(sk, t, mptcp_retransmit_timer);
2404 struct mptcp_sock *msk = mptcp_sk(sk);
2405
2406 bh_lock_sock(sk);
2407 if (!sock_owned_by_user(sk)) {
2408 /* we need a process context to retransmit */
2409 if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags))
2410 mptcp_schedule_work(sk);
2411 } else {
2412 /* delegate our work to tcp_release_cb() */
2413 __set_bit(MPTCP_RETRANSMIT, &msk->cb_flags);
2414 }
2415 bh_unlock_sock(sk);
2416 sock_put(sk);
2417 }
2418
2419 static void mptcp_tout_timer(struct timer_list *t)
2420 {
2421 struct inet_connection_sock *icsk =
2422 timer_container_of(icsk, t, mptcp_tout_timer);
2423 struct sock *sk = &icsk->icsk_inet.sk;
2424
2425 mptcp_schedule_work(sk);
2426 sock_put(sk);
2427 }
2428
2429 /* Find an idle subflow. Return NULL if there is unacked data at tcp
2430 * level.
2431 *
2432 * A backup subflow is returned only if that is the only kind available.
2433 */
2434 struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
2435 {
2436 struct sock *backup = NULL, *pick = NULL;
2437 struct mptcp_subflow_context *subflow;
2438 int min_stale_count = INT_MAX;
2439
2440 mptcp_for_each_subflow(msk, subflow) {
2441 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2442
2443 if (!__mptcp_subflow_active(subflow))
2444 continue;
2445
2446 /* still data outstanding at TCP level? skip this */
2447 if (!tcp_rtx_and_write_queues_empty(ssk)) {
2448 mptcp_pm_subflow_chk_stale(msk, ssk);
2449 min_stale_count = min_t(int, min_stale_count, subflow->stale_count);
2450 continue;
2451 }
2452
2453 if (subflow->backup || subflow->request_bkup) {
2454 if (!backup)
2455 backup = ssk;
2456 continue;
2457 }
2458
2459 if (!pick)
2460 pick = ssk;
2461 }
2462
2463 if (pick)
2464 return pick;
2465
2466 /* use the backup only if there is no progress anywhere */
2467 return min_stale_count > 1 ? backup : NULL;
2468 }
2469
2470 bool __mptcp_retransmit_pending_data(struct sock *sk)
2471 {
2472 struct mptcp_data_frag *cur, *rtx_head;
2473 struct mptcp_sock *msk = mptcp_sk(sk);
2474
2475 if (__mptcp_check_fallback(msk))
2476 return false;
2477
2478 /* the closing socket has some data untransmitted and/or unacked:
2479 * some data in the mptcp rtx queue has not really been xmitted yet.
2480 * Keep it simple and re-inject the whole mptcp-level rtx queue
2481 */
2482 mptcp_data_lock(sk);
2483 __mptcp_clean_una_wakeup(sk);
2484 rtx_head = mptcp_rtx_head(sk);
2485 if (!rtx_head) {
2486 mptcp_data_unlock(sk);
2487 return false;
2488 }
2489
2490 msk->recovery_snd_nxt = msk->snd_nxt;
2491 msk->recovery = true;
2492 mptcp_data_unlock(sk);
2493
2494 msk->first_pending = rtx_head;
2495 msk->snd_burst = 0;
2496
2497 /* be sure to clear the "sent status" on all re-injected fragments */
2498 list_for_each_entry(cur, &msk->rtx_queue, list) {
2499 if (!cur->already_sent)
2500 break;
2501 cur->already_sent = 0;
2502 }
2503
2504 return true;
2505 }
2506
2507 /* flags for __mptcp_close_ssk() */
2508 #define MPTCP_CF_PUSH BIT(1)
2509
2510 /* be sure to send a reset only if the caller asked for it, also
2511 * completely clean the subflow status when the subflow reaches
2512 * TCP_CLOSE state
2513 */
2514 static void __mptcp_subflow_disconnect(struct sock *ssk,
2515 struct mptcp_subflow_context *subflow,
2516 bool fastclosing)
2517 {
2518 if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
2519 fastclosing) {
2520 /* The MPTCP code never waits on the subflow sockets, so the
2521 * TCP-level disconnect should never fail
2522 */
2523 WARN_ON_ONCE(tcp_disconnect(ssk, 0));
2524 mptcp_subflow_ctx_reset(subflow);
2525 } else {
2526 tcp_shutdown(ssk, SEND_SHUTDOWN);
2527 }
2528 }
2529
2530 /* subflow sockets can be either outgoing (connect) or incoming
2531 * (accept).
2532 *
2533 * Outgoing subflows use in-kernel sockets.
2534 * Incoming subflows do not have their own 'struct socket' allocated,
2535 * so we need to use tcp_close() after detaching them from the mptcp
2536 * parent socket.
2537 */
2538 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2539 struct mptcp_subflow_context *subflow,
2540 unsigned int flags)
2541 {
2542 struct mptcp_sock *msk = mptcp_sk(sk);
2543 bool dispose_it, need_push = false;
2544 int fwd_remaining;
2545
2546 /* Do not pass RX data to the msk, even if the subflow socket is not
2547 * going to be freed (i.e. even for the first subflow on graceful
2548 * subflow close).
2549 */
2550 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
2551 subflow->closing = 1;
2552
2553 /* Borrow the fwd allocated page left-over; fwd memory for the subflow
2554 * could be negative at this point, but will reach zero soon - when
2555 * the data allocated using such a fragment is freed.
2556 */
2557 if (subflow->lent_mem_frag) {
2558 fwd_remaining = PAGE_SIZE - subflow->lent_mem_frag;
2559 sk_forward_alloc_add(sk, fwd_remaining);
2560 sk_forward_alloc_add(ssk, -fwd_remaining);
2561 subflow->lent_mem_frag = 0;
2562 }
2563
2564 /* If the first subflow moved to a close state before accept, e.g. due
2565 * to an incoming reset or listener shutdown, the subflow socket is
2566 * already deleted by inet_child_forget() and the mptcp socket can't
2567 * survive either.
2568 */
2569 if (msk->in_accept_queue && msk->first == ssk &&
2570 (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) {
2571 /* ensure later check in mptcp_worker() will dispose the msk */
2572 sock_set_flag(sk, SOCK_DEAD);
2573 mptcp_set_close_tout(sk, tcp_jiffies32 - (mptcp_close_timeout(sk) + 1));
2574 mptcp_subflow_drop_ctx(ssk);
2575 goto out_release;
2576 }
2577
2578 dispose_it = msk->free_first || ssk != msk->first;
2579 if (dispose_it)
2580 list_del(&subflow->node);
2581
2582 if (subflow->send_fastclose && ssk->sk_state != TCP_CLOSE)
2583 tcp_set_state(ssk, TCP_CLOSE);
2584
2585 need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
2586 if (!dispose_it) {
2587 __mptcp_subflow_disconnect(ssk, subflow, msk->fastclosing);
2588 release_sock(ssk);
2589
2590 goto out;
2591 }
2592
2593 subflow->disposable = 1;
2594
2595 /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops:
2596 * the ssk has already been destroyed, we just need to release the
2597 * reference owned by the msk;
2598 */
2599 if (!inet_csk(ssk)->icsk_ulp_ops) {
2600 WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
2601 kfree_rcu(subflow, rcu);
2602 } else {
2603 /* otherwise tcp will dispose of the ssk and subflow ctx */
2604 __tcp_close(ssk, 0);
2605
2606 /* close acquired an extra ref */
2607 __sock_put(ssk);
2608 }
2609
2610 out_release:
2611 __mptcp_subflow_error_report(sk, ssk);
2612 release_sock(ssk);
2613
2614 sock_put(ssk);
2615
2616 if (ssk == msk->first)
2617 WRITE_ONCE(msk->first, NULL);
2618
2619 out:
2620 __mptcp_sync_sndbuf(sk);
2621 if (need_push)
2622 __mptcp_push_pending(sk, 0);
2623
2624 /* Catch every 'all subflows closed' scenario, including peers silently
2625 * closing them, e.g. due to timeout.
2626 * For established sockets, allow an additional timeout before closing,
2627 * as the protocol can still create more subflows.
2628 */
2629 if (list_is_singular(&msk->conn_list) && msk->first &&
2630 inet_sk_state_load(msk->first) == TCP_CLOSE) {
2631 if (sk->sk_state != TCP_ESTABLISHED ||
2632 msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) {
2633 mptcp_set_state(sk, TCP_CLOSE);
2634 mptcp_close_wake_up(sk);
2635 } else {
2636 mptcp_start_tout_timer(sk);
2637 }
2638 }
2639 }
2640
2641 void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2642 struct mptcp_subflow_context *subflow)
2643 {
2644 struct mptcp_sock *msk = mptcp_sk(sk);
2645 struct sk_buff *skb;
2646
2647 /* The first subflow can already be closed or disconnected */
2648 if (subflow->close_event_done || READ_ONCE(subflow->local_id) < 0)
2649 return;
2650
2651 subflow->close_event_done = true;
2652
2653 if (sk->sk_state == TCP_ESTABLISHED)
2654 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
2655
2656 /* Remove any reference from the backlog to this ssk; backlog skbs consume
2657 * space in the msk receive queue, no need to touch sk->sk_rmem_alloc
2658 */
2659 list_for_each_entry(skb, &msk->backlog_list, list) {
2660 if (skb->sk != ssk)
2661 continue;
2662
2663 atomic_sub(skb->truesize, &skb->sk->sk_rmem_alloc);
2664 skb->sk = NULL;
2665 }
2666
2667 /* subflow aborted before reaching the fully_established status:
2668 * attempt the creation of the next subflow
2669 */
2670 mptcp_pm_subflow_check_next(mptcp_sk(sk), subflow);
2671
2672 __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH);
2673 }
2674
2675 static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
2676 {
2677 return 0;
2678 }
2679
2680 static void __mptcp_close_subflow(struct sock *sk)
2681 {
2682 struct mptcp_subflow_context *subflow, *tmp;
2683 struct mptcp_sock *msk = mptcp_sk(sk);
2684
2685 might_sleep();
2686
2687 mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2688 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2689 int ssk_state = inet_sk_state_load(ssk);
2690
2691 if (ssk_state != TCP_CLOSE &&
2692 (ssk_state != TCP_CLOSE_WAIT ||
2693 inet_sk_state_load(sk) != TCP_ESTABLISHED ||
2694 __mptcp_check_fallback(msk)))
2695 continue;
2696
2697 /* 'subflow_data_ready' will re-sched once rx queue is empty */
2698 if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
2699 continue;
2700
2701 mptcp_close_ssk(sk, ssk, subflow);
2702 }
2703
2704 }
2705
2706 static bool mptcp_close_tout_expired(const struct sock *sk)
2707 {
2708 if (!inet_csk(sk)->icsk_mtup.probe_timestamp ||
2709 sk->sk_state == TCP_CLOSE)
2710 return false;
2711
2712 return time_after32(tcp_jiffies32,
2713 inet_csk(sk)->icsk_mtup.probe_timestamp + mptcp_close_timeout(sk));
2714 }
2715
2716 static void mptcp_check_fastclose(struct mptcp_sock *msk)
2717 {
2718 struct mptcp_subflow_context *subflow, *tmp;
2719 struct sock *sk = (struct sock *)msk;
2720
2721 if (likely(!READ_ONCE(msk->rcv_fastclose)))
2722 return;
2723
2724 mptcp_token_destroy(msk);
2725
2726 mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2727 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2728 bool slow;
2729
2730 slow = lock_sock_fast(tcp_sk);
2731 if (tcp_sk->sk_state != TCP_CLOSE) {
2732 mptcp_send_active_reset_reason(tcp_sk);
2733 tcp_set_state(tcp_sk, TCP_CLOSE);
2734 }
2735 unlock_sock_fast(tcp_sk, slow);
2736 }
2737
2738 /* Mirror the tcp_reset() error propagation */
2739 switch (sk->sk_state) {
2740 case TCP_SYN_SENT:
2741 WRITE_ONCE(sk->sk_err, ECONNREFUSED);
2742 break;
2743 case TCP_CLOSE_WAIT:
2744 WRITE_ONCE(sk->sk_err, EPIPE);
2745 break;
2746 case TCP_CLOSE:
2747 return;
2748 default:
2749 WRITE_ONCE(sk->sk_err, ECONNRESET);
2750 }
2751
2752 mptcp_set_state(sk, TCP_CLOSE);
2753 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
2754 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
2755 set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
2756
2757 /* the calling mptcp_worker will properly destroy the socket */
2758 if (sock_flag(sk, SOCK_DEAD))
2759 return;
2760
2761 sk->sk_state_change(sk);
2762 sk_error_report(sk);
2763 }
2764
2765 static void __mptcp_retrans(struct sock *sk)
2766 {
2767 struct mptcp_sendmsg_info info = { .data_lock_held = true, };
2768 struct mptcp_sock *msk = mptcp_sk(sk);
2769 struct mptcp_subflow_context *subflow;
2770 struct mptcp_data_frag *dfrag;
2771 struct sock *ssk;
2772 int ret, err;
2773 u16 len = 0;
2774
2775 mptcp_clean_una_wakeup(sk);
2776
2777 /* first check ssk: need to kick "stale" logic */
2778 err = mptcp_sched_get_retrans(msk);
2779 dfrag = mptcp_rtx_head(sk);
2780 if (!dfrag) {
2781 if (mptcp_data_fin_enabled(msk)) {
2782 struct inet_connection_sock *icsk = inet_csk(sk);
2783
2784 WRITE_ONCE(icsk->icsk_retransmits,
2785 icsk->icsk_retransmits + 1);
2786 mptcp_set_datafin_timeout(sk);
2787 mptcp_send_ack(msk);
2788
2789 goto reset_timer;
2790 }
2791
2792 if (!mptcp_send_head(sk))
2793 goto clear_scheduled;
2794
2795 goto reset_timer;
2796 }
2797
2798 if (err)
2799 goto reset_timer;
2800
2801 mptcp_for_each_subflow(msk, subflow) {
2802 if (READ_ONCE(subflow->scheduled)) {
2803 u16 copied = 0;
2804
2805 mptcp_subflow_set_scheduled(subflow, false);
2806
2807 ssk = mptcp_subflow_tcp_sock(subflow);
2808
2809 lock_sock(ssk);
2810
2811 /* limit retransmission to the bytes already sent on some subflows */
2812 info.sent = 0;
2813 info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len :
2814 dfrag->already_sent;
2815
2816 /*
2817 * make the whole retrans decision, xmit and fallback
2818 * disabling sequence atomic; note that we can't retrans even
2819 * when an infinite fallback is in progress, i.e. new
2820 * subflows are disallowed.
2821 */
2822 spin_lock_bh(&msk->fallback_lock);
2823 if (__mptcp_check_fallback(msk) ||
2824 !msk->allow_subflows) {
2825 spin_unlock_bh(&msk->fallback_lock);
2826 release_sock(ssk);
2827 goto clear_scheduled;
2828 }
2829
2830 while (info.sent < info.limit) {
2831 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
2832 if (ret <= 0)
2833 break;
2834
2835 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
2836 copied += ret;
2837 info.sent += ret;
2838 }
2839 if (copied) {
2840 len = max(copied, len);
2841 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
2842 info.size_goal);
2843 msk->allow_infinite_fallback = false;
2844 }
2845 spin_unlock_bh(&msk->fallback_lock);
2846
2847 release_sock(ssk);
2848 }
2849 }
2850
2851 msk->bytes_retrans += len;
2852 dfrag->already_sent = max(dfrag->already_sent, len);
2853
2854 reset_timer:
2855 mptcp_check_and_set_pending(sk);
2856
2857 if (!mptcp_rtx_timer_pending(sk))
2858 mptcp_reset_rtx_timer(sk);
2859
2860 clear_scheduled:
2861 /* If no rtx data was available or in case of fallback, there
2862 * could be left-over scheduled subflows; clear them all
2863 * or a later xmit could use bad ones
2864 */
2865 mptcp_for_each_subflow(msk, subflow)
2866 if (READ_ONCE(subflow->scheduled))
2867 mptcp_subflow_set_scheduled(subflow, false);
2868 }
2869
2870 /* schedule the timeout timer for the relevant event: either close timeout
2871 * or mp_fail timeout. The close timeout takes precedence over the mp_fail one
2872 */
2873 void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
2874 {
2875 struct sock *sk = (struct sock *)msk;
2876 unsigned long timeout, close_timeout;
2877
2878 if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp)
2879 return;
2880
2881 close_timeout = (unsigned long)inet_csk(sk)->icsk_mtup.probe_timestamp -
2882 tcp_jiffies32 + jiffies + mptcp_close_timeout(sk);
2883
2884 /* the close timeout takes precedence over the fail one, and here at least one of
2885 * them is active
2886 */
2887 timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout;
2888
2889 sk_reset_timer(sk, &inet_csk(sk)->mptcp_tout_timer, timeout);
2890 }
2891
2892 static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
2893 {
2894 struct sock *ssk = msk->first;
2895 bool slow;
2896
2897 if (!ssk)
2898 return;
2899
2900 pr_debug("MP_FAIL doesn't respond, reset the subflow\n");
2901
2902 slow = lock_sock_fast(ssk);
2903 mptcp_subflow_reset(ssk);
2904 WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
2905 unlock_sock_fast(ssk, slow);
2906 }
2907
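/* Drop every skb still sitting in the msk backlog, adjusting the
 * related memory accounting before freeing.
 */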
2908 static void mptcp_backlog_purge(struct sock *sk)
2909 {
2910 struct mptcp_sock *msk = mptcp_sk(sk);
2911 struct sk_buff *tmp, *skb;
2912 LIST_HEAD(backlog);
2913
2914 mptcp_data_lock(sk);
2915 list_splice_init(&msk->backlog_list, &backlog);
2916 msk->backlog_len = 0;
2917 mptcp_data_unlock(sk);
2918
2919 list_for_each_entry_safe(skb, tmp, &backlog, list) {
2920 mptcp_borrow_fwdmem(sk, skb);
2921 kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
2922 }
2923 sk_mem_reclaim(sk);
2924 }
2925
2926 static void mptcp_do_fastclose(struct sock *sk)
2927 {
2928 struct mptcp_subflow_context *subflow, *tmp;
2929 struct mptcp_sock *msk = mptcp_sk(sk);
2930
2931 mptcp_set_state(sk, TCP_CLOSE);
2932 mptcp_backlog_purge(sk);
2933 msk->fastclosing = 1;
2934
2935 /* Explicitly send the fastclose reset as needed */
2936 if (__mptcp_check_fallback(msk))
2937 return;
2938
2939 mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2940 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2941
2942 lock_sock(ssk);
2943
2944 /* Some subflow socket states don't allow/need a reset.*/
2945 if ((1 << ssk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
2946 goto unlock;
2947
2948 subflow->send_fastclose = 1;
2949
2950 /* Initialize rcv_mss to TCP_MIN_MSS to avoid a division by 0
2951 * issue in __tcp_select_window(), see tcp_disconnect().
2952 */
2953 inet_csk(ssk)->icsk_ack.rcv_mss = TCP_MIN_MSS;
2954
2955 tcp_send_active_reset(ssk, ssk->sk_allocation,
2956 SK_RST_REASON_TCP_ABORT_ON_CLOSE);
2957 unlock:
2958 release_sock(ssk);
2959 }
2960 }
2961
2962 static void mptcp_worker(struct work_struct *work)
2963 {
2964 struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
2965 struct sock *sk = (struct sock *)msk;
2966 unsigned long fail_tout;
2967 int state;
2968
2969 lock_sock(sk);
2970 state = sk->sk_state;
2971 if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN)))
2972 goto unlock;
2973
2974 mptcp_check_fastclose(msk);
2975
2976 mptcp_pm_worker(msk);
2977
2978 mptcp_check_send_data_fin(sk);
2979 mptcp_check_data_fin_ack(sk);
2980 mptcp_check_data_fin(sk);
2981
2982 if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
2983 __mptcp_close_subflow(sk);
2984
2985 if (mptcp_close_tout_expired(sk)) {
2986 struct mptcp_subflow_context *subflow, *tmp;
2987
2988 mptcp_do_fastclose(sk);
2989 mptcp_for_each_subflow_safe(msk, subflow, tmp)
2990 __mptcp_close_ssk(sk, subflow->tcp_sock, subflow, 0);
2991 mptcp_close_wake_up(sk);
2992 }
2993
2994 if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) {
2995 __mptcp_destroy_sock(sk);
2996 goto unlock;
2997 }
2998
2999 if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
3000 __mptcp_retrans(sk);
3001
3002 fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0;
3003 if (fail_tout && time_after(jiffies, fail_tout))
3004 mptcp_mp_fail_no_response(msk);
3005
3006 unlock:
3007 release_sock(sk);
3008 sock_put(sk);
3009 }
3010
3011 static void __mptcp_init_sock(struct sock *sk)
3012 {
3013 struct mptcp_sock *msk = mptcp_sk(sk);
3014
3015 INIT_LIST_HEAD(&msk->conn_list);
3016 INIT_LIST_HEAD(&msk->join_list);
3017 INIT_LIST_HEAD(&msk->rtx_queue);
3018 INIT_LIST_HEAD(&msk->backlog_list);
3019 INIT_WORK(&msk->work, mptcp_worker);
3020 msk->out_of_order_queue = RB_ROOT;
3021 msk->first_pending = NULL;
3022 msk->timer_ival = TCP_RTO_MIN;
3023 msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
3024 msk->backlog_len = 0;
3025 mptcp_init_rtt_est(msk);
3026
3027 WRITE_ONCE(msk->first, NULL);
3028 inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
3029 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
3030 msk->allow_infinite_fallback = true;
3031 msk->allow_subflows = true;
3032 msk->recovery = false;
3033 msk->subflow_id = 1;
3034 msk->last_data_sent = tcp_jiffies32;
3035 msk->last_data_recv = tcp_jiffies32;
3036 msk->last_ack_recv = tcp_jiffies32;
3037
3038 mptcp_pm_data_init(msk);
3039 spin_lock_init(&msk->fallback_lock);
3040
3041 /* re-use the csk retrans timer for MPTCP-level retrans */
3042 timer_setup(&sk->mptcp_retransmit_timer, mptcp_retransmit_timer, 0);
3043 timer_setup(&msk->sk.mptcp_tout_timer, mptcp_tout_timer, 0);
3044 }
3045
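/* Assign the configured congestion control only to latch its name into
 * msk->ca_name, so it can later be propagated to the subflows; the msk
 * itself does not run a congestion control algorithm.
 */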
3046 static void mptcp_ca_reset(struct sock *sk)
3047 {
3048 struct inet_connection_sock *icsk = inet_csk(sk);
3049
3050 tcp_assign_congestion_control(sk);
3051 strscpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name,
3052 sizeof(mptcp_sk(sk)->ca_name));
3053
3054 /* no need to keep a reference to the ops, the name will suffice */
3055 tcp_cleanup_congestion_control(sk);
3056 icsk->icsk_ca_ops = NULL;
3057 }
3058
3059 static int mptcp_init_sock(struct sock *sk)
3060 {
3061 struct net *net = sock_net(sk);
3062 int ret;
3063
3064 __mptcp_init_sock(sk);
3065
3066 if (!mptcp_is_enabled(net))
3067 return -ENOPROTOOPT;
3068
3069 if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
3070 return -ENOMEM;
3071
3072 rcu_read_lock();
3073 ret = mptcp_init_sched(mptcp_sk(sk),
3074 mptcp_sched_find(mptcp_get_scheduler(net)));
3075 rcu_read_unlock();
3076 if (ret)
3077 return ret;
3078
3079 set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
3080
3081 /* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
3082 * propagate the correct value
3083 */
3084 mptcp_ca_reset(sk);
3085
3086 sk_sockets_allocated_inc(sk);
3087 sk->sk_rcvbuf = READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]);
3088 sk->sk_sndbuf = READ_ONCE(net->ipv4.sysctl_tcp_wmem[1]);
3089 sk->sk_write_space = sk_stream_write_space;
3090
3091 return 0;
3092 }
3093
3094 static void __mptcp_clear_xmit(struct sock *sk)
3095 {
3096 struct mptcp_sock *msk = mptcp_sk(sk);
3097 struct mptcp_data_frag *dtmp, *dfrag;
3098
3099 msk->first_pending = NULL;
3100 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
3101 dfrag_clear(sk, dfrag);
3102 }
3103
3104 void mptcp_cancel_work(struct sock *sk)
3105 {
3106 struct mptcp_sock *msk = mptcp_sk(sk);
3107
3108 if (cancel_work_sync(&msk->work))
3109 __sock_put(sk);
3110 }
3111
3112 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
3113 {
3114 lock_sock(ssk);
3115
3116 switch (ssk->sk_state) {
3117 case TCP_LISTEN:
3118 if (!(how & RCV_SHUTDOWN))
3119 break;
3120 fallthrough;
3121 case TCP_SYN_SENT:
3122 WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK));
3123 break;
3124 default:
3125 if (__mptcp_check_fallback(mptcp_sk(sk))) {
3126 pr_debug("Fallback\n");
3127 ssk->sk_shutdown |= how;
3128 tcp_shutdown(ssk, how);
3129
3130 /* simulate the data_fin ack reception to let the state
3131 * machine move forward
3132 */
3133 WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
3134 mptcp_schedule_work(sk);
3135 } else {
3136 pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
3137 tcp_send_ack(ssk);
3138 if (!mptcp_rtx_timer_pending(sk))
3139 mptcp_reset_rtx_timer(sk);
3140 }
3141 break;
3142 }
3143
3144 release_sock(ssk);
3145 }
3146
3147 void mptcp_set_state(struct sock *sk, int state)
3148 {
3149 int oldstate = sk->sk_state;
3150
3151 switch (state) {
3152 case TCP_ESTABLISHED:
3153 if (oldstate != TCP_ESTABLISHED)
3154 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
3155 break;
3156 case TCP_CLOSE_WAIT:
3157 /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state:
3158 * MPTCP "accepted" sockets will be created later on. So no
3159 * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT.
3160 */
3161 break;
3162 default:
3163 if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
3164 MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
3165 }
3166
3167 inet_sk_state_store(sk, state);
3168 }
3169
3170 static const unsigned char new_state[16] = {
3171 /* current state: new state: action: */
3172 [0 /* (Invalid) */] = TCP_CLOSE,
3173 [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
3174 [TCP_SYN_SENT] = TCP_CLOSE,
3175 [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
3176 [TCP_FIN_WAIT1] = TCP_FIN_WAIT1,
3177 [TCP_FIN_WAIT2] = TCP_FIN_WAIT2,
3178 [TCP_TIME_WAIT] = TCP_CLOSE, /* should not happen ! */
3179 [TCP_CLOSE] = TCP_CLOSE,
3180 [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN,
3181 [TCP_LAST_ACK] = TCP_LAST_ACK,
3182 [TCP_LISTEN] = TCP_CLOSE,
3183 [TCP_CLOSING] = TCP_CLOSING,
3184 [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */
3185 };
3186
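/* Step the msk close-state machine, mirroring tcp_close_state(); the
 * return value is non-zero when a DATA_FIN must be sent.
 */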
3187 static int mptcp_close_state(struct sock *sk)
3188 {
3189 int next = (int)new_state[sk->sk_state];
3190 int ns = next & TCP_STATE_MASK;
3191
3192 mptcp_set_state(sk, ns);
3193
3194 return next & TCP_ACTION_FIN;
3195 }
3196
3197 static void mptcp_check_send_data_fin(struct sock *sk)
3198 {
3199 struct mptcp_subflow_context *subflow;
3200 struct mptcp_sock *msk = mptcp_sk(sk);
3201
3202 pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n",
3203 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
3204 msk->snd_nxt, msk->write_seq);
3205
3206 /* we still need to enqueue data on the subflows or we are not really
3207 * shutting down: skip this
3208 */
3209 if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq ||
3210 mptcp_send_head(sk))
3211 return;
3212
3213 WRITE_ONCE(msk->snd_nxt, msk->write_seq);
3214
3215 mptcp_for_each_subflow(msk, subflow) {
3216 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
3217
3218 mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN);
3219 }
3220 }
3221
3222 static void __mptcp_wr_shutdown(struct sock *sk)
3223 {
3224 struct mptcp_sock *msk = mptcp_sk(sk);
3225
3226 pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n",
3227 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
3228 !!mptcp_send_head(sk));
3229
3230 /* will be ignored by fallback sockets */
3231 WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
3232 WRITE_ONCE(msk->snd_data_fin_enable, 1);
3233
3234 mptcp_check_send_data_fin(sk);
3235 }
3236
3237 static void __mptcp_destroy_sock(struct sock *sk)
3238 {
3239 struct mptcp_sock *msk = mptcp_sk(sk);
3240
3241 pr_debug("msk=%p\n", msk);
3242
3243 might_sleep();
3244
3245 mptcp_stop_rtx_timer(sk);
3246 sk_stop_timer(sk, &inet_csk(sk)->mptcp_tout_timer);
3247 msk->pm.status = 0;
3248 mptcp_release_sched(msk);
3249
3250 sk->sk_prot->destroy(sk);
3251
3252 sk_stream_kill_queues(sk);
3253 xfrm_sk_free_policy(sk);
3254
3255 sock_put(sk);
3256 }
3257
3258 void __mptcp_unaccepted_force_close(struct sock *sk)
3259 {
3260 sock_set_flag(sk, SOCK_DEAD);
3261 mptcp_do_fastclose(sk);
3262 __mptcp_destroy_sock(sk);
3263 }
3264
3265 static __poll_t mptcp_check_readable(struct sock *sk)
3266 {
3267 return mptcp_epollin_ready(sk) ? EPOLLIN | EPOLLRDNORM : 0;
3268 }
3269
3270 static void mptcp_check_listen_stop(struct sock *sk)
3271 {
3272 struct sock *ssk;
3273
3274 if (inet_sk_state_load(sk) != TCP_LISTEN)
3275 return;
3276
3277 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
3278 ssk = mptcp_sk(sk)->first;
3279 if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN))
3280 return;
3281
3282 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
3283 tcp_set_state(ssk, TCP_CLOSE);
3284 mptcp_subflow_queue_clean(sk, ssk);
3285 inet_csk_listen_stop(ssk);
3286 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
3287 release_sock(ssk);
3288 }
3289
3290 bool __mptcp_close(struct sock *sk, long timeout)
3291 {
3292 struct mptcp_subflow_context *subflow;
3293 struct mptcp_sock *msk = mptcp_sk(sk);
3294 bool do_cancel_work = false;
3295 int subflows_alive = 0;
3296
3297 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
3298
3299 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
3300 mptcp_check_listen_stop(sk);
3301 mptcp_set_state(sk, TCP_CLOSE);
3302 goto cleanup;
3303 }
3304
3305 if (mptcp_data_avail(msk) || timeout < 0 ||
3306 (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
3307 /* If the msk has unread data, or the caller explicitly asks for it,
3308 * do the MPTCP equivalent of a TCP reset, aka MPTCP fastclose
3309 */
3310 mptcp_do_fastclose(sk);
3311 timeout = 0;
3312 } else if (mptcp_close_state(sk)) {
3313 __mptcp_wr_shutdown(sk);
3314 }
3315
3316 sk_stream_wait_close(sk, timeout);
3317
3318 cleanup:
3319 /* orphan all the subflows */
3320 mptcp_for_each_subflow(msk, subflow) {
3321 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
3322 bool slow = lock_sock_fast_nested(ssk);
3323
3324 subflows_alive += ssk->sk_state != TCP_CLOSE;
3325
3326 /* since the close timeout takes precedence over the fail one,
3327 * cancel the latter
3328 */
3329 if (ssk == msk->first)
3330 subflow->fail_tout = 0;
3331
3332 /* detach from the parent socket, but allow data_ready to
3333 * push incoming data into the mptcp stack, to properly ack it
3334 */
3335 ssk->sk_socket = NULL;
3336 ssk->sk_wq = NULL;
3337 unlock_sock_fast(ssk, slow);
3338 }
3339 sock_orphan(sk);
3340
3341 /* all the subflows are closed, only timeout can change the msk
3342 * state, let's not keep resources busy for no reasons
3343 */
3344 if (subflows_alive == 0)
3345 mptcp_set_state(sk, TCP_CLOSE);
3346
3347 sock_hold(sk);
3348 pr_debug("msk=%p state=%d\n", sk, sk->sk_state);
3349 mptcp_pm_connection_closed(msk);
3350
3351 if (sk->sk_state == TCP_CLOSE) {
3352 __mptcp_destroy_sock(sk);
3353 do_cancel_work = true;
3354 } else {
3355 mptcp_start_tout_timer(sk);
3356 }
3357
3358 return do_cancel_work;
3359 }
3360
3361 static void mptcp_close(struct sock *sk, long timeout)
3362 {
3363 bool do_cancel_work;
3364
3365 lock_sock(sk);
3366
3367 do_cancel_work = __mptcp_close(sk, timeout);
3368 release_sock(sk);
3369 if (do_cancel_work)
3370 mptcp_cancel_work(sk);
3371
3372 sock_put(sk);
3373 }
3374
3375 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
3376 {
3377 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3378 const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
3379 struct ipv6_pinfo *msk6 = inet6_sk(msk);
3380
3381 msk->sk_v6_daddr = ssk->sk_v6_daddr;
3382 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;
3383
3384 if (msk6 && ssk6) {
3385 msk6->saddr = ssk6->saddr;
3386 msk6->flow_label = ssk6->flow_label;
3387 }
3388 #endif
3389
3390 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
3391 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
3392 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
3393 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
3394 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
3395 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
3396 }
3397
3398 static void mptcp_destroy_common(struct mptcp_sock *msk)
3399 {
3400 struct mptcp_subflow_context *subflow, *tmp;
3401 struct sock *sk = (struct sock *)msk;
3402
3403 __mptcp_clear_xmit(sk);
3404 mptcp_backlog_purge(sk);
3405
3406 /* join list will eventually be flushed (with rst) at sock lock release time */
3407 mptcp_for_each_subflow_safe(msk, subflow, tmp)
3408 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, 0);
3409
3410 __skb_queue_purge(&sk->sk_receive_queue);
3411 skb_rbtree_purge(&msk->out_of_order_queue);
3412
3413 /* move all the rx fwd alloc into the sk_mem_reclaim_final in
3414 * inet_sock_destruct(), which will dispose of it
3415 */
3416 mptcp_token_destroy(msk);
3417 mptcp_pm_destroy(msk);
3418 }
3419
3420 static int mptcp_disconnect(struct sock *sk, int flags)
3421 {
3422 struct mptcp_sock *msk = mptcp_sk(sk);
3423
3424 /* We are on the fastopen error path. We can't call straight into the
3425 * subflows cleanup code due to lock nesting (we are already under
3426 * the msk->first socket lock).
3427 */
3428 if (msk->fastopening)
3429 return -EBUSY;
3430
3431 mptcp_check_listen_stop(sk);
3432 mptcp_set_state(sk, TCP_CLOSE);
3433
3434 mptcp_stop_rtx_timer(sk);
3435 mptcp_stop_tout_timer(sk);
3436
3437 mptcp_pm_connection_closed(msk);
3438
3439 /* msk->subflow is still intact, the following will not free the first
3440 * subflow
3441 */
3442 mptcp_do_fastclose(sk);
3443 mptcp_destroy_common(msk);
3444
3445 /* The first subflow is already in TCP_CLOSE status, the following
3446 * can't overlap with a fallback anymore
3447 */
3448 spin_lock_bh(&msk->fallback_lock);
3449 msk->allow_subflows = true;
3450 msk->allow_infinite_fallback = true;
3451 WRITE_ONCE(msk->flags, 0);
3452 spin_unlock_bh(&msk->fallback_lock);
3453
3454 msk->cb_flags = 0;
3455 msk->recovery = false;
3456 WRITE_ONCE(msk->can_ack, false);
3457 WRITE_ONCE(msk->fully_established, false);
3458 WRITE_ONCE(msk->rcv_data_fin, false);
3459 WRITE_ONCE(msk->snd_data_fin_enable, false);
3460 WRITE_ONCE(msk->rcv_fastclose, false);
3461 WRITE_ONCE(msk->use_64bit_ack, false);
3462 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
3463 mptcp_pm_data_reset(msk);
3464 mptcp_ca_reset(sk);
3465 msk->bytes_consumed = 0;
3466 msk->bytes_acked = 0;
3467 msk->bytes_received = 0;
3468 msk->bytes_sent = 0;
3469 msk->bytes_retrans = 0;
3470 msk->rcvspace_init = 0;
3471 msk->fastclosing = 0;
3472 mptcp_init_rtt_est(msk);
3473
3474 /* for fallback's sake */
3475 WRITE_ONCE(msk->ack_seq, 0);
3476
3477 WRITE_ONCE(sk->sk_shutdown, 0);
3478 sk_error_report(sk);
3479 return 0;
3480 }
3481
3482 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3483 static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
3484 {
3485 struct mptcp6_sock *msk6 = container_of(mptcp_sk(sk), struct mptcp6_sock, msk);
3486
3487 return &msk6->np;
3488 }
3489
3490 static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk)
3491 {
3492 const struct ipv6_pinfo *np = inet6_sk(sk);
3493 struct ipv6_txoptions *opt;
3494 struct ipv6_pinfo *newnp;
3495
3496 newnp = inet6_sk(newsk);
3497
3498 rcu_read_lock();
3499 opt = rcu_dereference(np->opt);
3500 if (opt) {
3501 opt = ipv6_dup_options(newsk, opt);
3502 if (!opt)
3503 net_warn_ratelimited("%s: Failed to copy ip6 options\n", __func__);
3504 }
3505 RCU_INIT_POINTER(newnp->opt, opt);
3506 rcu_read_unlock();
3507 }
3508 #endif
3509
3510 static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk)
3511 {
3512 struct ip_options_rcu *inet_opt, *newopt = NULL;
3513 const struct inet_sock *inet = inet_sk(sk);
3514 struct inet_sock *newinet;
3515
3516 newinet = inet_sk(newsk);
3517
3518 rcu_read_lock();
3519 inet_opt = rcu_dereference(inet->inet_opt);
3520 if (inet_opt) {
3521 newopt = sock_kmemdup(newsk, inet_opt, sizeof(*inet_opt) +
3522 inet_opt->opt.optlen, GFP_ATOMIC);
3523 if (!newopt)
3524 net_warn_ratelimited("%s: Failed to copy ip options\n", __func__);
3525 }
3526 RCU_INIT_POINTER(newinet->inet_opt, newopt);
3527 rcu_read_unlock();
3528 }
3529
3530 struct sock *mptcp_sk_clone_init(const struct sock *sk,
3531 const struct mptcp_options_received *mp_opt,
3532 struct sock *ssk,
3533 struct request_sock *req)
3534 {
3535 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
3536 struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
3537 struct mptcp_subflow_context *subflow;
3538 struct mptcp_sock *msk;
3539
3540 if (!nsk)
3541 return NULL;
3542
3543 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3544 if (nsk->sk_family == AF_INET6)
3545 inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
3546 #endif
3547
3548 __mptcp_init_sock(nsk);
3549
3550 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3551 if (nsk->sk_family == AF_INET6)
3552 mptcp_copy_ip6_options(nsk, sk);
3553 else
3554 #endif
3555 mptcp_copy_ip_options(nsk, sk);
3556
3557 msk = mptcp_sk(nsk);
3558 WRITE_ONCE(msk->local_key, subflow_req->local_key);
3559 WRITE_ONCE(msk->token, subflow_req->token);
3560 msk->in_accept_queue = 1;
3561 WRITE_ONCE(msk->fully_established, false);
3562 if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
3563 WRITE_ONCE(msk->csum_enabled, true);
3564
3565 WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1);
3566 WRITE_ONCE(msk->snd_nxt, msk->write_seq);
3567 WRITE_ONCE(msk->snd_una, msk->write_seq);
3568 WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
3569 msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
3570 mptcp_init_sched(msk, mptcp_sk(sk)->sched);
3571
3572 /* passive msk is created after the first/MPC subflow */
3573 msk->subflow_id = 2;
3574
3575 sock_reset_flag(nsk, SOCK_RCU_FREE);
3576 security_inet_csk_clone(nsk, req);
3577
3578 /* this can't race with mptcp_close(), as the msk is
3579 * not yet exposed to user-space
3580 */
3581 mptcp_set_state(nsk, TCP_ESTABLISHED);
3582
3583 /* The msk maintains a ref to each subflow in the connections list */
3584 WRITE_ONCE(msk->first, ssk);
3585 subflow = mptcp_subflow_ctx(ssk);
3586 list_add(&subflow->node, &msk->conn_list);
3587 sock_hold(ssk);
3588
3589 /* new mpc subflow takes ownership of the newly
3590 * created mptcp socket
3591 */
3592 mptcp_token_accept(subflow_req, msk);
3593
3594 /* set msk addresses early to ensure mptcp_pm_get_local_id()
3595 * uses the correct data
3596 */
3597 mptcp_copy_inaddrs(nsk, ssk);
3598
3599 mptcp_rcv_space_init(msk, ssk);
3600 msk->rcvq_space.time = mptcp_stamp();
3601
3602 if (mp_opt->suboptions & OPTION_MPTCP_MPC_ACK)
3603 __mptcp_subflow_fully_established(msk, subflow, mp_opt);
3604 bh_unlock_sock(nsk);
3605
3606 /* note: the newly allocated socket refcount is 2 now */
3607 return nsk;
3608 }
3609
3610 static void mptcp_destroy(struct sock *sk)
3611 {
3612 struct mptcp_sock *msk = mptcp_sk(sk);
3613
3614 /* allow the following to close even the initial subflow */
3615 msk->free_first = 1;
3616 mptcp_destroy_common(msk);
3617 sk_sockets_allocated_dec(sk);
3618 }
3619
3620 void __mptcp_data_acked(struct sock *sk)
3621 {
3622 if (!sock_owned_by_user(sk))
3623 __mptcp_clean_una(sk);
3624 else
3625 __set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags);
3626 }
3627
3628 void __mptcp_check_push(struct sock *sk, struct sock *ssk)
3629 {
3630 if (!sock_owned_by_user(sk))
3631 __mptcp_subflow_push_pending(sk, ssk, false);
3632 else
3633 __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
3634 }
3635
3636 #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
3637 BIT(MPTCP_RETRANSMIT) | \
3638 BIT(MPTCP_FLUSH_JOIN_LIST))
3639
3640 /* processes deferred events and flushes wmem */
3641 static void mptcp_release_cb(struct sock *sk)
3642 __must_hold(&sk->sk_lock.slock)
3643 {
3644 struct mptcp_sock *msk = mptcp_sk(sk);
3645
3646 for (;;) {
3647 unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED);
3648 struct list_head join_list, skbs;
3649 bool spool_bl;
3650 u32 moved;
3651
3652 spool_bl = mptcp_can_spool_backlog(sk, &skbs);
3653 if (!flags && !spool_bl)
3654 break;
3655
3656 INIT_LIST_HEAD(&join_list);
3657 list_splice_init(&msk->join_list, &join_list);
3658
3659 /* the following actions acquire the subflow socket lock
3660 *
3661 * 1) can't be invoked in atomic scope
3662 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
3663 * datapath acquires the msk socket spinlock while holding
3664 * the subflow socket lock
3665 */
3666 msk->cb_flags &= ~flags;
3667 spin_unlock_bh(&sk->sk_lock.slock);
3668
3669 if (flags & BIT(MPTCP_FLUSH_JOIN_LIST))
3670 __mptcp_flush_join_list(sk, &join_list);
3671 if (flags & BIT(MPTCP_PUSH_PENDING))
3672 __mptcp_push_pending(sk, 0);
3673 if (flags & BIT(MPTCP_RETRANSMIT))
3674 __mptcp_retrans(sk);
3675 if (spool_bl && __mptcp_move_skbs(sk, &skbs, &moved)) {
3676 /* notify ack seq update */
3677 mptcp_cleanup_rbuf(msk, 0);
3678 sk->sk_data_ready(sk);
3679 }
3680
3681 cond_resched();
3682 spin_lock_bh(&sk->sk_lock.slock);
3683 if (spool_bl)
3684 mptcp_backlog_spooled(sk, moved, &skbs);
3685 }
3686
3687 if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
3688 __mptcp_clean_una_wakeup(sk);
3689 if (unlikely(msk->cb_flags)) {
3690 /* be sure to sync the msk state before taking actions
3691 * depending on sk_state (MPTCP_ERROR_REPORT)
3692 * On sk release avoid actions depending on the first subflow
3693 */
3694 if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first)
3695 __mptcp_sync_state(sk, msk->pending_state);
3696 if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
3697 __mptcp_error_report(sk);
3698 if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags))
3699 __mptcp_sync_sndbuf(sk);
3700 }
3701 }
3702
3703 /* MP_JOIN client subflow must wait for 4th ack before sending any data:
3704 * TCP can't schedule delack timer before the subflow is fully established.
3705 * MPTCP uses the delack timer to do 3rd ack retransmissions
3706 */
3707 static void schedule_3rdack_retransmission(struct sock *ssk)
3708 {
3709 struct inet_connection_sock *icsk = inet_csk(ssk);
3710 struct tcp_sock *tp = tcp_sk(ssk);
3711 unsigned long timeout;
3712
3713 if (READ_ONCE(mptcp_subflow_ctx(ssk)->fully_established))
3714 return;
3715
3716 /* reschedule with a timeout above RTT, as we must look only for drop */
3717 if (tp->srtt_us)
3718 timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
3719 else
3720 timeout = TCP_TIMEOUT_INIT;
3721 timeout += jiffies;
3722
3723 WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
3724 smp_store_release(&icsk->icsk_ack.pending,
3725 icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER);
3726 sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
3727 }
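
/* The timeout math above relies on tp->srtt_us storing the smoothed RTT
 * left-shifted by 3 (i.e. 8 * SRTT, in usec), so ">> (3 - 1)" yields
 * roughly 2 * SRTT. A hypothetical, self-contained restatement of the same
 * computation (not part of this file):
 */
#if 0
static unsigned long third_ack_timeout(u32 srtt_us_shifted3)
{
	/* srtt_us_shifted3 == 8 * SRTT in usec; >> 2 gives ~2 * SRTT */
	if (srtt_us_shifted3)
		return usecs_to_jiffies(srtt_us_shifted3 >> (3 - 1));
	return TCP_TIMEOUT_INIT;	/* no RTT sample available yet */
}
#endif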
3728
3729 void mptcp_subflow_process_delegated(struct sock *ssk, long status)
3730 {
3731 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3732 struct sock *sk = subflow->conn;
3733
3734 if (status & BIT(MPTCP_DELEGATE_SEND)) {
3735 mptcp_data_lock(sk);
3736 if (!sock_owned_by_user(sk))
3737 __mptcp_subflow_push_pending(sk, ssk, true);
3738 else
3739 __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
3740 mptcp_data_unlock(sk);
3741 }
3742 if (status & BIT(MPTCP_DELEGATE_SNDBUF)) {
3743 mptcp_data_lock(sk);
3744 if (!sock_owned_by_user(sk))
3745 __mptcp_sync_sndbuf(sk);
3746 else
3747 __set_bit(MPTCP_SYNC_SNDBUF, &mptcp_sk(sk)->cb_flags);
3748 mptcp_data_unlock(sk);
3749 }
3750 if (status & BIT(MPTCP_DELEGATE_ACK))
3751 schedule_3rdack_retransmission(ssk);
3752 }
3753
3754 static int mptcp_hash(struct sock *sk)
3755 {
3756 /* should never be called:
3757 * we hash the TCP subflows, not the MPTCP socket
3758 */
3759 WARN_ON_ONCE(1);
3760 return 0;
3761 }
3762
3763 static void mptcp_unhash(struct sock *sk)
3764 {
3765 /* called from sk_common_release(), but nothing to do here */
3766 }
3767
3768 static int mptcp_get_port(struct sock *sk, unsigned short snum)
3769 {
3770 struct mptcp_sock *msk = mptcp_sk(sk);
3771
3772 pr_debug("msk=%p, ssk=%p\n", msk, msk->first);
3773 if (WARN_ON_ONCE(!msk->first))
3774 return -EINVAL;
3775
3776 return inet_csk_get_port(msk->first, snum);
3777 }
3778
3779 void mptcp_finish_connect(struct sock *ssk)
3780 {
3781 struct mptcp_subflow_context *subflow;
3782 struct mptcp_sock *msk;
3783 struct sock *sk;
3784
3785 subflow = mptcp_subflow_ctx(ssk);
3786 sk = subflow->conn;
3787 msk = mptcp_sk(sk);
3788
3789 pr_debug("msk=%p, token=%u\n", sk, subflow->token);
3790
3791 subflow->map_seq = subflow->iasn;
3792 subflow->map_subflow_seq = 1;
3793
3794 /* the socket is not connected yet, so no msk/subflow ops can race while
3795 * accessing the fields below
3796 */
3797 WRITE_ONCE(msk->local_key, subflow->local_key);
3798 WRITE_ONCE(msk->rcvq_space.time, mptcp_stamp());
3799
3800 mptcp_pm_new_connection(msk, ssk, 0);
3801 }
3802
3803 void mptcp_sock_graft(struct sock *sk, struct socket *parent)
3804 {
3805 write_lock_bh(&sk->sk_callback_lock);
3806 rcu_assign_pointer(sk->sk_wq, &parent->wq);
3807 sk_set_socket(sk, parent);
3808 write_unlock_bh(&sk->sk_callback_lock);
3809 }
3810
3811 /* Can be called without holding the msk socket lock; use the callback lock
3812 * to avoid {READ_,WRITE_}ONCE annotations on sk_socket.
3813 */
3814 static void mptcp_sock_check_graft(struct sock *sk, struct sock *ssk)
3815 {
3816 struct socket *sock;
3817
3818 write_lock_bh(&sk->sk_callback_lock);
3819 sock = sk->sk_socket;
3820 write_unlock_bh(&sk->sk_callback_lock);
3821 if (sock) {
3822 mptcp_sock_graft(ssk, sock);
3823 __mptcp_inherit_cgrp_data(sk, ssk);
3824 __mptcp_inherit_memcg(sk, ssk, GFP_ATOMIC);
3825 }
3826 }
3827
3828 bool mptcp_finish_join(struct sock *ssk)
3829 {
3830 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3831 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
3832 struct sock *parent = (void *)msk;
3833 bool ret = true;
3834
3835 pr_debug("msk=%p, subflow=%p\n", msk, subflow);
3836
3837 /* mptcp socket already closing? */
3838 if (!mptcp_is_fully_established(parent)) {
3839 subflow->reset_reason = MPTCP_RST_EMPTCP;
3840 return false;
3841 }
3842
3843 /* An active subflow is already present inside the conn_list and was
3844 * grafted either by __mptcp_subflow_connect() or by accept.
3845 */
3846 if (!list_empty(&subflow->node)) {
3847 spin_lock_bh(&msk->fallback_lock);
3848 if (!msk->allow_subflows) {
3849 spin_unlock_bh(&msk->fallback_lock);
3850 return false;
3851 }
3852 mptcp_subflow_joined(msk, ssk);
3853 spin_unlock_bh(&msk->fallback_lock);
3854 mptcp_propagate_sndbuf(parent, ssk);
3855 return true;
3856 }
3857
3858 if (!mptcp_pm_allow_new_subflow(msk)) {
3859 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_JOINREJECTED);
3860 goto err_prohibited;
3861 }
3862
3863 /* If we can't acquire msk socket lock here, let the release callback
3864 * handle it
3865 */
3866 mptcp_data_lock(parent);
3867 if (!sock_owned_by_user(parent)) {
3868 ret = __mptcp_finish_join(msk, ssk);
3869 if (ret) {
3870 sock_hold(ssk);
3871 list_add_tail(&subflow->node, &msk->conn_list);
3872 mptcp_sock_check_graft(parent, ssk);
3873 }
3874 } else {
3875 sock_hold(ssk);
3876 list_add_tail(&subflow->node, &msk->join_list);
3877 __set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags);
3878
3879 /* In case of later failures, __mptcp_flush_join_list() will
3880 * properly orphan the ssk via mptcp_close_ssk().
3881 */
3882 mptcp_sock_check_graft(parent, ssk);
3883 }
3884 mptcp_data_unlock(parent);
3885
3886 if (!ret) {
3887 err_prohibited:
3888 subflow->reset_reason = MPTCP_RST_EPROHIBIT;
3889 return false;
3890 }
3891
3892 return true;
3893 }
3894
3895 static void mptcp_shutdown(struct sock *sk, int how)
3896 {
3897 pr_debug("sk=%p, how=%d\n", sk, how);
3898
3899 if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
3900 __mptcp_wr_shutdown(sk);
3901 }
3902
3903 static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
3904 {
3905 const struct sock *sk = (void *)msk;
3906 u64 delta;
3907
3908 if (sk->sk_state == TCP_LISTEN)
3909 return -EINVAL;
3910
3911 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
3912 return 0;
3913
3914 delta = msk->write_seq - v;
3915 if (__mptcp_check_fallback(msk) && msk->first) {
3916 struct tcp_sock *tp = tcp_sk(msk->first);
3917
3918 /* the first subflow is disconnected after close - see
3919 * __mptcp_close_ssk(). tcp_disconnect() moves the write_seq
3920 * so ignore that status, too.
3921 */
3922 if (!((1 << msk->first->sk_state) &
3923 (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)))
3924 delta += READ_ONCE(tp->write_seq) - tp->snd_una;
3925 }
3926 if (delta > INT_MAX)
3927 delta = INT_MAX;
3928
3929 return (int)delta;
3930 }
3931
3932 static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
3933 {
3934 struct mptcp_sock *msk = mptcp_sk(sk);
3935 bool slow;
3936
3937 switch (cmd) {
3938 case SIOCINQ:
3939 if (sk->sk_state == TCP_LISTEN)
3940 return -EINVAL;
3941
3942 lock_sock(sk);
3943 if (mptcp_move_skbs(sk))
3944 mptcp_cleanup_rbuf(msk, 0);
3945 *karg = mptcp_inq_hint(sk);
3946 release_sock(sk);
3947 break;
3948 case SIOCOUTQ:
3949 slow = lock_sock_fast(sk);
3950 *karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una));
3951 unlock_sock_fast(sk, slow);
3952 break;
3953 case SIOCOUTQNSD:
3954 slow = lock_sock_fast(sk);
3955 *karg = mptcp_ioctl_outq(msk, msk->snd_nxt);
3956 unlock_sock_fast(sk, slow);
3957 break;
3958 default:
3959 return -ENOIOCTLCMD;
3960 }
3961
3962 return 0;
3963 }
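
/* A minimal userspace sketch (assuming an already connected MPTCP socket
 * file descriptor "fd") of how the three ioctls handled above can be
 * queried; SIOCINQ/SIOCOUTQ/SIOCOUTQNSD come from <linux/sockios.h>:
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void dump_queue_sizes(int fd)
{
	int inq = 0, outq = 0, outq_nsd = 0;

	ioctl(fd, SIOCINQ, &inq);		/* bytes ready for recvmsg() */
	ioctl(fd, SIOCOUTQ, &outq);		/* bytes written but not yet acked */
	ioctl(fd, SIOCOUTQNSD, &outq_nsd);	/* bytes written but not yet sent */
	printf("inq=%d outq=%d outq_nsd=%d\n", inq, outq, outq_nsd);
}
#endif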
3964
3965 static int mptcp_connect(struct sock *sk, struct sockaddr_unsized *uaddr,
3966 int addr_len)
3967 {
3968 struct mptcp_subflow_context *subflow;
3969 struct mptcp_sock *msk = mptcp_sk(sk);
3970 int err = -EINVAL;
3971 struct sock *ssk;
3972
3973 ssk = __mptcp_nmpc_sk(msk);
3974 if (IS_ERR(ssk))
3975 return PTR_ERR(ssk);
3976
3977 mptcp_set_state(sk, TCP_SYN_SENT);
3978 subflow = mptcp_subflow_ctx(ssk);
3979 #ifdef CONFIG_TCP_MD5SIG
3980 /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
3981 * TCP option space.
3982 */
3983 if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info))
3984 mptcp_early_fallback(msk, subflow, MPTCP_MIB_MD5SIGFALLBACK);
3985 #endif
3986 if (subflow->request_mptcp) {
3987 if (mptcp_active_should_disable(sk))
3988 mptcp_early_fallback(msk, subflow,
3989 MPTCP_MIB_MPCAPABLEACTIVEDISABLED);
3990 else if (mptcp_token_new_connect(ssk) < 0)
3991 mptcp_early_fallback(msk, subflow,
3992 MPTCP_MIB_TOKENFALLBACKINIT);
3993 }
3994
3995 WRITE_ONCE(msk->write_seq, subflow->idsn);
3996 WRITE_ONCE(msk->snd_nxt, subflow->idsn);
3997 WRITE_ONCE(msk->snd_una, subflow->idsn);
3998 if (likely(!__mptcp_check_fallback(msk)))
3999 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
4000
4001 /* if reaching here via the fastopen/sendmsg path, the caller already
4002 * acquired the subflow socket lock, too.
4003 */
4004 if (!msk->fastopening)
4005 lock_sock(ssk);
4006
4007 /* the following mirrors closely a very small chunk of code from
4008 * __inet_stream_connect()
4009 */
4010 if (ssk->sk_state != TCP_CLOSE)
4011 goto out;
4012
4013 if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk)) {
4014 err = ssk->sk_prot->pre_connect(ssk, uaddr, addr_len);
4015 if (err)
4016 goto out;
4017 }
4018
4019 err = ssk->sk_prot->connect(ssk, uaddr, addr_len);
4020 if (err < 0)
4021 goto out;
4022
4023 inet_assign_bit(DEFER_CONNECT, sk, inet_test_bit(DEFER_CONNECT, ssk));
4024
4025 out:
4026 if (!msk->fastopening)
4027 release_sock(ssk);
4028
4029 /* on successful connect, the msk state will be moved to established by
4030 * subflow_finish_connect()
4031 */
4032 if (unlikely(err)) {
4033 /* avoid leaving a dangling token in an unconnected socket */
4034 mptcp_token_destroy(msk);
4035 mptcp_set_state(sk, TCP_CLOSE);
4036 return err;
4037 }
4038
4039 mptcp_copy_inaddrs(sk, ssk);
4040 return 0;
4041 }
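
/* A minimal userspace sketch of the path that reaches mptcp_connect():
 * the application opens a SOCK_STREAM socket with IPPROTO_MPTCP and calls
 * connect(). The address and port are placeholders; the fallback define
 * for IPPROTO_MPTCP matches its uapi value, for older libc headers.
 */
#if 0
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif

static int mptcp_client_example(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),
	};
	int fd;

	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	if (fd < 0)
		return -1;	/* e.g. kernel built without CONFIG_MPTCP */

	/* connect() lands in mptcp_connect() via inet_stream_connect() */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif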
4042
4043 static struct proto mptcp_prot = {
4044 .name = "MPTCP",
4045 .owner = THIS_MODULE,
4046 .init = mptcp_init_sock,
4047 .connect = mptcp_connect,
4048 .disconnect = mptcp_disconnect,
4049 .close = mptcp_close,
4050 .setsockopt = mptcp_setsockopt,
4051 .getsockopt = mptcp_getsockopt,
4052 .shutdown = mptcp_shutdown,
4053 .destroy = mptcp_destroy,
4054 .sendmsg = mptcp_sendmsg,
4055 .ioctl = mptcp_ioctl,
4056 .recvmsg = mptcp_recvmsg,
4057 .release_cb = mptcp_release_cb,
4058 .hash = mptcp_hash,
4059 .unhash = mptcp_unhash,
4060 .get_port = mptcp_get_port,
4061 .stream_memory_free = mptcp_stream_memory_free,
4062 .sockets_allocated = &mptcp_sockets_allocated,
4063
4064 .memory_allocated = &net_aligned_data.tcp_memory_allocated,
4065 .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc,
4066
4067 .memory_pressure = &tcp_memory_pressure,
4068 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
4069 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
4070 .sysctl_mem = sysctl_tcp_mem,
4071 .obj_size = sizeof(struct mptcp_sock),
4072 .slab_flags = SLAB_TYPESAFE_BY_RCU,
4073 .no_autobind = true,
4074 };
4075
4076 static int mptcp_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
4077 {
4078 struct mptcp_sock *msk = mptcp_sk(sock->sk);
4079 struct sock *ssk, *sk = sock->sk;
4080 int err = -EINVAL;
4081
4082 lock_sock(sk);
4083 ssk = __mptcp_nmpc_sk(msk);
4084 if (IS_ERR(ssk)) {
4085 err = PTR_ERR(ssk);
4086 goto unlock;
4087 }
4088
4089 if (sk->sk_family == AF_INET)
4090 err = inet_bind_sk(ssk, uaddr, addr_len);
4091 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
4092 else if (sk->sk_family == AF_INET6)
4093 err = inet6_bind_sk(ssk, uaddr, addr_len);
4094 #endif
4095 if (!err)
4096 mptcp_copy_inaddrs(sk, ssk);
4097
4098 unlock:
4099 release_sock(sk);
4100 return err;
4101 }
4102
4103 static int mptcp_listen(struct socket *sock, int backlog)
4104 {
4105 struct mptcp_sock *msk = mptcp_sk(sock->sk);
4106 struct sock *sk = sock->sk;
4107 struct sock *ssk;
4108 int err;
4109
4110 pr_debug("msk=%p\n", msk);
4111
4112 lock_sock(sk);
4113
4114 err = -EINVAL;
4115 if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
4116 goto unlock;
4117
4118 ssk = __mptcp_nmpc_sk(msk);
4119 if (IS_ERR(ssk)) {
4120 err = PTR_ERR(ssk);
4121 goto unlock;
4122 }
4123
4124 mptcp_set_state(sk, TCP_LISTEN);
4125 sock_set_flag(sk, SOCK_RCU_FREE);
4126
4127 lock_sock(ssk);
4128 err = __inet_listen_sk(ssk, backlog);
4129 release_sock(ssk);
4130 mptcp_set_state(sk, inet_sk_state_load(ssk));
4131
4132 if (!err) {
4133 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
4134 mptcp_copy_inaddrs(sk, ssk);
4135 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
4136 }
4137
4138 unlock:
4139 release_sock(sk);
4140 return err;
4141 }
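
/* The listener side mirrors plain TCP: bind() and listen() on the MPTCP
 * socket are forwarded to the first subflow by mptcp_bind()/mptcp_listen()
 * above. A minimal userspace sketch (the port number is a placeholder):
 */
#if 0
#include <unistd.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif

static int mptcp_listener_example(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int one = 1, fd;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	if (fd < 0)
		return -1;

	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 128) < 0) {
		close(fd);
		return -1;
	}
	/* accept() on this fd ends up in mptcp_stream_accept() below */
	return fd;
}
#endif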
4142
4143 static void mptcp_graft_subflows(struct sock *sk)
4144 {
4145 struct mptcp_subflow_context *subflow;
4146 struct mptcp_sock *msk = mptcp_sk(sk);
4147
4148 if (mem_cgroup_sockets_enabled) {
4149 LIST_HEAD(join_list);
4150
4151 /* Subflows joining after __inet_accept() will get the
4152 * mem CG properly initialized at mptcp_finish_join() time,
4153 * but subflows pending in join_list need explicit
4154 * initialization before flushing `backlog_unaccounted`
4155 * or MPTCP can later unexpectedly observe unaccounted memory.
4156 */
4157 mptcp_data_lock(sk);
4158 list_splice_init(&msk->join_list, &join_list);
4159 mptcp_data_unlock(sk);
4160
4161 __mptcp_flush_join_list(sk, &join_list);
4162 }
4163
4164 mptcp_for_each_subflow(msk, subflow) {
4165 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
4166
4167 lock_sock(ssk);
4168
4169 /* Set ssk->sk_socket of accept()ed flows to the mptcp socket.
4170 * This is needed so the NOSPACE flag can be set from the tcp stack.
4171 */
4172 if (!ssk->sk_socket)
4173 mptcp_sock_graft(ssk, sk->sk_socket);
4174
4175 if (!mem_cgroup_sk_enabled(sk))
4176 goto unlock;
4177
4178 __mptcp_inherit_cgrp_data(sk, ssk);
4179 __mptcp_inherit_memcg(sk, ssk, GFP_KERNEL);
4180
4181 unlock:
4182 release_sock(ssk);
4183 }
4184
4185 if (mem_cgroup_sk_enabled(sk)) {
4186 gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
4187 int amt;
4188
4189 /* Account the backlog memory; prior accept() is aware of
4190 * fwd and rmem only.
4191 */
4192 mptcp_data_lock(sk);
4193 amt = sk_mem_pages(sk->sk_forward_alloc +
4194 msk->backlog_unaccounted +
4195 atomic_read(&sk->sk_rmem_alloc)) -
4196 sk_mem_pages(sk->sk_forward_alloc +
4197 atomic_read(&sk->sk_rmem_alloc));
4198 msk->backlog_unaccounted = 0;
4199 mptcp_data_unlock(sk);
4200
4201 if (amt)
4202 mem_cgroup_sk_charge(sk, amt, gfp);
4203 }
4204 }
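
/* A worked example of the charge computed above, assuming 4 KiB pages:
 * with sk_forward_alloc = 1000, sk_rmem_alloc = 5000 and
 * backlog_unaccounted = 10000, sk_mem_pages(16000) = 4 while
 * sk_mem_pages(6000) = 2, so only the 2 extra pages introduced by the
 * backlog are charged to the memcg.
 */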
4205
4206 static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
4207 struct proto_accept_arg *arg)
4208 {
4209 struct mptcp_sock *msk = mptcp_sk(sock->sk);
4210 struct sock *ssk, *newsk;
4211
4212 pr_debug("msk=%p\n", msk);
4213
4214 /* Buggy applications can call accept on socket states other than LISTEN
4215 * but no need to allocate the first subflow just to error out.
4216 */
4217 ssk = READ_ONCE(msk->first);
4218 if (!ssk)
4219 return -EINVAL;
4220
4221 pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk));
4222 newsk = inet_csk_accept(ssk, arg);
4223 if (!newsk)
4224 return arg->err;
4225
4226 pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk));
4227 if (sk_is_mptcp(newsk)) {
4228 struct mptcp_subflow_context *subflow;
4229 struct sock *new_mptcp_sock;
4230
4231 subflow = mptcp_subflow_ctx(newsk);
4232 new_mptcp_sock = subflow->conn;
4233
4234 /* is_mptcp should be false if subflow->conn is missing, see
4235 * subflow_syn_recv_sock()
4236 */
4237 if (WARN_ON_ONCE(!new_mptcp_sock)) {
4238 tcp_sk(newsk)->is_mptcp = 0;
4239 goto tcpfallback;
4240 }
4241
4242 newsk = new_mptcp_sock;
4243 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
4244
4245 newsk->sk_kern_sock = arg->kern;
4246 lock_sock(newsk);
4247 __inet_accept(sock, newsock, newsk);
4248
4249 set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
4250 msk = mptcp_sk(newsk);
4251 msk->in_accept_queue = 0;
4252
4253 mptcp_graft_subflows(newsk);
4254 mptcp_rps_record_subflows(msk);
4255 __mptcp_propagate_sndbuf(newsk, mptcp_subflow_tcp_sock(subflow));
4256
4257 /* Do late cleanup for the first subflow as necessary. Also
4258 * deal with bad peers not doing a complete shutdown.
4259 */
4260 if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
4261 if (unlikely(list_is_singular(&msk->conn_list)))
4262 mptcp_set_state(newsk, TCP_CLOSE);
4263 mptcp_close_ssk(newsk, msk->first,
4264 mptcp_subflow_ctx(msk->first));
4265 }
4266 } else {
4267 tcpfallback:
4268 newsk->sk_kern_sock = arg->kern;
4269 lock_sock(newsk);
4270 __inet_accept(sock, newsock, newsk);
4271 /* we are being invoked after accepting a non-mp-capable
4272 * flow: sk is a tcp_sk, not an mptcp one.
4273 *
4274 * Hand the socket over to tcp so all further socket ops
4275 * bypass mptcp.
4276 */
4277 WRITE_ONCE(newsock->sk->sk_socket->ops,
4278 mptcp_fallback_tcp_ops(newsock->sk));
4279 }
4280 release_sock(newsk);
4281
4282 return 0;
4283 }
4284
4285 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
4286 {
4287 struct sock *sk = (struct sock *)msk;
4288
4289 if (__mptcp_stream_is_writeable(sk, 1))
4290 return EPOLLOUT | EPOLLWRNORM;
4291
4292 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
4293 smp_mb__after_atomic(); /* NOSPACE is changed by mptcp_write_space() */
4294 if (__mptcp_stream_is_writeable(sk, 1))
4295 return EPOLLOUT | EPOLLWRNORM;
4296
4297 return 0;
4298 }
4299
4300 static __poll_t mptcp_poll(struct file *file, struct socket *sock,
4301 struct poll_table_struct *wait)
4302 {
4303 struct sock *sk = sock->sk;
4304 struct mptcp_sock *msk;
4305 __poll_t mask = 0;
4306 u8 shutdown;
4307 int state;
4308
4309 msk = mptcp_sk(sk);
4310 sock_poll_wait(file, sock, wait);
4311
4312 state = inet_sk_state_load(sk);
4313 pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
4314 if (state == TCP_LISTEN) {
4315 struct sock *ssk = READ_ONCE(msk->first);
4316
4317 if (WARN_ON_ONCE(!ssk))
4318 return 0;
4319
4320 return inet_csk_listen_poll(ssk);
4321 }
4322
4323 shutdown = READ_ONCE(sk->sk_shutdown);
4324 if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
4325 mask |= EPOLLHUP;
4326 if (shutdown & RCV_SHUTDOWN)
4327 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
4328
4329 if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
4330 mask |= mptcp_check_readable(sk);
4331 if (shutdown & SEND_SHUTDOWN)
4332 mask |= EPOLLOUT | EPOLLWRNORM;
4333 else
4334 mask |= mptcp_check_writeable(msk);
4335 } else if (state == TCP_SYN_SENT &&
4336 inet_test_bit(DEFER_CONNECT, sk)) {
4337 /* cf tcp_poll() note about TFO */
4338 mask |= EPOLLOUT | EPOLLWRNORM;
4339 }
4340
4341 /* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
4342 smp_rmb();
4343 if (READ_ONCE(sk->sk_err))
4344 mask |= EPOLLERR;
4345
4346 return mask;
4347 }
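
/* A small userspace sketch of how the mask built above is typically
 * consumed; POLLRDHUP (which needs _GNU_SOURCE) corresponds to the
 * EPOLLRDHUP bit set once the peer has closed its write side:
 */
#if 0
#define _GNU_SOURCE
#include <poll.h>

static int wait_for_io(int fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = fd,
		.events = POLLIN | POLLOUT | POLLRDHUP,
	};
	int ret = poll(&pfd, 1, timeout_ms);

	if (ret > 0 && (pfd.revents & POLLRDHUP))
		return 0;	/* peer shut down its write side */
	return ret;
}
#endif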
4348
4349 static struct sk_buff *mptcp_recv_skb(struct sock *sk, u32 *off)
4350 {
4351 struct mptcp_sock *msk = mptcp_sk(sk);
4352 struct sk_buff *skb;
4353 u32 offset;
4354
4355 if (!list_empty(&msk->backlog_list))
4356 mptcp_move_skbs(sk);
4357
4358 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
4359 offset = MPTCP_SKB_CB(skb)->offset;
4360 if (offset < skb->len) {
4361 *off = offset;
4362 return skb;
4363 }
4364 mptcp_eat_recv_skb(sk, skb);
4365 }
4366 return NULL;
4367 }
4368
4369 /*
4370 * Note:
4371 * - It is assumed that the socket was locked by the caller.
4372 */
4373 static int __mptcp_read_sock(struct sock *sk, read_descriptor_t *desc,
4374 sk_read_actor_t recv_actor, bool noack)
4375 {
4376 struct mptcp_sock *msk = mptcp_sk(sk);
4377 struct sk_buff *skb;
4378 int copied = 0;
4379 u32 offset;
4380
4381 msk_owned_by_me(msk);
4382
4383 if (sk->sk_state == TCP_LISTEN)
4384 return -ENOTCONN;
4385 while ((skb = mptcp_recv_skb(sk, &offset)) != NULL) {
4386 u32 data_len = skb->len - offset;
4387 int count;
4388 u32 size;
4389
4390 size = min_t(size_t, data_len, INT_MAX);
4391 count = recv_actor(desc, skb, offset, size);
4392 if (count <= 0) {
4393 if (!copied)
4394 copied = count;
4395 break;
4396 }
4397
4398 copied += count;
4399
4400 msk->bytes_consumed += count;
4401 if (count < data_len) {
4402 MPTCP_SKB_CB(skb)->offset += count;
4403 MPTCP_SKB_CB(skb)->map_seq += count;
4404 break;
4405 }
4406
4407 mptcp_eat_recv_skb(sk, skb);
4408 }
4409
4410 if (noack)
4411 goto out;
4412
4413 mptcp_rcv_space_adjust(msk, copied);
4414
4415 if (copied > 0) {
4416 mptcp_recv_skb(sk, &offset);
4417 mptcp_cleanup_rbuf(msk, copied);
4418 }
4419 out:
4420 return copied;
4421 }
4422
4423 static int mptcp_read_sock(struct sock *sk, read_descriptor_t *desc,
4424 sk_read_actor_t recv_actor)
4425 {
4426 return __mptcp_read_sock(sk, desc, recv_actor, false);
4427 }
4428
4429 static int __mptcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
4430 {
4431 /* Store TCP splice context information in read_descriptor_t. */
4432 read_descriptor_t rd_desc = {
4433 .arg.data = tss,
4434 .count = tss->len,
4435 };
4436
4437 return mptcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
4438 }
4439
4440 /**
4441 * mptcp_splice_read - splice data from MPTCP socket to a pipe
4442 * @sock: socket to splice from
4443 * @ppos: position (not valid)
4444 * @pipe: pipe to splice to
4445 * @len: number of bytes to splice
4446 * @flags: splice modifier flags
4447 *
4448 * Description:
4449 * Will read pages from given socket and fill them into a pipe.
4450 *
4451 * Return:
4452 * Amount of bytes that have been spliced.
4453 *
4454 **/
4455 static ssize_t mptcp_splice_read(struct socket *sock, loff_t *ppos,
4456 struct pipe_inode_info *pipe, size_t len,
4457 unsigned int flags)
4458 {
4459 struct tcp_splice_state tss = {
4460 .pipe = pipe,
4461 .len = len,
4462 .flags = flags,
4463 };
4464 struct sock *sk = sock->sk;
4465 ssize_t spliced = 0;
4466 int ret = 0;
4467 long timeo;
4468
4469 /*
4470 * We can't seek on a socket input
4471 */
4472 if (unlikely(*ppos))
4473 return -ESPIPE;
4474
4475 lock_sock(sk);
4476
4477 mptcp_rps_record_subflows(mptcp_sk(sk));
4478
4479 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
4480 while (tss.len) {
4481 ret = __mptcp_splice_read(sk, &tss);
4482 if (ret < 0) {
4483 break;
4484 } else if (!ret) {
4485 if (spliced)
4486 break;
4487 if (sock_flag(sk, SOCK_DONE))
4488 break;
4489 if (sk->sk_err) {
4490 ret = sock_error(sk);
4491 break;
4492 }
4493 if (sk->sk_shutdown & RCV_SHUTDOWN)
4494 break;
4495 if (sk->sk_state == TCP_CLOSE) {
4496 /*
4497 * This occurs when the user tries to read
4498 * from a never-connected socket.
4499 */
4500 ret = -ENOTCONN;
4501 break;
4502 }
4503 if (!timeo) {
4504 ret = -EAGAIN;
4505 break;
4506 }
4507 /* if __mptcp_splice_read() got nothing while we have
4508 * an skb in receive queue, we do not want to loop.
4509 * This might happen with URG data.
4510 */
4511 if (!skb_queue_empty(&sk->sk_receive_queue))
4512 break;
4513 ret = sk_wait_data(sk, &timeo, NULL);
4514 if (ret < 0)
4515 break;
4516 if (signal_pending(current)) {
4517 ret = sock_intr_errno(timeo);
4518 break;
4519 }
4520 continue;
4521 }
4522 tss.len -= ret;
4523 spliced += ret;
4524
4525 if (!tss.len || !timeo)
4526 break;
4527 release_sock(sk);
4528 lock_sock(sk);
4529
4530 if (tcp_recv_should_stop(sk))
4531 break;
4532 }
4533
4534 release_sock(sk);
4535
4536 if (spliced)
4537 return spliced;
4538
4539 return ret;
4540 }
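
/* A minimal userspace sketch of the splice path implemented above: data
 * is moved from a connected MPTCP socket "fd" into a pipe without passing
 * through a userspace buffer (the length is an arbitrary example value):
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t drain_to_pipe(int fd)
{
	int pipefd[2];
	ssize_t n;

	if (pipe(pipefd) < 0)
		return -1;

	/* ends up in mptcp_splice_read() via the proto_ops below */
	n = splice(fd, NULL, pipefd[1], NULL, 65536, SPLICE_F_MOVE);

	close(pipefd[0]);
	close(pipefd[1]);
	return n;
}
#endif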
4541
4542 static const struct proto_ops mptcp_stream_ops = {
4543 .family = PF_INET,
4544 .owner = THIS_MODULE,
4545 .release = inet_release,
4546 .bind = mptcp_bind,
4547 .connect = inet_stream_connect,
4548 .socketpair = sock_no_socketpair,
4549 .accept = mptcp_stream_accept,
4550 .getname = inet_getname,
4551 .poll = mptcp_poll,
4552 .ioctl = inet_ioctl,
4553 .gettstamp = sock_gettstamp,
4554 .listen = mptcp_listen,
4555 .shutdown = inet_shutdown,
4556 .setsockopt = sock_common_setsockopt,
4557 .getsockopt = sock_common_getsockopt,
4558 .sendmsg = inet_sendmsg,
4559 .recvmsg = inet_recvmsg,
4560 .mmap = sock_no_mmap,
4561 .set_rcvlowat = mptcp_set_rcvlowat,
4562 .read_sock = mptcp_read_sock,
4563 .splice_read = mptcp_splice_read,
4564 };
4565
4566 static struct inet_protosw mptcp_protosw = {
4567 .type = SOCK_STREAM,
4568 .protocol = IPPROTO_MPTCP,
4569 .prot = &mptcp_prot,
4570 .ops = &mptcp_stream_ops,
4571 .flags = INET_PROTOSW_ICSK,
4572 };
4573
4574 static int mptcp_napi_poll(struct napi_struct *napi, int budget)
4575 {
4576 struct mptcp_delegated_action *delegated;
4577 struct mptcp_subflow_context *subflow;
4578 int work_done = 0;
4579
4580 delegated = container_of(napi, struct mptcp_delegated_action, napi);
4581 while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
4582 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
4583
4584 bh_lock_sock_nested(ssk);
4585 if (!sock_owned_by_user(ssk)) {
4586 mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0));
4587 } else {
4588 /* tcp_release_cb_override already processed
4589 * the action or will do so at the next release_sock().
4590 * In both cases we must dequeue the subflow here - on the same
4591 * CPU that scheduled it.
4592 */
4593 smp_wmb();
4594 clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status);
4595 }
4596 bh_unlock_sock(ssk);
4597 sock_put(ssk);
4598
4599 if (++work_done == budget)
4600 return budget;
4601 }
4602
4603 /* always provide a 0 'work_done' argument, so that napi_complete_done
4604 * will not try accessing the NULL napi->dev ptr
4605 */
4606 napi_complete_done(napi, 0);
4607 return work_done;
4608 }
4609
4610 void __init mptcp_proto_init(void)
4611 {
4612 struct mptcp_delegated_action *delegated;
4613 int cpu;
4614
4615 mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
4616
4617 if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
4618 panic("Failed to allocate MPTCP pcpu counter\n");
4619
4620 mptcp_napi_dev = alloc_netdev_dummy(0);
4621 if (!mptcp_napi_dev)
4622 panic("Failed to allocate MPTCP dummy netdev\n");
4623 for_each_possible_cpu(cpu) {
4624 delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
4625 INIT_LIST_HEAD(&delegated->head);
4626 netif_napi_add_tx(mptcp_napi_dev, &delegated->napi,
4627 mptcp_napi_poll);
4628 napi_enable(&delegated->napi);
4629 }
4630
4631 mptcp_subflow_init();
4632 mptcp_pm_init();
4633 mptcp_sched_init();
4634 mptcp_token_init();
4635
4636 if (proto_register(&mptcp_prot, 1) != 0)
4637 panic("Failed to register MPTCP proto.\n");
4638
4639 inet_register_protosw(&mptcp_protosw);
4640
4641 BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
4642
4643 /* struct mptcp_data_frag: 'overhead' corresponds to the alignment
4644 * (ALIGN(1, sizeof(long)) - 1, so 8-1) + the struct's size
4645 */
4646 BUILD_BUG_ON(ALIGN(1, sizeof(long)) - 1 + sizeof(struct mptcp_data_frag)
4647 > U8_MAX);
4648 }
4649
4650 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
4651 static const struct proto_ops mptcp_v6_stream_ops = {
4652 .family = PF_INET6,
4653 .owner = THIS_MODULE,
4654 .release = inet6_release,
4655 .bind = mptcp_bind,
4656 .connect = inet_stream_connect,
4657 .socketpair = sock_no_socketpair,
4658 .accept = mptcp_stream_accept,
4659 .getname = inet6_getname,
4660 .poll = mptcp_poll,
4661 .ioctl = inet6_ioctl,
4662 .gettstamp = sock_gettstamp,
4663 .listen = mptcp_listen,
4664 .shutdown = inet_shutdown,
4665 .setsockopt = sock_common_setsockopt,
4666 .getsockopt = sock_common_getsockopt,
4667 .sendmsg = inet6_sendmsg,
4668 .recvmsg = inet6_recvmsg,
4669 .mmap = sock_no_mmap,
4670 #ifdef CONFIG_COMPAT
4671 .compat_ioctl = inet6_compat_ioctl,
4672 #endif
4673 .set_rcvlowat = mptcp_set_rcvlowat,
4674 .read_sock = mptcp_read_sock,
4675 .splice_read = mptcp_splice_read,
4676 };
4677
4678 static struct proto mptcp_v6_prot;
4679
4680 static struct inet_protosw mptcp_v6_protosw = {
4681 .type = SOCK_STREAM,
4682 .protocol = IPPROTO_MPTCP,
4683 .prot = &mptcp_v6_prot,
4684 .ops = &mptcp_v6_stream_ops,
4685 .flags = INET_PROTOSW_ICSK,
4686 };
4687
4688 int __init mptcp_proto_v6_init(void)
4689 {
4690 int err;
4691
4692 mptcp_subflow_v6_init();
4693
4694 mptcp_v6_prot = mptcp_prot;
4695 strscpy(mptcp_v6_prot.name, "MPTCPv6", sizeof(mptcp_v6_prot.name));
4696 mptcp_v6_prot.slab = NULL;
4697 mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
4698 mptcp_v6_prot.ipv6_pinfo_offset = offsetof(struct mptcp6_sock, np);
4699
4700 err = proto_register(&mptcp_v6_prot, 1);
4701 if (err)
4702 return err;
4703
4704 err = inet6_register_protosw(&mptcp_v6_protosw);
4705 if (err)
4706 proto_unregister(&mptcp_v6_prot);
4707
4708 return err;
4709 }
4710 #endif
4711