// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/rculist.h>
#include <linux/spinlock.h>
#include "protocol.h"
#include "mib.h"

#define ADD_ADDR_RETRANS_MAX	3

struct mptcp_pm_add_entry {
	struct list_head	list;
	struct mptcp_addr_info	addr;
	u8			retrans_times;
	bool			timer_done;
	struct timer_list	add_timer;
	struct mptcp_sock	*sock;
	struct rcu_head		rcu;
};

static DEFINE_SPINLOCK(mptcp_pm_list_lock);
static LIST_HEAD(mptcp_pm_list);

/* path manager helpers */

/* If sk is ipv4 or ipv6_only, allow only same-family local and remote
 * addresses; otherwise allow any matching local/remote pair.
 */
bool mptcp_pm_addr_families_match(const struct sock *sk,
				  const struct mptcp_addr_info *loc,
				  const struct mptcp_addr_info *rem)
{
	bool mptcp_is_v4 = sk->sk_family == AF_INET;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6);
	bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6);

	if (mptcp_is_v4)
		return loc_is_v4 && rem_is_v4;

	if (ipv6_only_sock(sk))
		return !loc_is_v4 && !rem_is_v4;

	return loc_is_v4 == rem_is_v4;
#else
	return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET;
#endif
}

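/* Compare two MPTCP addresses, considering an IPv4 address equal to
 * its v4-mapped IPv6 form; ports are compared only when use_port is
 * true.
 */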
bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
			   const struct mptcp_addr_info *b, bool use_port)
{
	bool addr_equals = false;

	if (a->family == b->family) {
		if (a->family == AF_INET)
			addr_equals = a->addr.s_addr == b->addr.s_addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else
			addr_equals = ipv6_addr_equal(&a->addr6, &b->addr6);
	} else if (a->family == AF_INET) {
		if (ipv6_addr_v4mapped(&b->addr6))
			addr_equals = a->addr.s_addr == b->addr6.s6_addr32[3];
	} else if (b->family == AF_INET) {
		if (ipv6_addr_v4mapped(&a->addr6))
			addr_equals = a->addr6.s6_addr32[3] == b->addr.s_addr;
#endif
	}

	if (!addr_equals)
		return false;
	if (!use_port)
		return true;

	return a->port == b->port;
}

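/* Fill @addr with the local address and port (network byte order) of
 * the given socket.
 */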
void mptcp_local_address(const struct sock_common *skc,
			 struct mptcp_addr_info *addr)
{
	addr->family = skc->skc_family;
	addr->port = htons(skc->skc_num);
	if (addr->family == AF_INET)
		addr->addr.s_addr = skc->skc_rcv_saddr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->family == AF_INET6)
		addr->addr6 = skc->skc_v6_rcv_saddr;
#endif
}

void mptcp_remote_address(const struct sock_common *skc,
			  struct mptcp_addr_info *addr)
{
	addr->family = skc->skc_family;
	addr->port = skc->skc_dport;
	if (addr->family == AF_INET)
		addr->addr.s_addr = skc->skc_daddr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->family == AF_INET6)
		addr->addr6 = skc->skc_v6_daddr;
#endif
}

static bool mptcp_pm_is_init_remote_addr(struct mptcp_sock *msk,
					 const struct mptcp_addr_info *remote)
{
	struct mptcp_addr_info mpc_remote;

	mptcp_remote_address((struct sock_common *)msk, &mpc_remote);
	return mptcp_addresses_equal(&mpc_remote, remote, remote->port);
}

bool mptcp_lookup_subflow_by_saddr(const struct list_head *list,
				   const struct mptcp_addr_info *saddr)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_addr_info cur;
	struct sock_common *skc;

	list_for_each_entry(subflow, list, node) {
		skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);

		mptcp_local_address(skc, &cur);
		if (mptcp_addresses_equal(&cur, saddr, saddr->port))
			return true;
	}

	return false;
}

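/* Look up the announce-list entry matching @addr, port included;
 * must be called with the PM lock held.
 */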
static struct mptcp_pm_add_entry *
mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_add_entry *entry;

	lockdep_assert_held(&msk->pm.lock);

	list_for_each_entry(entry, &msk->pm.anno_list, list) {
		if (mptcp_addresses_equal(&entry->addr, addr, true))
			return entry;
	}

	return NULL;
}

bool mptcp_remove_anno_list_by_saddr(struct mptcp_sock *msk,
				     const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_add_entry *entry;
	bool ret;

	entry = mptcp_pm_del_add_timer(msk, addr, false);
	ret = entry;
	kfree_rcu(entry, rcu);

	return ret;
}

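/* Return true if the local address and port of @sk are already present
 * in the msk announce list.
 */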
bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk)
{
	struct mptcp_pm_add_entry *entry;
	struct mptcp_addr_info saddr;
	bool ret = false;

	mptcp_local_address((struct sock_common *)sk, &saddr);

	spin_lock_bh(&msk->pm.lock);
	list_for_each_entry(entry, &msk->pm.anno_list, list) {
		if (mptcp_addresses_equal(&entry->addr, &saddr, true)) {
			ret = true;
			goto out;
		}
	}

out:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

static void __mptcp_pm_send_ack(struct mptcp_sock *msk,
				struct mptcp_subflow_context *subflow,
				bool prio, bool backup)
{
	struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
	bool slow;

	pr_debug("send ack for %s\n",
		 prio ? "mp_prio" :
		 (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));

	slow = lock_sock_fast(ssk);
	if (prio) {
		subflow->send_mp_prio = 1;
		subflow->request_bkup = backup;
	}

	__mptcp_subflow_send_ack(ssk);
	unlock_sock_fast(ssk, slow);
}

/* Same as above, but callable with the PM lock held: the lock is
 * dropped around the ack transmission and re-acquired afterwards.
 */
void mptcp_pm_send_ack(struct mptcp_sock *msk,
		       struct mptcp_subflow_context *subflow,
		       bool prio, bool backup)
{
	spin_unlock_bh(&msk->pm.lock);
	__mptcp_pm_send_ack(msk, subflow, prio, backup);
	spin_lock_bh(&msk->pm.lock);
}

static bool subflow_in_rm_list(const struct mptcp_subflow_context *subflow,
			       const struct mptcp_rm_list *rm_list)
{
	u8 i, id = subflow_get_local_id(subflow);

	for (i = 0; i < rm_list->nr; i++) {
		if (rm_list->ids[i] == id)
			return true;
	}

	return false;
}

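/* Send an ack on one subflow carrying the pending ADD_ADDR or RM_ADDR
 * signal, preferring an active non-stale subflow whose id is not in
 * @rm_list; fall back to a same-id or stale one only when nothing
 * better is available.
 */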
static void
mptcp_pm_addr_send_ack_avoid_list(struct mptcp_sock *msk,
				  const struct mptcp_rm_list *rm_list)
{
	struct mptcp_subflow_context *subflow, *stale = NULL, *same_id = NULL;

	msk_owned_by_me(msk);
	lockdep_assert_held(&msk->pm.lock);

	if (!mptcp_pm_should_add_signal(msk) &&
	    !mptcp_pm_should_rm_signal(msk))
		return;

	mptcp_for_each_subflow(msk, subflow) {
		if (!__mptcp_subflow_active(subflow))
			continue;

		if (unlikely(subflow->stale)) {
			if (!stale)
				stale = subflow;
		} else if (unlikely(rm_list &&
				    subflow_in_rm_list(subflow, rm_list))) {
			if (!same_id)
				same_id = subflow;
		} else {
			goto send_ack;
		}
	}

	if (same_id)
		subflow = same_id;
	else if (stale)
		subflow = stale;
	else
		return;

send_ack:
	mptcp_pm_send_ack(msk, subflow, false, false);
}

void mptcp_pm_addr_send_ack(struct mptcp_sock *msk)
{
	mptcp_pm_addr_send_ack_avoid_list(msk, NULL);
}

/* Ask the subflow matching the given local (and optionally remote)
 * address to transmit an MP_PRIO option carrying the new backup flag.
 */
int mptcp_pm_mp_prio_send_ack(struct mptcp_sock *msk,
			      struct mptcp_addr_info *addr,
			      struct mptcp_addr_info *rem,
			      u8 bkup)
{
	struct mptcp_subflow_context *subflow;

	pr_debug("bkup=%d\n", bkup);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		struct mptcp_addr_info local, remote;

		if (!__mptcp_subflow_active(subflow))
			continue;

		mptcp_local_address((struct sock_common *)ssk, &local);
		if (!mptcp_addresses_equal(&local, addr, addr->port))
			continue;

		if (rem && rem->family != AF_UNSPEC) {
			mptcp_remote_address((struct sock_common *)ssk, &remote);
			if (!mptcp_addresses_equal(&remote, rem, rem->port))
				continue;
		}

		__mptcp_pm_send_ack(msk, subflow, true, bkup);
		return 0;
	}

	return -EINVAL;
}

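/* Pick the ADD_ADDR retransmission timeout: the configured value,
 * clamped to the largest retransmission timeout (icsk_rto) among the
 * active subflows, preferring non-stale ones; returns 0 when ADD_ADDR
 * retransmission is disabled.
 */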
static unsigned int mptcp_adjust_add_addr_timeout(struct mptcp_sock *msk)
{
	const struct net *net = sock_net((struct sock *)msk);
	unsigned int rto = mptcp_get_add_addr_timeout(net);
	struct mptcp_subflow_context *subflow;
	unsigned int max = 0, max_stale = 0;

	if (!rto)
		return 0;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		struct inet_connection_sock *icsk = inet_csk(ssk);

		if (!__mptcp_subflow_active(subflow))
			continue;

		if (unlikely(subflow->stale)) {
			if (icsk->icsk_rto > max_stale)
				max_stale = icsk->icsk_rto;
		} else if (icsk->icsk_rto > max) {
			max = icsk->icsk_rto;
		}
	}

	if (max)
		return min(max, rto);

	if (max_stale)
		return min(max_stale, rto);

	return rto;
}

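/* ADD_ADDR retransmission timer: re-announce the address with
 * exponential backoff, up to ADD_ADDR_RETRANS_MAX times, deferring
 * shortly when the msk socket is owned by user context.
 */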
static void mptcp_pm_add_timer(struct timer_list *timer)
{
	struct mptcp_pm_add_entry *entry = timer_container_of(entry, timer,
							       add_timer);
	struct mptcp_sock *msk = entry->sock;
	struct sock *sk = (struct sock *)msk;
	unsigned int timeout = 0;

	pr_debug("msk=%p\n", msk);

	bh_lock_sock(sk);
	if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE))
		goto out;

	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		timeout = HZ / 20;
		goto out;
	}

	timeout = mptcp_adjust_add_addr_timeout(msk);
	if (!timeout || mptcp_pm_should_add_signal_addr(msk))
		goto out;

	spin_lock_bh(&msk->pm.lock);

	if (!mptcp_pm_should_add_signal_addr(msk)) {
		pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id);
		mptcp_pm_announce_addr(msk, &entry->addr, false);
		mptcp_pm_add_addr_send_ack(msk);
		entry->retrans_times++;
	}

	if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
		timeout <<= entry->retrans_times;
	else
		timeout = 0;

	spin_unlock_bh(&msk->pm.lock);

	if (entry->retrans_times == ADD_ADDR_RETRANS_MAX)
		mptcp_pm_subflow_established(msk);

out:
	if (timeout)
		sk_reset_timer(sk, timer, jiffies + timeout);
	else
		/* if sock_put calls sk_free: avoid waiting for this timer */
		entry->timer_done = true;
	bh_unlock_sock(sk);
	sock_put(sk);
}

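/* Stop the ADD_ADDR retransmission timer matching @addr (and, when
 * check_id is set, its id). Unless check_id is set, also unlink the
 * entry from the announce list; the caller then owns it and must free
 * it via kfree_rcu().
 */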
struct mptcp_pm_add_entry *
mptcp_pm_del_add_timer(struct mptcp_sock *msk,
		       const struct mptcp_addr_info *addr, bool check_id)
{
	struct mptcp_pm_add_entry *entry;
	struct sock *sk = (struct sock *)msk;
	bool stop_timer = false;

	rcu_read_lock();

	spin_lock_bh(&msk->pm.lock);
	entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
	if (entry && (!check_id || entry->addr.id == addr->id)) {
		entry->retrans_times = ADD_ADDR_RETRANS_MAX;
		stop_timer = true;
	}
	if (!check_id && entry)
		list_del(&entry->list);
	spin_unlock_bh(&msk->pm.lock);

	/* Note: entry might have been removed by another thread.
	 * We hold rcu_read_lock() to ensure it is not freed under us.
	 */
	if (stop_timer)
		sk_stop_timer_sync(sk, &entry->add_timer);

	rcu_read_unlock();
	return entry;
}

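/* Track @addr in the announce list and arm its ADD_ADDR retransmission
 * timer; if the address is already tracked, only re-arm the timer.
 * Must be called with the PM lock held.
 */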
bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
			      const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_add_entry *add_entry = NULL;
	struct sock *sk = (struct sock *)msk;
	unsigned int timeout;

	lockdep_assert_held(&msk->pm.lock);

	add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr);

	if (add_entry) {
		if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk)))
			return false;

		goto reset_timer;
	}

	add_entry = kmalloc_obj(*add_entry, GFP_ATOMIC);
	if (!add_entry)
		return false;

	list_add(&add_entry->list, &msk->pm.anno_list);

	add_entry->addr = *addr;
	add_entry->sock = msk;
	add_entry->retrans_times = 0;

	timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0);
reset_timer:
	add_entry->timer_done = false;
	timeout = mptcp_adjust_add_addr_timeout(msk);
	if (timeout)
		sk_reset_timer(sk, &add_entry->add_timer, jiffies + timeout);

	return true;
}

static void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
{
	struct mptcp_pm_add_entry *entry, *tmp;
	struct sock *sk = (struct sock *)msk;
	LIST_HEAD(free_list);

	pr_debug("msk=%p\n", msk);

	spin_lock_bh(&msk->pm.lock);
	list_splice_init(&msk->pm.anno_list, &free_list);
	spin_unlock_bh(&msk->pm.lock);

	list_for_each_entry_safe(entry, tmp, &free_list, list) {
		if (!entry->timer_done)
			sk_stop_timer_sync(sk, &entry->add_timer);
		kfree_rcu(entry, rcu);
	}
}

/* path manager command handlers */

int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d, echo=%d\n", msk, addr->id, echo);

	lockdep_assert_held(&msk->pm.lock);

	if (add_addr &
	    (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) {
		MPTCP_INC_STATS(sock_net((struct sock *)msk),
				echo ? MPTCP_MIB_ECHOADDTXDROP : MPTCP_MIB_ADDADDRTXDROP);
		return -EINVAL;
	}

	if (echo) {
		msk->pm.remote = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		msk->pm.local = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}

int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr);

	if (rm_addr) {
		MPTCP_ADD_STATS(sock_net((struct sock *)msk),
				MPTCP_MIB_RMADDRTXDROP, rm_list->nr);
		return -EINVAL;
	}

	msk->pm.rm_list_tx = *rm_list;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	mptcp_pm_addr_send_ack_avoid_list(msk, rm_list);
	return 0;
}

/* path manager event handlers */

void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d\n", msk, READ_ONCE(msk->token), server_side);

	WRITE_ONCE(pm->server_side, server_side);
	mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}

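/* Called on MP_JOIN reception: decide whether a new subflow can be
 * accepted, enforcing the extra-subflow limit for the in-kernel PM and
 * only accounting the subflow for an active userspace PM.
 */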
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	unsigned int limit_extra_subflows;
	int ret = 0;

	if (mptcp_pm_is_userspace(msk)) {
		if (mptcp_userspace_pm_active(msk)) {
			spin_lock_bh(&pm->lock);
			pm->extra_subflows++;
			spin_unlock_bh(&pm->lock);
			return true;
		}
		return false;
	}

	limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);

	pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk,
		 pm->extra_subflows, limit_extra_subflows,
		 READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->extra_subflows < limit_extra_subflows;
		if (ret && ++pm->extra_subflows == limit_extra_subflows)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, eventually by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}

void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool announce = false;

	pr_debug("msk=%p\n", msk);

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established():
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(pm->status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	if ((pm->status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
		announce = true;

	pm->status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
	spin_unlock_bh(&pm->lock);

	if (announce)
		mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, GFP_ATOMIC);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p\n", msk);

	if (msk->token)
		mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p\n", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_check_next(struct mptcp_sock *msk,
				 const struct mptcp_subflow_context *subflow)
{
	struct sock *sk = (struct sock *)msk;
	struct mptcp_pm_data *pm = &msk->pm;
	bool update_subflows;

	update_subflows = subflow->request_join || subflow->mp_join;
	if (mptcp_pm_is_userspace(msk)) {
		if (update_subflows) {
			spin_lock_bh(&pm->lock);
			pm->extra_subflows--;
			spin_unlock_bh(&pm->lock);
		}
		return;
	}

	if (!READ_ONCE(pm->work_pending) && !update_subflows)
		return;

	spin_lock_bh(&pm->lock);
	if (update_subflows)
		__mptcp_pm_close_subflow(msk);

	/* Even if this subflow is not really established, tell the PM to try
	 * to pick the next ones, if possible.
	 */
	if (mptcp_is_fully_established(sk) &&
	    mptcp_pm_nl_check_work_pending(msk))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

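/* Handle an incoming ADD_ADDR: depending on the PM type and the
 * current accept policy, either echo it straight back, store it and
 * wake the worker to create a new subflow, or count it as dropped.
 */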
void mptcp_pm_add_addr_received(const struct sock *ssk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	mptcp_event_addr_announced(ssk, addr);

	spin_lock_bh(&pm->lock);

	if (mptcp_pm_is_userspace(msk)) {
		if (mptcp_userspace_pm_active(msk)) {
			mptcp_pm_announce_addr(msk, addr, true);
			mptcp_pm_add_addr_send_ack(msk);
		} else {
			__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
		}
	/* - id0 should not have a different address
	 * - special case for C-flag: linked to fill_local_addresses_vec()
	 */
	} else if ((addr->id == 0 && !mptcp_pm_is_init_remote_addr(msk, addr)) ||
		   (addr->id > 0 && !READ_ONCE(pm->accept_addr) &&
		    !mptcp_pm_add_addr_c_flag_case(msk))) {
		mptcp_pm_announce_addr(msk, addr, true);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	} else {
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
	}

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
			      const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p\n", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

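/* Close the subflows matching the ids in @rm_list: for RM_ADDR the ids
 * are matched against the remote address id, for subflow removal
 * against the local one; also update the MIB counters and let the
 * in-kernel PM update its accounting.
 */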
static void mptcp_pm_rm_addr_or_subflow(struct mptcp_sock *msk,
					const struct mptcp_rm_list *rm_list,
					enum linux_mptcp_mib_field rm_type)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct sock *sk = (struct sock *)msk;
	u8 i;

	pr_debug("%s rm_list_nr %d\n",
		 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);

	msk_owned_by_me(msk);

	if (sk->sk_state == TCP_LISTEN)
		return;

	if (!rm_list->nr)
		return;

	if (list_empty(&msk->conn_list))
		return;

	for (i = 0; i < rm_list->nr; i++) {
		u8 rm_id = rm_list->ids[i];
		bool removed = false;

		mptcp_for_each_subflow_safe(msk, subflow, tmp) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
			u8 remote_id = READ_ONCE(subflow->remote_id);
			int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
			u8 id = subflow_get_local_id(subflow);

			if ((1 << inet_sk_state_load(ssk)) &
			    (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING | TCPF_CLOSE))
				continue;
			if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
				continue;
			if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id)
				continue;

			pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n",
				 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
				 i, rm_id, id, remote_id, msk->mpc_endpoint_id);
			spin_unlock_bh(&msk->pm.lock);
			mptcp_subflow_shutdown(sk, ssk, how);
			removed |= subflow->request_join;

			/* the following takes care of updating the subflows counter */
			mptcp_close_ssk(sk, ssk, subflow);
			spin_lock_bh(&msk->pm.lock);

			if (rm_type == MPTCP_MIB_RMSUBFLOW)
				__MPTCP_INC_STATS(sock_net(sk), rm_type);
		}

		if (rm_type == MPTCP_MIB_RMADDR) {
			__MPTCP_INC_STATS(sock_net(sk), rm_type);
			if (removed && mptcp_pm_is_kernel(msk))
				mptcp_pm_nl_rm_addr(msk, rm_id);
		}
	}
}

static void mptcp_pm_rm_addr_recv(struct mptcp_sock *msk)
{
	mptcp_pm_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR);
}

void mptcp_pm_rm_subflow(struct mptcp_sock *msk,
			 const struct mptcp_rm_list *rm_list)
{
	mptcp_pm_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW);
}

void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
			       const struct mptcp_rm_list *rm_list)
{
	struct mptcp_pm_data *pm = &msk->pm;
	u8 i;

	pr_debug("msk=%p remote_ids_nr=%d\n", msk, rm_list->nr);

	for (i = 0; i < rm_list->nr; i++)
		mptcp_event_addr_removed(msk, rm_list->ids[i]);

	spin_lock_bh(&pm->lock);
	if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
		pm->rm_list_rx = *rm_list;
	else
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;
	struct mptcp_sock *msk;

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	msk = mptcp_sk(sk);
	if (subflow->backup != bkup)
		subflow->backup = bkup;

	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
}

void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	pr_debug("fail_seq=%llu\n", fail_seq);

	/* After accepting the fail, we can't create any other subflows */
	spin_lock_bh(&msk->fallback_lock);
	if (!msk->allow_infinite_fallback) {
		spin_unlock_bh(&msk->fallback_lock);
		return;
	}
	msk->allow_subflows = false;
	spin_unlock_bh(&msk->fallback_lock);

	if (!subflow->fail_tout) {
		pr_debug("send MP_FAIL response and infinite map\n");

		subflow->send_mp_fail = 1;
		subflow->send_infinite_map = 1;
		tcp_send_ack(sk);
	} else {
		pr_debug("MP_FAIL response received\n");
		WRITE_ONCE(subflow->fail_tout, 0);
	}
}

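/* Called by the option-writing code: fetch a pending ADD_ADDR (or its
 * echo) for transmission if the remaining option space allows it; on a
 * TCP pure ack, all other suboptions may be dropped to make room.
 */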
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
			      unsigned int opt_size, unsigned int remaining,
			      struct mptcp_addr_info *addr, bool *echo,
			      bool *drop_other_suboptions)
{
	int ret = false;
	u8 add_addr;
	u8 family;
	bool port;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	/* always drop all other options for a pure-ack ADD_ADDR; this is a
	 * plain dup-ack from the TCP perspective. The other MPTCP-relevant
	 * info, if any, will be carried by the 'original' TCP ack
	 */
	if (skb && skb_is_tcp_pure_ack(skb)) {
		remaining += opt_size;
		*drop_other_suboptions = true;
	}

	*echo = mptcp_pm_should_add_signal_echo(msk);
	port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);

	family = *echo ? msk->pm.remote.family : msk->pm.local.family;
	if (remaining < mptcp_add_addr_len(family, *echo, port))
		goto out_unlock;

	if (*echo) {
		*addr = msk->pm.remote;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		*addr = msk->pm.local;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     struct mptcp_rm_list *rm_list)
{
	int ret = false, len;
	u8 rm_addr;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
	len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
	if (len < 0) {
		WRITE_ONCE(msk->pm.addr_signal, rm_addr);
		goto out_unlock;
	}
	if (remaining < len)
		goto out_unlock;

	*rm_list = msk->pm.rm_list_tx;
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	struct mptcp_pm_addr_entry skc_local = { 0 };
	struct mptcp_addr_info msk_local;

	if (WARN_ON_ONCE(!msk))
		return -1;

	/* The 0 ID mapping is defined by the first subflow, copied into the msk
	 * addr
	 */
	mptcp_local_address((struct sock_common *)msk, &msk_local);
	mptcp_local_address((struct sock_common *)skc, &skc_local.addr);
	if (mptcp_addresses_equal(&msk_local, &skc_local.addr, false))
		return 0;

	skc_local.addr.id = 0;
	skc_local.flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_get_local_id(msk, &skc_local);
	return mptcp_pm_nl_get_local_id(msk, &skc_local);
}

bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
{
	struct mptcp_addr_info skc_local;

	mptcp_local_address((struct sock_common *)skc, &skc_local);

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_is_backup(msk, &skc_local);

	return mptcp_pm_nl_is_backup(msk, &skc_local);
}

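/* Called when @ssk keeps hitting retransmission timeouts with no
 * forward progress: mark it stale and re-inject its pending data, but
 * only if a healthier alternative subflow is available.
 */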
static void mptcp_pm_subflows_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	unsigned int active_max_loss_cnt;
	struct net *net = sock_net(sk);
	unsigned int stale_loss_cnt;
	bool slow;

	stale_loss_cnt = mptcp_stale_loss_cnt(net);
	if (subflow->stale || !stale_loss_cnt || subflow->stale_count <= stale_loss_cnt)
		return;

	/* look for another available subflow not in loss state */
	active_max_loss_cnt = max_t(int, stale_loss_cnt - 1, 1);
	mptcp_for_each_subflow(msk, iter) {
		if (iter != subflow && mptcp_subflow_active(iter) &&
		    iter->stale_count < active_max_loss_cnt) {
			/* we have some alternatives, try to mark this subflow as idle ... */
			slow = lock_sock_fast(ssk);
			if (!tcp_rtx_and_write_queues_empty(ssk)) {
				subflow->stale = 1;
				__mptcp_retransmit_pending_data(sk);
				MPTCP_INC_STATS(net, MPTCP_MIB_SUBFLOWSTALE);
			}
			unlock_sock_fast(ssk, slow);

			/* always try to push the pending data regardless of re-injections:
			 * we can possibly use backup subflows now, and subflow selection
			 * is cheap under the msk socket lock
			 */
			__mptcp_push_pending(sk, 0);
			return;
		}
	}
}

void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);

	/* keep track of rtx periods with no progress */
	if (!subflow->stale_count) {
		subflow->stale_rcv_tstamp = rcv_tstamp;
		subflow->stale_count++;
	} else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
		if (subflow->stale_count < U8_MAX)
			subflow->stale_count++;
		mptcp_pm_subflows_chk_stale(msk, ssk);
	} else {
		subflow->stale_count = 0;
		mptcp_subflow_set_active(subflow);
	}
}

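/* Serve the PM events scheduled via mptcp_pm_schedule_work(); invoked
 * by the msk worker, under the msk socket lock, clearing each status
 * bit before serving it.
 */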
void mptcp_pm_worker(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	msk_owned_by_me(msk);

	if (!(pm->status & MPTCP_PM_WORK_MASK))
		return;

	spin_lock_bh(&msk->pm.lock);

	pr_debug("msk=%p status=%x\n", msk, pm->status);
	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) {
		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK);
		mptcp_pm_addr_send_ack(msk);
	}
	if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) {
		pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED);
		mptcp_pm_rm_addr_recv(msk);
	}
	__mptcp_pm_kernel_worker(msk);

	spin_unlock_bh(&msk->pm.lock);
}

void mptcp_pm_destroy(struct mptcp_sock *msk)
{
	mptcp_pm_free_anno_list(msk);

	if (mptcp_pm_is_userspace(msk))
		mptcp_userspace_pm_free_local_addr_list(msk);
}

void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
	u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
	struct mptcp_pm_data *pm = &msk->pm;

	memset(&pm->reset, 0, sizeof(pm->reset));
	pm->rm_list_tx.nr = 0;
	pm->rm_list_rx.nr = 0;
	WRITE_ONCE(pm->pm_type, pm_type);

	if (pm_type == MPTCP_PM_TYPE_KERNEL) {
		bool subflows_allowed = !!mptcp_pm_get_limit_extra_subflows(msk);

		/* pm->work_pending must only be set to 'true' when
		 * pm->pm_type is set to MPTCP_PM_TYPE_KERNEL
		 */
		WRITE_ONCE(pm->work_pending,
			   (!!mptcp_pm_get_endp_subflow_max(msk) &&
			    subflows_allowed) ||
			   !!mptcp_pm_get_endp_signal_max(msk));
		WRITE_ONCE(pm->accept_addr,
			   !!mptcp_pm_get_limit_add_addr_accepted(msk) &&
			   subflows_allowed);
		WRITE_ONCE(pm->accept_subflow, subflows_allowed);

		bitmap_fill(pm->id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
	}
}

void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);
	INIT_LIST_HEAD(&msk->pm.userspace_pm_local_addr_list);
	mptcp_pm_data_reset(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_kernel_register();
	mptcp_pm_userspace_register();
	mptcp_pm_nl_init();
}

/* Must be called with rcu read lock held */
struct mptcp_pm_ops *mptcp_pm_find(const char *name)
{
	struct mptcp_pm_ops *pm_ops;

	list_for_each_entry_rcu(pm_ops, &mptcp_pm_list, list) {
		if (!strcmp(pm_ops->name, name))
			return pm_ops;
	}

	return NULL;
}

int mptcp_pm_validate(struct mptcp_pm_ops *pm_ops)
{
	return 0;
}

int mptcp_pm_register(struct mptcp_pm_ops *pm_ops)
{
	int ret;

	ret = mptcp_pm_validate(pm_ops);
	if (ret)
		return ret;

	spin_lock(&mptcp_pm_list_lock);
	if (mptcp_pm_find(pm_ops->name)) {
		spin_unlock(&mptcp_pm_list_lock);
		return -EEXIST;
	}
	list_add_tail_rcu(&pm_ops->list, &mptcp_pm_list);
	spin_unlock(&mptcp_pm_list_lock);

	pr_debug("%s registered\n", pm_ops->name);
	return 0;
}

void mptcp_pm_unregister(struct mptcp_pm_ops *pm_ops)
{
	/* skip unregistering the default path manager */
	if (WARN_ON_ONCE(pm_ops == &mptcp_pm_kernel))
		return;

	spin_lock(&mptcp_pm_list_lock);
	list_del_rcu(&pm_ops->list);
	spin_unlock(&mptcp_pm_list_lock);
}

/* Build string with list of available path manager values.
 * Similar to tcp_get_available_congestion_control()
 */
void mptcp_pm_get_available(char *buf, size_t maxlen)
{
	struct mptcp_pm_ops *pm_ops;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pm_ops, &mptcp_pm_list, list) {
		offs += snprintf(buf + offs, maxlen - offs, "%s%s",
				 offs == 0 ? "" : " ", pm_ops->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}