// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/rculist.h>
#include <linux/spinlock.h>
#include "protocol.h"
#include "mib.h"

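/* Maximum number of times an announced address is retransmitted; the timer
 * backs off exponentially between attempts (see mptcp_pm_add_timer()).
 */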
#define ADD_ADDR_RETRANS_MAX	3

struct mptcp_pm_add_entry {
	struct list_head	list;
	struct mptcp_addr_info	addr;
	u8			retrans_times;
	struct timer_list	add_timer;
	struct mptcp_sock	*sock;
	struct rcu_head		rcu;
};

static DEFINE_SPINLOCK(mptcp_pm_list_lock);
static LIST_HEAD(mptcp_pm_list);

/* path manager helpers */

/* If sk is IPv4, or is IPv6-only, allow only same-family local and remote
 * addresses; otherwise allow any matching local/remote pair.
 */
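/* For example (illustrative addresses only): a v4-mapped IPv6 address such
 * as ::ffff:192.0.2.1 counts as IPv4 here, so it can be paired with a plain
 * IPv4 address, unless the socket is IPv6-only.
 */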
bool mptcp_pm_addr_families_match(const struct sock *sk,
				  const struct mptcp_addr_info *loc,
				  const struct mptcp_addr_info *rem)
{
	bool mptcp_is_v4 = sk->sk_family == AF_INET;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6);
	bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6);

	if (mptcp_is_v4)
		return loc_is_v4 && rem_is_v4;

	if (ipv6_only_sock(sk))
		return !loc_is_v4 && !rem_is_v4;

	return loc_is_v4 == rem_is_v4;
#else
	return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET;
#endif
}

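/* Compare two addresses, optionally including ports. Cross-family
 * comparisons only match when the IPv6 side is a v4-mapped address equal to
 * the IPv4 one, e.g. 192.0.2.1 vs ::ffff:192.0.2.1 (illustrative values).
 */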
bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
			   const struct mptcp_addr_info *b, bool use_port)
{
	bool addr_equals = false;

	if (a->family == b->family) {
		if (a->family == AF_INET)
			addr_equals = a->addr.s_addr == b->addr.s_addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else
			addr_equals = ipv6_addr_equal(&a->addr6, &b->addr6);
	} else if (a->family == AF_INET) {
		if (ipv6_addr_v4mapped(&b->addr6))
			addr_equals = a->addr.s_addr == b->addr6.s6_addr32[3];
	} else if (b->family == AF_INET) {
		if (ipv6_addr_v4mapped(&a->addr6))
			addr_equals = a->addr6.s6_addr32[3] == b->addr.s_addr;
#endif
	}

	if (!addr_equals)
		return false;
	if (!use_port)
		return true;

	return a->port == b->port;
}

void mptcp_local_address(const struct sock_common *skc,
			 struct mptcp_addr_info *addr)
{
	addr->family = skc->skc_family;
	addr->port = htons(skc->skc_num);
	if (addr->family == AF_INET)
		addr->addr.s_addr = skc->skc_rcv_saddr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->family == AF_INET6)
		addr->addr6 = skc->skc_v6_rcv_saddr;
#endif
}

void mptcp_remote_address(const struct sock_common *skc,
			  struct mptcp_addr_info *addr)
{
	addr->family = skc->skc_family;
	addr->port = skc->skc_dport;
	if (addr->family == AF_INET)
		addr->addr.s_addr = skc->skc_daddr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->family == AF_INET6)
		addr->addr6 = skc->skc_v6_daddr;
#endif
}

static bool mptcp_pm_is_init_remote_addr(struct mptcp_sock *msk,
					 const struct mptcp_addr_info *remote)
{
	struct mptcp_addr_info mpc_remote;

	mptcp_remote_address((struct sock_common *)msk, &mpc_remote);
	return mptcp_addresses_equal(&mpc_remote, remote, remote->port);
}

bool mptcp_lookup_subflow_by_saddr(const struct list_head *list,
				   const struct mptcp_addr_info *saddr)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_addr_info cur;
	struct sock_common *skc;

	list_for_each_entry(subflow, list, node) {
		skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);

		mptcp_local_address(skc, &cur);
		if (mptcp_addresses_equal(&cur, saddr, saddr->port))
			return true;
	}

	return false;
}

static struct mptcp_pm_add_entry *
mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_add_entry *entry;

	lockdep_assert_held(&msk->pm.lock);

	list_for_each_entry(entry, &msk->pm.anno_list, list) {
		if (mptcp_addresses_equal(&entry->addr, addr, true))
			return entry;
	}

	return NULL;
}

bool mptcp_remove_anno_list_by_saddr(struct mptcp_sock *msk,
				     const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_add_entry *entry;
	bool ret;

	entry = mptcp_pm_del_add_timer(msk, addr, false);
	ret = entry;
	kfree_rcu(entry, rcu);

	return ret;
}

bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk)
{
	struct mptcp_pm_add_entry *entry;
	struct mptcp_addr_info saddr;
	bool ret = false;

	mptcp_local_address((struct sock_common *)sk, &saddr);

	spin_lock_bh(&msk->pm.lock);
	list_for_each_entry(entry, &msk->pm.anno_list, list) {
		if (mptcp_addresses_equal(&entry->addr, &saddr, true)) {
			ret = true;
			goto out;
		}
	}

out:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

static void __mptcp_pm_send_ack(struct mptcp_sock *msk,
				struct mptcp_subflow_context *subflow,
				bool prio, bool backup)
{
	struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
	bool slow;

	pr_debug("send ack for %s\n",
		 prio ? "mp_prio" :
		 (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));

	slow = lock_sock_fast(ssk);
	if (prio) {
		subflow->send_mp_prio = 1;
		subflow->request_bkup = backup;
	}

	__mptcp_subflow_send_ack(ssk);
	unlock_sock_fast(ssk, slow);
}

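/* Same as __mptcp_pm_send_ack(), but callable with the PM spinlock held:
 * the lock is temporarily dropped, as acquiring the subflow socket lock
 * under a BH spinlock is not allowed (lock_sock_fast() can sleep).
 */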
void mptcp_pm_send_ack(struct mptcp_sock *msk,
		       struct mptcp_subflow_context *subflow,
		       bool prio, bool backup)
{
	spin_unlock_bh(&msk->pm.lock);
	__mptcp_pm_send_ack(msk, subflow, prio, backup);
	spin_lock_bh(&msk->pm.lock);
}

void mptcp_pm_addr_send_ack(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow, *alt = NULL;

	msk_owned_by_me(msk);
	lockdep_assert_held(&msk->pm.lock);

	if (!mptcp_pm_should_add_signal(msk) &&
	    !mptcp_pm_should_rm_signal(msk))
		return;

	mptcp_for_each_subflow(msk, subflow) {
		if (__mptcp_subflow_active(subflow)) {
			if (!subflow->stale) {
				mptcp_pm_send_ack(msk, subflow, false, false);
				return;
			}

			if (!alt)
				alt = subflow;
		}
	}

	if (alt)
		mptcp_pm_send_ack(msk, alt, false, false);
}

int mptcp_pm_mp_prio_send_ack(struct mptcp_sock *msk,
			      struct mptcp_addr_info *addr,
			      struct mptcp_addr_info *rem,
			      u8 bkup)
{
	struct mptcp_subflow_context *subflow;

	pr_debug("bkup=%d\n", bkup);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		struct mptcp_addr_info local, remote;

		mptcp_local_address((struct sock_common *)ssk, &local);
		if (!mptcp_addresses_equal(&local, addr, addr->port))
			continue;

		if (rem && rem->family != AF_UNSPEC) {
			mptcp_remote_address((struct sock_common *)ssk, &remote);
			if (!mptcp_addresses_equal(&remote, rem, rem->port))
				continue;
		}

		__mptcp_pm_send_ack(msk, subflow, true, bkup);
		return 0;
	}

	return -EINVAL;
}

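/* Pick the ADD_ADDR retransmission period: the configured timeout, capped
 * by the largest subflow RTO when subflows exist. E.g. (illustrative
 * values) a 120s timeout with a worst-case subflow RTO of 2s yields 2s.
 */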
static unsigned int mptcp_adjust_add_addr_timeout(struct mptcp_sock *msk)
{
	const struct net *net = sock_net((struct sock *)msk);
	unsigned int rto = mptcp_get_add_addr_timeout(net);
	struct mptcp_subflow_context *subflow;
	unsigned int max = 0;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		struct inet_connection_sock *icsk = inet_csk(ssk);

		if (icsk->icsk_rto > max)
			max = icsk->icsk_rto;
	}

	if (max && max < rto)
		rto = max;

	return rto;
}

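/* ADD_ADDR retransmission timer: with a base period T, the announce is
 * retransmitted at intervals of T, then 2T, then 4T (T << retrans_times),
 * until ADD_ADDR_RETRANS_MAX attempts have been made.
 */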
static void mptcp_pm_add_timer(struct timer_list *timer)
{
	struct mptcp_pm_add_entry *entry = timer_container_of(entry, timer,
							      add_timer);
	struct mptcp_sock *msk = entry->sock;
	struct sock *sk = (struct sock *)msk;
	unsigned int timeout;

	pr_debug("msk=%p\n", msk);

	if (!msk)
		return;

	if (inet_sk_state_load(sk) == TCP_CLOSE)
		return;

	if (!entry->addr.id)
		return;

	if (mptcp_pm_should_add_signal_addr(msk)) {
		sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX / 8);
		goto out;
	}

	timeout = mptcp_adjust_add_addr_timeout(msk);
	if (!timeout)
		goto out;

	spin_lock_bh(&msk->pm.lock);

	if (!mptcp_pm_should_add_signal_addr(msk)) {
		pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id);
		mptcp_pm_announce_addr(msk, &entry->addr, false);
		mptcp_pm_add_addr_send_ack(msk);
		entry->retrans_times++;
	}

	if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
		sk_reset_timer(sk, timer,
			       jiffies + (timeout << entry->retrans_times));

	spin_unlock_bh(&msk->pm.lock);

	if (entry->retrans_times == ADD_ADDR_RETRANS_MAX)
		mptcp_pm_subflow_established(msk);

out:
	__sock_put(sk);
}

struct mptcp_pm_add_entry *
mptcp_pm_del_add_timer(struct mptcp_sock *msk,
		       const struct mptcp_addr_info *addr, bool check_id)
{
	struct mptcp_pm_add_entry *entry;
	struct sock *sk = (struct sock *)msk;
	bool stop_timer = false;

	rcu_read_lock();

	spin_lock_bh(&msk->pm.lock);
	entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
	if (entry && (!check_id || entry->addr.id == addr->id)) {
		entry->retrans_times = ADD_ADDR_RETRANS_MAX;
		stop_timer = true;
	}
	if (!check_id && entry)
		list_del(&entry->list);
	spin_unlock_bh(&msk->pm.lock);

	/* Note: entry might have been removed by another thread.
	 * We hold rcu_read_lock() to ensure it is not freed under us.
	 */
	if (stop_timer)
		sk_stop_timer_sync(sk, &entry->add_timer);

	rcu_read_unlock();
	return entry;
}

bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
			      const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_add_entry *add_entry = NULL;
	struct sock *sk = (struct sock *)msk;
	unsigned int timeout;

	lockdep_assert_held(&msk->pm.lock);

	add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr);

	if (add_entry) {
		if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk)))
			return false;

		goto reset_timer;
	}

	add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC);
	if (!add_entry)
		return false;

	list_add(&add_entry->list, &msk->pm.anno_list);

	add_entry->addr = *addr;
	add_entry->sock = msk;
	add_entry->retrans_times = 0;

	timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0);
reset_timer:
	timeout = mptcp_adjust_add_addr_timeout(msk);
	if (timeout)
		sk_reset_timer(sk, &add_entry->add_timer, jiffies + timeout);

	return true;
}

static void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
{
	struct mptcp_pm_add_entry *entry, *tmp;
	struct sock *sk = (struct sock *)msk;
	LIST_HEAD(free_list);

	pr_debug("msk=%p\n", msk);

	spin_lock_bh(&msk->pm.lock);
	list_splice_init(&msk->pm.anno_list, &free_list);
	spin_unlock_bh(&msk->pm.lock);

	list_for_each_entry_safe(entry, tmp, &free_list, list) {
		sk_stop_timer_sync(sk, &entry->add_timer);
		kfree_rcu(entry, rcu);
	}
}

/* path manager command handlers */

int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d, echo=%d\n", msk, addr->id, echo);

	lockdep_assert_held(&msk->pm.lock);

	if (add_addr &
	    (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) {
		MPTCP_INC_STATS(sock_net((struct sock *)msk),
				echo ? MPTCP_MIB_ECHOADDTXDROP : MPTCP_MIB_ADDADDRTXDROP);
		return -EINVAL;
	}

	if (echo) {
		msk->pm.remote = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		msk->pm.local = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}

int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr);

	if (rm_addr) {
		MPTCP_ADD_STATS(sock_net((struct sock *)msk),
				MPTCP_MIB_RMADDRTXDROP, rm_list->nr);
		return -EINVAL;
	}

	msk->pm.rm_list_tx = *rm_list;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	mptcp_pm_addr_send_ack(msk);
	return 0;
}

/* path manager event handlers */

void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d\n", msk, READ_ONCE(msk->token), server_side);

	WRITE_ONCE(pm->server_side, server_side);
	mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}

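/* Decide whether a new incoming subflow (MP_JOIN) can be accepted. The
 * lockless read of pm->accept_subflow is only an optimization; the value is
 * re-checked under the PM lock before the subflow counter is updated.
 */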
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	unsigned int limit_extra_subflows;
	int ret = 0;

	if (mptcp_pm_is_userspace(msk)) {
		if (mptcp_userspace_pm_active(msk)) {
			spin_lock_bh(&pm->lock);
			pm->extra_subflows++;
			spin_unlock_bh(&pm->lock);
			return true;
		}
		return false;
	}

	limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);

	pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk,
		 pm->extra_subflows, limit_extra_subflows,
		 READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->extra_subflows < limit_extra_subflows;
		if (ret && ++pm->extra_subflows == limit_extra_subflows)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}

void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool announce = false;

	pr_debug("msk=%p\n", msk);

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established():
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(pm->status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	if ((pm->status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
		announce = true;

	pm->status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
	spin_unlock_bh(&pm->lock);

	if (announce)
		mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, GFP_ATOMIC);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p\n", msk);

	if (msk->token)
		mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p\n", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_check_next(struct mptcp_sock *msk,
				 const struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool update_subflows;

	update_subflows = subflow->request_join || subflow->mp_join;
	if (mptcp_pm_is_userspace(msk)) {
		if (update_subflows) {
			spin_lock_bh(&pm->lock);
			pm->extra_subflows--;
			spin_unlock_bh(&pm->lock);
		}
		return;
	}

	if (!READ_ONCE(pm->work_pending) && !update_subflows)
		return;

	spin_lock_bh(&pm->lock);
	if (update_subflows)
		__mptcp_pm_close_subflow(msk);

	/* Even if this subflow is not really established, tell the PM to try
	 * to pick the next ones, if possible.
	 */
	if (mptcp_pm_nl_check_work_pending(msk))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_received(const struct sock *ssk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	mptcp_event_addr_announced(ssk, addr);

	spin_lock_bh(&pm->lock);

	if (mptcp_pm_is_userspace(msk)) {
		if (mptcp_userspace_pm_active(msk)) {
			mptcp_pm_announce_addr(msk, addr, true);
			mptcp_pm_add_addr_send_ack(msk);
		} else {
			__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
		}
	/* - id0 should not have a different address
	 * - special case for C-flag: linked to fill_local_addresses_vec()
	 */
	} else if ((addr->id == 0 && !mptcp_pm_is_init_remote_addr(msk, addr)) ||
		   (addr->id > 0 && !READ_ONCE(pm->accept_addr) &&
		    !mptcp_pm_add_addr_c_flag_case(msk))) {
		mptcp_pm_announce_addr(msk, addr, true);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	} else {
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
	}

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
			      const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p\n", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

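/* Close the subflows matching the IDs in @rm_list: for an RM_ADDR event the
 * match is on the remote address ID, for a subflow removal on the local
 * one. The PM lock is dropped around mptcp_subflow_shutdown() and
 * mptcp_close_ssk(), which cannot run under a BH spinlock.
 */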
static void mptcp_pm_rm_addr_or_subflow(struct mptcp_sock *msk,
					const struct mptcp_rm_list *rm_list,
					enum linux_mptcp_mib_field rm_type)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct sock *sk = (struct sock *)msk;
	u8 i;

	pr_debug("%s rm_list_nr %d\n",
		 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);

	msk_owned_by_me(msk);

	if (sk->sk_state == TCP_LISTEN)
		return;

	if (!rm_list->nr)
		return;

	if (list_empty(&msk->conn_list))
		return;

	for (i = 0; i < rm_list->nr; i++) {
		u8 rm_id = rm_list->ids[i];
		bool removed = false;

		mptcp_for_each_subflow_safe(msk, subflow, tmp) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
			u8 remote_id = READ_ONCE(subflow->remote_id);
			int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
			u8 id = subflow_get_local_id(subflow);

			if ((1 << inet_sk_state_load(ssk)) &
			    (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING | TCPF_CLOSE))
				continue;
			if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
				continue;
			if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id)
				continue;

			pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n",
				 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
				 i, rm_id, id, remote_id, msk->mpc_endpoint_id);
			spin_unlock_bh(&msk->pm.lock);
			mptcp_subflow_shutdown(sk, ssk, how);
			removed |= subflow->request_join;

			/* the following takes care of updating the subflows counter */
			mptcp_close_ssk(sk, ssk, subflow);
			spin_lock_bh(&msk->pm.lock);

			if (rm_type == MPTCP_MIB_RMSUBFLOW)
				__MPTCP_INC_STATS(sock_net(sk), rm_type);
		}

		if (rm_type == MPTCP_MIB_RMADDR) {
			__MPTCP_INC_STATS(sock_net(sk), rm_type);
			if (removed && mptcp_pm_is_kernel(msk))
				mptcp_pm_nl_rm_addr(msk, rm_id);
		}
	}
}

static void mptcp_pm_rm_addr_recv(struct mptcp_sock *msk)
{
	mptcp_pm_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR);
}

void mptcp_pm_rm_subflow(struct mptcp_sock *msk,
			 const struct mptcp_rm_list *rm_list)
{
	mptcp_pm_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW);
}

void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
			       const struct mptcp_rm_list *rm_list)
{
	struct mptcp_pm_data *pm = &msk->pm;
	u8 i;

	pr_debug("msk=%p remote_ids_nr=%d\n", msk, rm_list->nr);

	for (i = 0; i < rm_list->nr; i++)
		mptcp_event_addr_removed(msk, rm_list->ids[i]);

	spin_lock_bh(&pm->lock);
	if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
		pm->rm_list_rx = *rm_list;
	else
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;
	struct mptcp_sock *msk;

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	msk = mptcp_sk(sk);
	if (subflow->backup != bkup)
		subflow->backup = bkup;

	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
}

void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	pr_debug("fail_seq=%llu\n", fail_seq);

	/* After accepting the fail, we can't create any other subflows */
	spin_lock_bh(&msk->fallback_lock);
	if (!msk->allow_infinite_fallback) {
		spin_unlock_bh(&msk->fallback_lock);
		return;
	}
	msk->allow_subflows = false;
	spin_unlock_bh(&msk->fallback_lock);

	if (!subflow->fail_tout) {
		pr_debug("send MP_FAIL response and infinite map\n");

		subflow->send_mp_fail = 1;
		subflow->send_infinite_map = 1;
		tcp_send_ack(sk);
	} else {
		pr_debug("MP_FAIL response received\n");
		WRITE_ONCE(subflow->fail_tout, 0);
	}
}

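/* Invoked from the TCP option-writing path: if an ADD_ADDR (or its echo) is
 * pending and fits in the @remaining option space, copy it into @addr and
 * clear the corresponding signal bit.
 */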
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
			      unsigned int opt_size, unsigned int remaining,
			      struct mptcp_addr_info *addr, bool *echo,
			      bool *drop_other_suboptions)
{
	int ret = false;
	u8 add_addr;
	u8 family;
	bool port;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	/* always drop every other option for a pure-ack ADD_ADDR; this is a
	 * plain dup-ack from the TCP perspective. The other MPTCP-relevant
	 * info, if any, will be carried by the 'original' TCP ack
	 */
	if (skb && skb_is_tcp_pure_ack(skb)) {
		remaining += opt_size;
		*drop_other_suboptions = true;
	}

	*echo = mptcp_pm_should_add_signal_echo(msk);
	port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);

	family = *echo ? msk->pm.remote.family : msk->pm.local.family;
	if (remaining < mptcp_add_addr_len(family, *echo, port))
		goto out_unlock;

	if (*echo) {
		*addr = msk->pm.remote;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		*addr = msk->pm.local;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     struct mptcp_rm_list *rm_list)
{
	int ret = false, len;
	u8 rm_addr;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
	len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
	if (len < 0) {
		WRITE_ONCE(msk->pm.addr_signal, rm_addr);
		goto out_unlock;
	}
	if (remaining < len)
		goto out_unlock;

	*rm_list = msk->pm.rm_list_tx;
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	struct mptcp_pm_addr_entry skc_local = { 0 };
	struct mptcp_addr_info msk_local;

	if (WARN_ON_ONCE(!msk))
		return -1;

	/* The 0 ID mapping is defined by the first subflow, copied into the msk
	 * addr
	 */
	mptcp_local_address((struct sock_common *)msk, &msk_local);
	mptcp_local_address((struct sock_common *)skc, &skc_local.addr);
	if (mptcp_addresses_equal(&msk_local, &skc_local.addr, false))
		return 0;

	skc_local.addr.id = 0;
	skc_local.flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_get_local_id(msk, &skc_local);
	return mptcp_pm_nl_get_local_id(msk, &skc_local);
}

bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
{
	struct mptcp_addr_info skc_local;

	mptcp_local_address((struct sock_common *)skc, &skc_local);

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_is_backup(msk, &skc_local);

	return mptcp_pm_nl_is_backup(msk, &skc_local);
}

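/* Mark the given subflow as stale when it has spent more than the configured
 * number of retransmission periods without progress, but only if another
 * active, non-lossy subflow is available to take over the pending data.
 */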
static void mptcp_pm_subflows_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	unsigned int active_max_loss_cnt;
	struct net *net = sock_net(sk);
	unsigned int stale_loss_cnt;
	bool slow;

	stale_loss_cnt = mptcp_stale_loss_cnt(net);
	if (subflow->stale || !stale_loss_cnt || subflow->stale_count <= stale_loss_cnt)
		return;

	/* look for another available subflow not in loss state */
	active_max_loss_cnt = max_t(int, stale_loss_cnt - 1, 1);
	mptcp_for_each_subflow(msk, iter) {
		if (iter != subflow && mptcp_subflow_active(iter) &&
		    iter->stale_count < active_max_loss_cnt) {
			/* we have some alternatives, try to mark this subflow as idle ... */
			slow = lock_sock_fast(ssk);
			if (!tcp_rtx_and_write_queues_empty(ssk)) {
				subflow->stale = 1;
				__mptcp_retransmit_pending_data(sk);
				MPTCP_INC_STATS(net, MPTCP_MIB_SUBFLOWSTALE);
			}
			unlock_sock_fast(ssk, slow);

			/* always try to push the pending data regardless of re-injections:
			 * we can possibly use backup subflows now, and subflow selection
			 * is cheap under the msk socket lock
			 */
			__mptcp_push_pending(sk, 0);
			return;
		}
	}
}

void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);

	/* keep track of rtx periods with no progress */
	if (!subflow->stale_count) {
		subflow->stale_rcv_tstamp = rcv_tstamp;
		subflow->stale_count++;
	} else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
		if (subflow->stale_count < U8_MAX)
			subflow->stale_count++;
		mptcp_pm_subflows_chk_stale(msk, ssk);
	} else {
		subflow->stale_count = 0;
		mptcp_subflow_set_active(subflow);
	}
}

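/* PM worker: runs with the msk socket lock held (see msk_owned_by_me()) and
 * serves the events flagged in pm->status before delegating to the
 * in-kernel PM worker.
 */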
void mptcp_pm_worker(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	msk_owned_by_me(msk);

	if (!(pm->status & MPTCP_PM_WORK_MASK))
		return;

	spin_lock_bh(&msk->pm.lock);

	pr_debug("msk=%p status=%x\n", msk, pm->status);
	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) {
		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK);
		mptcp_pm_addr_send_ack(msk);
	}
	if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) {
		pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED);
		mptcp_pm_rm_addr_recv(msk);
	}
	__mptcp_pm_kernel_worker(msk);

	spin_unlock_bh(&msk->pm.lock);
}

void mptcp_pm_destroy(struct mptcp_sock *msk)
{
	mptcp_pm_free_anno_list(msk);

	if (mptcp_pm_is_userspace(msk))
		mptcp_userspace_pm_free_local_addr_list(msk);
}

void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
	u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
	struct mptcp_pm_data *pm = &msk->pm;

	memset(&pm->reset, 0, sizeof(pm->reset));
	pm->rm_list_tx.nr = 0;
	pm->rm_list_rx.nr = 0;
	WRITE_ONCE(pm->pm_type, pm_type);

	if (pm_type == MPTCP_PM_TYPE_KERNEL) {
		bool subflows_allowed = !!mptcp_pm_get_limit_extra_subflows(msk);

		/* pm->work_pending must only be set to 'true' when
		 * pm->pm_type is set to MPTCP_PM_TYPE_KERNEL
		 */
		WRITE_ONCE(pm->work_pending,
			   (!!mptcp_pm_get_endp_subflow_max(msk) &&
			    subflows_allowed) ||
			   !!mptcp_pm_get_endp_signal_max(msk));
		WRITE_ONCE(pm->accept_addr,
			   !!mptcp_pm_get_limit_add_addr_accepted(msk) &&
			   subflows_allowed);
		WRITE_ONCE(pm->accept_subflow, subflows_allowed);

		bitmap_fill(pm->id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
	}
}

void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);
	INIT_LIST_HEAD(&msk->pm.userspace_pm_local_addr_list);
	mptcp_pm_data_reset(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_kernel_register();
	mptcp_pm_userspace_register();
	mptcp_pm_nl_init();
}

/* Must be called with rcu read lock held */
struct mptcp_pm_ops *mptcp_pm_find(const char *name)
{
	struct mptcp_pm_ops *pm_ops;

	list_for_each_entry_rcu(pm_ops, &mptcp_pm_list, list) {
		if (!strcmp(pm_ops->name, name))
			return pm_ops;
	}

	return NULL;
}

int mptcp_pm_validate(struct mptcp_pm_ops *pm_ops)
{
	return 0;
}

int mptcp_pm_register(struct mptcp_pm_ops *pm_ops)
{
	int ret;

	ret = mptcp_pm_validate(pm_ops);
	if (ret)
		return ret;

	spin_lock(&mptcp_pm_list_lock);
	if (mptcp_pm_find(pm_ops->name)) {
		spin_unlock(&mptcp_pm_list_lock);
		return -EEXIST;
	}
	list_add_tail_rcu(&pm_ops->list, &mptcp_pm_list);
	spin_unlock(&mptcp_pm_list_lock);

	pr_debug("%s registered\n", pm_ops->name);
	return 0;
}

void mptcp_pm_unregister(struct mptcp_pm_ops *pm_ops)
{
	/* skip unregistering the default path manager */
	if (WARN_ON_ONCE(pm_ops == &mptcp_pm_kernel))
		return;

	spin_lock(&mptcp_pm_list_lock);
	list_del_rcu(&pm_ops->list);
	spin_unlock(&mptcp_pm_list_lock);
}
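
/* Example usage (sketch, hypothetical path manager module): a module fills
 * an mptcp_pm_ops with its callbacks, registers it from module init and
 * unregisters it from module exit.
 *
 *	static struct mptcp_pm_ops my_pm = {
 *		.name	= "my_pm",
 *		// other callbacks here
 *	};
 *
 *	ret = mptcp_pm_register(&my_pm);	// -EEXIST if the name is taken
 *	...
 *	mptcp_pm_unregister(&my_pm);
 */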

/* Build string with list of available path manager values.
 * Similar to tcp_get_available_congestion_control()
 */
void mptcp_pm_get_available(char *buf, size_t maxlen)
{
	struct mptcp_pm_ops *pm_ops;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pm_ops, &mptcp_pm_list, list) {
		offs += snprintf(buf + offs, maxlen - offs, "%s%s",
				 offs == 0 ? "" : " ", pm_ops->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}