// SPDX-License-Identifier: GPL-2.0
/*
 * Management Component Transport Protocol (MCTP) - routing
 * implementation.
 *
 * This is currently based on a simple routing table, with no dst cache. The
 * number of routes should stay fairly small, so the lookup cost is small.
 *
 * Copyright (c) 2021 Code Construct
 * Copyright (c) 2021 Google
 */

#include <linux/idr.h>
#include <linux/kconfig.h>
#include <linux/mctp.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

#include <uapi/linux/if_arp.h>

#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/netlink.h>
#include <net/sock.h>

#include <trace/events/mctp.h>

static const unsigned int mctp_message_maxlen = 64 * 1024;
static const unsigned long mctp_key_lifetime = 6 * CONFIG_HZ;

static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev);

/* route output callbacks */
static int mctp_dst_discard(struct mctp_dst *dst, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static struct mctp_sock *mctp_lookup_bind(struct net *net, struct sk_buff *skb)
{
	struct mctp_skb_cb *cb = mctp_cb(skb);
	struct mctp_hdr *mh;
	struct sock *sk;
	u8 type;

	WARN_ON(!rcu_read_lock_held());

	/* TODO: look up in skb->cb? */
	mh = mctp_hdr(skb);

	if (!skb_headlen(skb))
		return NULL;

	type = (*(u8 *)skb->data) & 0x7f;

	sk_for_each_rcu(sk, &net->mctp.binds) {
		struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);

		if (msk->bind_net != MCTP_NET_ANY && msk->bind_net != cb->net)
			continue;

		if (msk->bind_type != type)
			continue;

		if (!mctp_address_matches(msk->bind_addr, mh->dest))
			continue;

		return msk;
	}

	return NULL;
}

/* A note on the key allocations.
 *
 * struct net->mctp.keys contains our set of currently-allocated keys for
 * MCTP tag management. The lookup tuple for these is the peer EID,
 * local EID and MCTP tag.
 *
 * In some cases, the peer EID may be MCTP_ADDR_ANY: for example, when a
 * broadcast message is sent, we may receive responses from any peer EID.
 * Because the broadcast dest address is equivalent to ANY, we create
 * a key with (local = local-eid, peer = ANY). This allows a match on the
 * incoming broadcast responses from any peer.
 *
 * We perform lookups when packets are received, and when tags are allocated
 * in two scenarios:
 *
 *  - when a packet is sent, with a locally-owned tag: we need to find an
 *    unused tag value for the (local, peer) EID pair.
 *
 *  - when a tag is manually allocated: we need to find an unused tag value
 *    for the peer EID, but don't have a specific local EID at that stage.
 *
 * In the latter case, on successful allocation, we end up with a tag with
 * (local = ANY, peer = peer-eid).
 *
 * So, the key set allows both a local EID of ANY, as well as a peer EID of
 * ANY in the lookup tuple. Both may be ANY if we prealloc for a broadcast.
 * The matching (in mctp_key_match()) during lookup allows the match value to
 * be ANY in either the dest or source addresses.
 *
 * When allocating (+ inserting) a tag, we need to check for conflicts amongst
 * the existing tag set. This requires matching either exactly on the local
 * and peer addresses, or either being ANY.
 */
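
/* A minimal illustration of the wildcard rules above, using hypothetical
 * EID and tag values (mctp_key_match() below is the real matcher):
 *
 *	a broadcast send from local EID 8 creates
 *		key = (net, local = 8, peer = MCTP_ADDR_ANY, tag = 1);
 *	a response from peer EID 20 arrives with the TO flag clear, so the
 *	receive path looks up (net, local = 8, peer = 20, tag = 1), and the
 *	ANY peer_addr accepts 20;
 *	a manually-allocated tag instead gives (local = ANY, peer = 20),
 *	reserving tag 1 toward peer 20 for any local EID.
 */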

static bool mctp_key_match(struct mctp_sk_key *key, unsigned int net,
			   mctp_eid_t local, mctp_eid_t peer, u8 tag)
{
	if (key->net != net)
		return false;

	if (!mctp_address_matches(key->local_addr, local))
		return false;

	if (!mctp_address_matches(key->peer_addr, peer))
		return false;

	if (key->tag != tag)
		return false;

	return true;
}

/* returns a key (with key->lock held, and refcounted), or NULL if no such
 * key exists.
 */
static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
					   unsigned int netid, mctp_eid_t peer,
					   unsigned long *irqflags)
	__acquires(&key->lock)
{
	struct mctp_sk_key *key, *ret;
	unsigned long flags;
	struct mctp_hdr *mh;
	u8 tag;

	mh = mctp_hdr(skb);
	tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

	ret = NULL;
	spin_lock_irqsave(&net->mctp.keys_lock, flags);

	hlist_for_each_entry(key, &net->mctp.keys, hlist) {
		if (!mctp_key_match(key, netid, mh->dest, peer, tag))
			continue;

		spin_lock(&key->lock);
		if (key->valid) {
			refcount_inc(&key->refs);
			ret = key;
			break;
		}
		spin_unlock(&key->lock);
	}

	if (ret) {
		spin_unlock(&net->mctp.keys_lock);
		*irqflags = flags;
	} else {
		spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
	}

	return ret;
}
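
/* For reference when reading the tag masking here and below: the
 * flags_seq_tag byte of struct mctp_hdr packs the DSP0236 fields as
 * (mask values from <net/mctp.h>):
 *
 *	bit  7    6    5..4   3    2..0
 *	     SOM  EOM  seq    TO   tag
 *
 * so mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO) extracts
 * the 3-bit tag together with the tag-owner bit, which is exactly the
 * tuple a key stores.
 */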

static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
					  unsigned int net,
					  mctp_eid_t local, mctp_eid_t peer,
					  u8 tag, gfp_t gfp)
{
	struct mctp_sk_key *key;

	key = kzalloc(sizeof(*key), gfp);
	if (!key)
		return NULL;

	key->net = net;
	key->peer_addr = peer;
	key->local_addr = local;
	key->tag = tag;
	key->sk = &msk->sk;
	key->valid = true;
	spin_lock_init(&key->lock);
	refcount_set(&key->refs, 1);
	sock_hold(key->sk);

	return key;
}

void mctp_key_unref(struct mctp_sk_key *key)
{
	unsigned long flags;

	if (!refcount_dec_and_test(&key->refs))
		return;

	/* even though no refs exist here, the lock allows us to stay
	 * consistent with the locking requirement of mctp_dev_release_key
	 */
	spin_lock_irqsave(&key->lock, flags);
	mctp_dev_release_key(key->dev, key);
	spin_unlock_irqrestore(&key->lock, flags);

	sock_put(key->sk);
	kfree(key);
}

static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
{
	struct net *net = sock_net(&msk->sk);
	struct mctp_sk_key *tmp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);

	if (sock_flag(&msk->sk, SOCK_DEAD)) {
		rc = -EINVAL;
		goto out_unlock;
	}

	hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
		if (mctp_key_match(tmp, key->net, key->local_addr,
				   key->peer_addr, key->tag)) {
			spin_lock(&tmp->lock);
			if (tmp->valid)
				rc = -EEXIST;
			spin_unlock(&tmp->lock);
			if (rc)
				break;
		}
	}

	if (!rc) {
		refcount_inc(&key->refs);
		key->expiry = jiffies + mctp_key_lifetime;
		timer_reduce(&msk->key_expiry, key->expiry);

		hlist_add_head(&key->hlist, &net->mctp.keys);
		hlist_add_head(&key->sklist, &msk->keys);
	}

out_unlock:
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	return rc;
}

/* Helper for mctp_dst_input().
 * We're done with the key; unlock and unref the key.
 * For the usual case of automatic expiry we remove the key from lists.
 * In the case that manual allocation is set on a key we release the lock
 * and local ref, reset reassembly, but don't remove from lists.
 */
static void __mctp_key_done_in(struct mctp_sk_key *key, struct net *net,
			       unsigned long flags, unsigned long reason)
__releases(&key->lock)
{
	struct sk_buff *skb;

	trace_mctp_key_release(key, reason);
	skb = key->reasm_head;
	key->reasm_head = NULL;

	if (!key->manual_alloc) {
		key->reasm_dead = true;
		key->valid = false;
		mctp_dev_release_key(key->dev, key);
	}
	spin_unlock_irqrestore(&key->lock, flags);

	if (!key->manual_alloc) {
		spin_lock_irqsave(&net->mctp.keys_lock, flags);
		if (!hlist_unhashed(&key->hlist)) {
			hlist_del_init(&key->hlist);
			hlist_del_init(&key->sklist);
			mctp_key_unref(key);
		}
		spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
	}

	/* and one for the local reference */
	mctp_key_unref(key);

	kfree_skb(skb);
}

#ifdef CONFIG_MCTP_FLOWS
static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key)
{
	struct mctp_flow *flow;

	flow = skb_ext_add(skb, SKB_EXT_MCTP);
	if (!flow)
		return;

	refcount_inc(&key->refs);
	flow->key = key;
}

static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev)
{
	struct mctp_sk_key *key;
	struct mctp_flow *flow;

	flow = skb_ext_find(skb, SKB_EXT_MCTP);
	if (!flow)
		return;

	key = flow->key;

	if (key->dev) {
		WARN_ON(key->dev != dev);
		return;
	}

	mctp_dev_set_key(dev, key);
}
#else
static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key) {}
static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev) {}
#endif

static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
{
	struct mctp_hdr *hdr = mctp_hdr(skb);
	u8 exp_seq, this_seq;

	this_seq = (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT)
		& MCTP_HDR_SEQ_MASK;

	if (!key->reasm_head) {
		/* Since we're manipulating the shared frag_list, ensure it isn't
		 * shared with any other SKBs.
		 */
		key->reasm_head = skb_unshare(skb, GFP_ATOMIC);
		if (!key->reasm_head)
			return -ENOMEM;

		key->reasm_tailp = &(skb_shinfo(key->reasm_head)->frag_list);
		key->last_seq = this_seq;
		return 0;
	}

	exp_seq = (key->last_seq + 1) & MCTP_HDR_SEQ_MASK;

	if (this_seq != exp_seq)
		return -EINVAL;

	if (key->reasm_head->len + skb->len > mctp_message_maxlen)
		return -EINVAL;

	skb->next = NULL;
	skb->sk = NULL;
	*key->reasm_tailp = skb;
	key->reasm_tailp = &skb->next;

	key->last_seq = this_seq;

	key->reasm_head->data_len += skb->len;
	key->reasm_head->len += skb->len;
	key->reasm_head->truesize += skb->truesize;

	return 0;
}
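
/* A short worked example of the 2-bit sequence check above: if
 * key->last_seq is 3, exp_seq = (3 + 1) & MCTP_HDR_SEQ_MASK = 0, so valid
 * sequences run 0, 1, 2, 3, 0, ...; a fragment arriving with this_seq == 1
 * after last_seq == 3 fails the exp_seq comparison and is rejected with
 * -EINVAL rather than being reordered.
 */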

static int mctp_dst_input(struct mctp_dst *dst, struct sk_buff *skb)
{
	struct mctp_sk_key *key, *any_key = NULL;
	struct net *net = dev_net(skb->dev);
	struct mctp_sock *msk;
	struct mctp_hdr *mh;
	unsigned int netid;
	unsigned long f;
	u8 tag, flags;
	int rc;

	msk = NULL;
	rc = -EINVAL;

	/* We may be receiving a locally-routed packet; drop source sk
	 * accounting.
	 *
	 * From here, we will either queue the skb - either to a frag_queue, or
	 * to a receiving socket. When that succeeds, we clear the skb pointer;
	 * a non-NULL skb on exit will be otherwise unowned, and hence
	 * kfree_skb()-ed.
	 */
	skb_orphan(skb);

	if (skb->pkt_type == PACKET_OUTGOING)
		skb->pkt_type = PACKET_LOOPBACK;

	/* ensure we have enough data for a header and a type */
	if (skb->len < sizeof(struct mctp_hdr) + 1)
		goto out;

	/* grab header, advance data ptr */
	mh = mctp_hdr(skb);
	netid = mctp_cb(skb)->net;
	skb_pull(skb, sizeof(struct mctp_hdr));

	if (mh->ver != 1)
		goto out;

	flags = mh->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
	tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

	rcu_read_lock();

	/* lookup socket / reasm context, exactly matching (src,dest,tag).
	 * we hold a ref on the key, and key->lock held.
	 */
	key = mctp_lookup_key(net, skb, netid, mh->src, &f);

	if (flags & MCTP_HDR_FLAG_SOM) {
		if (key) {
			msk = container_of(key->sk, struct mctp_sock, sk);
		} else {
			/* first response to a broadcast? do a more general
			 * key lookup to find the socket, but don't use this
			 * key for reassembly - we'll create a more specific
			 * one for future packets if required (ie, !EOM).
			 *
			 * this lookup requires key->peer to be MCTP_ADDR_ANY,
			 * it doesn't match just any key->peer.
			 */
			any_key = mctp_lookup_key(net, skb, netid,
						  MCTP_ADDR_ANY, &f);
			if (any_key) {
				msk = container_of(any_key->sk,
						   struct mctp_sock, sk);
				spin_unlock_irqrestore(&any_key->lock, f);
			}
		}

		if (!key && !msk && (tag & MCTP_HDR_FLAG_TO))
			msk = mctp_lookup_bind(net, skb);

		if (!msk) {
			rc = -ENOENT;
			goto out_unlock;
		}

		/* single-packet message? deliver to socket, clean up any
		 * pending key.
		 */
		if (flags & MCTP_HDR_FLAG_EOM) {
			rc = sock_queue_rcv_skb(&msk->sk, skb);
			if (!rc)
				skb = NULL;
			if (key) {
				/* we've hit a pending reassembly; not much we
				 * can do but drop it
				 */
				__mctp_key_done_in(key, net, f,
						   MCTP_TRACE_KEY_REPLIED);
				key = NULL;
			}
			goto out_unlock;
		}

		/* broadcast response or a bind() - create a key for further
		 * packets for this message
		 */
		if (!key) {
			key = mctp_key_alloc(msk, netid, mh->dest, mh->src,
					     tag, GFP_ATOMIC);
			if (!key) {
				rc = -ENOMEM;
				goto out_unlock;
			}

			/* we can queue without the key lock here, as the
			 * key isn't observable yet
			 */
			mctp_frag_queue(key, skb);

			/* if the key_add fails, we've raced with another
			 * SOM packet with the same src, dest and tag. There's
			 * no way to distinguish future packets, so all we
			 * can do is drop; we'll free the skb on exit from
			 * this function.
			 */
			rc = mctp_key_add(key, msk);
			if (!rc) {
				trace_mctp_key_acquire(key);
				skb = NULL;
			}

			/* we don't need to release key->lock on exit, so
			 * clean up here and suppress the unlock via
			 * setting to NULL
			 */
			mctp_key_unref(key);
			key = NULL;

		} else {
			if (key->reasm_head || key->reasm_dead) {
				/* duplicate start? drop everything */
				__mctp_key_done_in(key, net, f,
						   MCTP_TRACE_KEY_INVALIDATED);
				rc = -EEXIST;
				key = NULL;
			} else {
				rc = mctp_frag_queue(key, skb);
				if (!rc)
					skb = NULL;
			}
		}

	} else if (key) {
		/* this packet continues a previous message; reassemble
		 * using the message-specific key
		 */

		/* we need to be continuing an existing reassembly... */
		if (!key->reasm_head)
			rc = -EINVAL;
		else
			rc = mctp_frag_queue(key, skb);

		if (rc)
			goto out_unlock;

		/* we've queued; the queue owns the skb now */
		skb = NULL;

		/* end of message? deliver to socket, and we're done with
		 * the reassembly/response key
		 */
		if (flags & MCTP_HDR_FLAG_EOM) {
			rc = sock_queue_rcv_skb(key->sk, key->reasm_head);
			if (!rc)
				key->reasm_head = NULL;
			__mctp_key_done_in(key, net, f, MCTP_TRACE_KEY_REPLIED);
			key = NULL;
		}

	} else {
		/* not a start, no matching key */
		rc = -ENOENT;
	}

out_unlock:
	rcu_read_unlock();
	if (key) {
		spin_unlock_irqrestore(&key->lock, f);
		mctp_key_unref(key);
	}
	if (any_key)
		mctp_key_unref(any_key);
out:
	kfree_skb(skb);
	return rc;
}
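
/* A reading aid for the dispatch above, not new behaviour - how a packet's
 * SOM/EOM flags combine with a matched key:
 *
 *	SOM+EOM: single-packet message, queued straight to the socket;
 *	SOM only: starts reassembly, allocating a key if none matched;
 *	EOM only, or neither: must continue an existing reassembly,
 *	otherwise -ENOENT.
 */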

static int mctp_dst_output(struct mctp_dst *dst, struct sk_buff *skb)
{
	struct mctp_hdr *hdr = mctp_hdr(skb);
	char daddr_buf[MAX_ADDR_LEN];
	char *daddr = NULL;
	int rc;

	skb->protocol = htons(ETH_P_MCTP);
	skb->pkt_type = PACKET_OUTGOING;

	if (skb->len > dst->mtu) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* direct route; use the hwaddr we stashed in sendmsg */
	if (dst->halen) {
		if (dst->halen != skb->dev->addr_len) {
			/* sanity check, sendmsg should have already caught this */
			kfree_skb(skb);
			return -EMSGSIZE;
		}
		daddr = dst->haddr;
	} else {
		/* If lookup fails let the device handle daddr==NULL */
		if (mctp_neigh_lookup(dst->dev, hdr->dest, daddr_buf) == 0)
			daddr = daddr_buf;
	}

	rc = dev_hard_header(skb, skb->dev, ntohs(skb->protocol),
			     daddr, skb->dev->dev_addr, skb->len);
	if (rc < 0) {
		kfree_skb(skb);
		return -EHOSTUNREACH;
	}

	mctp_flow_prepare_output(skb, dst->dev);

	rc = dev_queue_xmit(skb);
	if (rc)
		rc = net_xmit_errno(rc);

	return rc;
}

/* route alloc/release */
static void mctp_route_release(struct mctp_route *rt)
{
	if (refcount_dec_and_test(&rt->refs)) {
		mctp_dev_put(rt->dev);
		kfree_rcu(rt, rcu);
	}
}

/* returns a route with the refcount at 1 */
static struct mctp_route *mctp_route_alloc(void)
{
	struct mctp_route *rt;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return NULL;

	INIT_LIST_HEAD(&rt->list);
	refcount_set(&rt->refs, 1);
	rt->output = mctp_dst_discard;

	return rt;
}

unsigned int mctp_default_net(struct net *net)
{
	return READ_ONCE(net->mctp.default_net);
}

int mctp_default_net_set(struct net *net, unsigned int index)
{
	if (index == 0)
		return -EINVAL;
	WRITE_ONCE(net->mctp.default_net, index);
	return 0;
}

/* tag management */
static void mctp_reserve_tag(struct net *net, struct mctp_sk_key *key,
			     struct mctp_sock *msk)
{
	struct netns_mctp *mns = &net->mctp;

	lockdep_assert_held(&mns->keys_lock);

	key->expiry = jiffies + mctp_key_lifetime;
	timer_reduce(&msk->key_expiry, key->expiry);

	/* we hold the net's keys_lock here, allowing updates to both
	 * the net and sk lists
	 */
	hlist_add_head_rcu(&key->hlist, &mns->keys);
	hlist_add_head_rcu(&key->sklist, &msk->keys);
	refcount_inc(&key->refs);
}

/* Allocate a locally-owned tag value for (local, peer), and reserve
 * it for the socket msk
 */
struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
					 unsigned int netid,
					 mctp_eid_t local, mctp_eid_t peer,
					 bool manual, u8 *tagp)
{
	struct net *net = sock_net(&msk->sk);
	struct netns_mctp *mns = &net->mctp;
	struct mctp_sk_key *key, *tmp;
	unsigned long flags;
	u8 tagbits;

	/* for NULL destination EIDs, we may get a response from any peer */
	if (peer == MCTP_ADDR_NULL)
		peer = MCTP_ADDR_ANY;

	/* be optimistic, alloc now */
	key = mctp_key_alloc(msk, netid, local, peer, 0, GFP_KERNEL);
	if (!key)
		return ERR_PTR(-ENOMEM);

	/* 8 possible tag values */
	tagbits = 0xff;

	spin_lock_irqsave(&mns->keys_lock, flags);

	/* Walk through the existing keys, looking for potential conflicting
	 * tags. If we find a conflict, clear that bit from tagbits
	 */
	hlist_for_each_entry(tmp, &mns->keys, hlist) {
		/* We can check the lookup fields (*_addr, tag) without the
		 * lock held, they don't change over the lifetime of the key.
		 */

		/* tags are net-specific */
		if (tmp->net != netid)
			continue;

		/* if we don't own the tag, it can't conflict */
		if (tmp->tag & MCTP_HDR_FLAG_TO)
			continue;

		/* Since we're avoiding conflicting entries, match peer and
		 * local addresses, including with a wildcard on ANY. See
		 * 'A note on key allocations' for background.
		 */
		if (peer != MCTP_ADDR_ANY &&
		    !mctp_address_matches(tmp->peer_addr, peer))
			continue;

		if (local != MCTP_ADDR_ANY &&
		    !mctp_address_matches(tmp->local_addr, local))
			continue;

		spin_lock(&tmp->lock);
		/* key must still be valid. If we find a match, clear the
		 * potential tag value
		 */
		if (tmp->valid)
			tagbits &= ~(1 << tmp->tag);
		spin_unlock(&tmp->lock);

		if (!tagbits)
			break;
	}

	if (tagbits) {
		key->tag = __ffs(tagbits);
		mctp_reserve_tag(net, key, msk);
		trace_mctp_key_acquire(key);

		key->manual_alloc = manual;
		*tagp = key->tag;
	}

	spin_unlock_irqrestore(&mns->keys_lock, flags);

	if (!tagbits) {
		mctp_key_unref(key);
		return ERR_PTR(-EBUSY);
	}

	return key;
}
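
/* A worked example of the tagbits bookkeeping in mctp_alloc_local_tag(),
 * with hypothetical existing keys: starting from tagbits = 0xff (all eight
 * tags free), valid locally-owned keys for this (local, peer) pair holding
 * tags 0 and 2 clear bits 0 and 2, leaving 0xfa; __ffs(0xfa) = 1, so tag 1
 * is reserved. Only when all eight bits are cleared does the allocation
 * fail with -EBUSY.
 */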

static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk,
						    unsigned int netid,
						    mctp_eid_t daddr,
						    u8 req_tag, u8 *tagp)
{
	struct net *net = sock_net(&msk->sk);
	struct netns_mctp *mns = &net->mctp;
	struct mctp_sk_key *key, *tmp;
	unsigned long flags;

	req_tag &= ~(MCTP_TAG_PREALLOC | MCTP_TAG_OWNER);
	key = NULL;

	spin_lock_irqsave(&mns->keys_lock, flags);

	hlist_for_each_entry(tmp, &mns->keys, hlist) {
		if (tmp->net != netid)
			continue;

		if (tmp->tag != req_tag)
			continue;

		if (!mctp_address_matches(tmp->peer_addr, daddr))
			continue;

		if (!tmp->manual_alloc)
			continue;

		spin_lock(&tmp->lock);
		if (tmp->valid) {
			key = tmp;
			refcount_inc(&key->refs);
			spin_unlock(&tmp->lock);
			break;
		}
		spin_unlock(&tmp->lock);
	}
	spin_unlock_irqrestore(&mns->keys_lock, flags);

	if (!key)
		return ERR_PTR(-ENOENT);

	if (tagp)
		*tagp = key->tag;

	return key;
}

/* routing lookups */
static bool mctp_rt_match_eid(struct mctp_route *rt,
			      unsigned int net, mctp_eid_t eid)
{
	return READ_ONCE(rt->dev->net) == net &&
		rt->min <= eid && rt->max >= eid;
}

/* compares match, used for duplicate prevention */
static bool mctp_rt_compare_exact(struct mctp_route *rt1,
				  struct mctp_route *rt2)
{
	ASSERT_RTNL();
	return rt1->dev->net == rt2->dev->net &&
		rt1->min == rt2->min &&
		rt1->max == rt2->max;
}

static void mctp_dst_from_route(struct mctp_dst *dst, struct mctp_route *route)
{
	mctp_dev_hold(route->dev);
	dst->dev = route->dev;
	dst->mtu = route->mtu ?: READ_ONCE(dst->dev->dev->mtu);
	dst->halen = 0;
	dst->output = route->output;
}

int mctp_dst_from_extaddr(struct mctp_dst *dst, struct net *net, int ifindex,
			  unsigned char halen, const unsigned char *haddr)
{
	struct net_device *netdev;
	struct mctp_dev *dev;
	int rc = -ENOENT;

	if (halen > sizeof(dst->haddr))
		return -EINVAL;

	rcu_read_lock();

	netdev = dev_get_by_index_rcu(net, ifindex);
	if (!netdev)
		goto out_unlock;

	if (netdev->addr_len != halen) {
		rc = -EINVAL;
		goto out_unlock;
	}

	dev = __mctp_dev_get(netdev);
	if (!dev)
		goto out_unlock;

	dst->dev = dev;
	dst->mtu = READ_ONCE(netdev->mtu);
	dst->halen = halen;
	dst->output = mctp_dst_output;
	memcpy(dst->haddr, haddr, halen);

	rc = 0;

out_unlock:
	rcu_read_unlock();
	return rc;
}

void mctp_dst_release(struct mctp_dst *dst)
{
	mctp_dev_put(dst->dev);
}

/* populates *dst on successful lookup, if set */
int mctp_route_lookup(struct net *net, unsigned int dnet,
		      mctp_eid_t daddr, struct mctp_dst *dst)
{
	int rc = -EHOSTUNREACH;
	struct mctp_route *rt;

	rcu_read_lock();

	list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
		/* TODO: add metrics */
		if (!mctp_rt_match_eid(rt, dnet, daddr))
			continue;

		if (dst)
			mctp_dst_from_route(dst, rt);
		rc = 0;
		break;
	}

	rcu_read_unlock();

	return rc;
}

static int mctp_route_lookup_null(struct net *net, struct net_device *dev,
				  struct mctp_dst *dst)
{
	int rc = -EHOSTUNREACH;
	struct mctp_route *rt;

	rcu_read_lock();

	list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
		if (rt->dev->dev != dev || rt->type != RTN_LOCAL)
			continue;

		mctp_dst_from_route(dst, rt);
		rc = 0;
		break;
	}

	rcu_read_unlock();

	return rc;
}

static int mctp_do_fragment_route(struct mctp_dst *dst, struct sk_buff *skb,
				  unsigned int mtu, u8 tag)
{
	const unsigned int hlen = sizeof(struct mctp_hdr);
	struct mctp_hdr *hdr, *hdr2;
	unsigned int pos, size, headroom;
	struct sk_buff *skb2;
	int rc;
	u8 seq;

	hdr = mctp_hdr(skb);
	seq = 0;
	rc = 0;

	if (mtu < hlen + 1) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* keep same headroom as the original skb */
	headroom = skb_headroom(skb);

	/* we've got the header */
	skb_pull(skb, hlen);

	for (pos = 0; pos < skb->len;) {
		/* size of message payload */
		size = min(mtu - hlen, skb->len - pos);

		skb2 = alloc_skb(headroom + hlen + size, GFP_KERNEL);
		if (!skb2) {
			rc = -ENOMEM;
			break;
		}

		/* generic skb copy */
		skb2->protocol = skb->protocol;
		skb2->priority = skb->priority;
		skb2->dev = skb->dev;
		memcpy(skb2->cb, skb->cb, sizeof(skb2->cb));

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/* establish packet */
		skb_reserve(skb2, headroom);
		skb_reset_network_header(skb2);
		skb_put(skb2, hlen + size);
		skb2->transport_header = skb2->network_header + hlen;

		/* copy header fields, calculate SOM/EOM flags & seq */
		hdr2 = mctp_hdr(skb2);
		hdr2->ver = hdr->ver;
		hdr2->dest = hdr->dest;
		hdr2->src = hdr->src;
		hdr2->flags_seq_tag = tag &
			(MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

		if (pos == 0)
			hdr2->flags_seq_tag |= MCTP_HDR_FLAG_SOM;

		if (pos + size == skb->len)
			hdr2->flags_seq_tag |= MCTP_HDR_FLAG_EOM;

		hdr2->flags_seq_tag |= seq << MCTP_HDR_SEQ_SHIFT;

		/* copy message payload */
		skb_copy_bits(skb, pos, skb_transport_header(skb2), size);

		/* we need to copy the extensions, for MCTP flow data */
		skb_ext_copy(skb2, skb);

		/* do route */
		rc = dst->output(dst, skb2);
		if (rc)
			break;

		seq = (seq + 1) & MCTP_HDR_SEQ_MASK;
		pos += size;
	}

	consume_skb(skb);
	return rc;
}
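
/* A worked example of the fragmentation arithmetic above, assuming the
 * 4-byte MCTP header (so hlen = 4) and hypothetical sizes: with mtu = 68,
 * each fragment carries min(64, remaining) bytes of payload, so a 200-byte
 * message goes out as byte ranges 0-63 (SOM, seq 0), 64-127 (seq 1),
 * 128-191 (seq 2) and 192-199 (EOM, seq 3); a longer payload would wrap
 * seq back to 0 via MCTP_HDR_SEQ_MASK.
 */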

int mctp_local_output(struct sock *sk, struct mctp_dst *dst,
		      struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag)
{
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct mctp_sk_key *key;
	struct mctp_hdr *hdr;
	unsigned long flags;
	unsigned int netid;
	unsigned int mtu;
	mctp_eid_t saddr;
	int rc;
	u8 tag;

	rc = -ENODEV;

	spin_lock_irqsave(&dst->dev->addrs_lock, flags);
	if (dst->dev->num_addrs == 0) {
		rc = -EHOSTUNREACH;
	} else {
		/* use the outbound interface's first address as our source */
		saddr = dst->dev->addrs[0];
		rc = 0;
	}
	spin_unlock_irqrestore(&dst->dev->addrs_lock, flags);
	netid = READ_ONCE(dst->dev->net);

	if (rc)
		goto out_release;

	if (req_tag & MCTP_TAG_OWNER) {
		if (req_tag & MCTP_TAG_PREALLOC)
			key = mctp_lookup_prealloc_tag(msk, netid, daddr,
						       req_tag, &tag);
		else
			key = mctp_alloc_local_tag(msk, netid, saddr, daddr,
						   false, &tag);

		if (IS_ERR(key)) {
			rc = PTR_ERR(key);
			goto out_release;
		}
		mctp_skb_set_flow(skb, key);
		/* done with the key in this scope */
		mctp_key_unref(key);
		tag |= MCTP_HDR_FLAG_TO;
	} else {
		key = NULL;
		tag = req_tag & MCTP_TAG_MASK;
	}

	skb->pkt_type = PACKET_OUTGOING;
	skb->protocol = htons(ETH_P_MCTP);
	skb->priority = 0;
	skb_reset_transport_header(skb);
	skb_push(skb, sizeof(struct mctp_hdr));
	skb_reset_network_header(skb);
	skb->dev = dst->dev->dev;

	/* set up common header fields */
	hdr = mctp_hdr(skb);
	hdr->ver = 1;
	hdr->dest = daddr;
	hdr->src = saddr;

	mtu = dst->mtu;

	if (skb->len + sizeof(struct mctp_hdr) <= mtu) {
		hdr->flags_seq_tag = MCTP_HDR_FLAG_SOM |
			MCTP_HDR_FLAG_EOM | tag;
		rc = dst->output(dst, skb);
	} else {
		rc = mctp_do_fragment_route(dst, skb, mtu, tag);
	}

	/* route output functions consume the skb, even on error */
	skb = NULL;

out_release:
	kfree_skb(skb);
	return rc;
}

/* route management */
static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
			  unsigned int daddr_extent, unsigned int mtu,
			  unsigned char type)
{
	int (*rtfn)(struct mctp_dst *dst, struct sk_buff *skb);
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *ert;

	if (!mctp_address_unicast(daddr_start))
		return -EINVAL;

	if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255)
		return -EINVAL;

	switch (type) {
	case RTN_LOCAL:
		rtfn = mctp_dst_input;
		break;
	case RTN_UNICAST:
		rtfn = mctp_dst_output;
		break;
	default:
		return -EINVAL;
	}

	ASSERT_RTNL();

	rt = mctp_route_alloc();
	if (!rt)
		return -ENOMEM;

	rt->min = daddr_start;
	rt->max = daddr_start + daddr_extent;
	rt->mtu = mtu;
	rt->dev = mdev;
	mctp_dev_hold(rt->dev);
	rt->type = type;
	rt->output = rtfn;

	/* Prevent duplicate identical routes. */
	list_for_each_entry(ert, &net->mctp.routes, list) {
		if (mctp_rt_compare_exact(rt, ert)) {
			mctp_route_release(rt);
			return -EEXIST;
		}
	}

	list_add_rcu(&rt->list, &net->mctp.routes);

	return 0;
}
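
/* Note on the range arithmetic above: a route covers the inclusive EID
 * range [daddr_start, daddr_start + daddr_extent], so a hypothetical
 * request with daddr_start = 8 and daddr_extent = 3 covers EIDs 8-11, and
 * an extent of 0 is a single-EID route. This is also why the netlink code
 * below treats rtm_dst_len as a count of extra EIDs, not a prefix length
 * in bits.
 */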

static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
			     unsigned int daddr_extent, unsigned char type)
{
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *tmp;
	mctp_eid_t daddr_end;
	bool dropped;

	if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255)
		return -EINVAL;

	daddr_end = daddr_start + daddr_extent;
	dropped = false;

	ASSERT_RTNL();

	list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
		if (rt->dev == mdev &&
		    rt->min == daddr_start && rt->max == daddr_end &&
		    rt->type == type) {
			list_del_rcu(&rt->list);
			/* TODO: immediate RTM_DELROUTE */
			mctp_route_release(rt);
			dropped = true;
		}
	}

	return dropped ? 0 : -ENOENT;
}

int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
	return mctp_route_add(mdev, addr, 0, 0, RTN_LOCAL);
}

int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
	return mctp_route_remove(mdev, addr, 0, RTN_LOCAL);
}

/* removes all entries for a given device */
void mctp_route_remove_dev(struct mctp_dev *mdev)
{
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
		if (rt->dev == mdev) {
			list_del_rcu(&rt->list);
			/* TODO: immediate RTM_DELROUTE */
			mctp_route_release(rt);
		}
	}
}

/* Incoming packet-handling */

static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
				struct packet_type *pt,
				struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct mctp_dev *mdev;
	struct mctp_skb_cb *cb;
	struct mctp_dst dst;
	struct mctp_hdr *mh;
	int rc;

	rcu_read_lock();
	mdev = __mctp_dev_get(dev);
	rcu_read_unlock();
	if (!mdev)
		goto err_drop;

	/* basic non-data sanity checks */
	if (!pskb_may_pull(skb, sizeof(struct mctp_hdr)))
		goto err_drop;

	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);

	/* We have enough for a header; decode and route */
	mh = mctp_hdr(skb);
	if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX)
		goto err_drop;

	/* source must be valid unicast or null; drop reserved ranges and
	 * broadcast
	 */
	if (!(mctp_address_unicast(mh->src) || mctp_address_null(mh->src)))
		goto err_drop;

	/* dest address: as above, but allow broadcast */
	if (!(mctp_address_unicast(mh->dest) || mctp_address_null(mh->dest) ||
	      mctp_address_broadcast(mh->dest)))
		goto err_drop;

	/* MCTP drivers must populate halen/haddr */
	if (dev->type == ARPHRD_MCTP) {
		cb = mctp_cb(skb);
	} else {
		cb = __mctp_cb(skb);
		cb->halen = 0;
	}
	cb->net = READ_ONCE(mdev->net);
	cb->ifindex = dev->ifindex;

	rc = mctp_route_lookup(net, cb->net, mh->dest, &dst);

	/* NULL EID, but addressed to our physical address */
	if (rc && mh->dest == MCTP_ADDR_NULL && skb->pkt_type == PACKET_HOST)
		rc = mctp_route_lookup_null(net, dev, &dst);

	if (rc)
		goto err_drop;

	dst.output(&dst, skb);
	mctp_dst_release(&dst);
	mctp_dev_put(mdev);

	return NET_RX_SUCCESS;

err_drop:
	kfree_skb(skb);
	mctp_dev_put(mdev);
	return NET_RX_DROP;
}

static struct packet_type mctp_packet_type = {
	.type = cpu_to_be16(ETH_P_MCTP),
	.func = mctp_pkttype_receive,
};

/* netlink interface */

static const struct nla_policy rta_mctp_policy[RTA_MAX + 1] = {
	[RTA_DST]		= { .type = NLA_U8 },
	[RTA_METRICS]		= { .type = NLA_NESTED },
	[RTA_OIF]		= { .type = NLA_U32 },
};

/* Common part for RTM_NEWROUTE and RTM_DELROUTE parsing.
 * tb must hold RTA_MAX+1 elements.
 */
static int mctp_route_nlparse(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack,
			      struct nlattr **tb, struct rtmsg **rtm,
			      struct mctp_dev **mdev, mctp_eid_t *daddr_start)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	unsigned int ifindex;
	int rc;

	rc = nlmsg_parse(nlh, sizeof(struct rtmsg), tb, RTA_MAX,
			 rta_mctp_policy, extack);
	if (rc < 0) {
		NL_SET_ERR_MSG(extack, "incorrect format");
		return rc;
	}

	if (!tb[RTA_DST]) {
		NL_SET_ERR_MSG(extack, "dst EID missing");
		return -EINVAL;
	}
	*daddr_start = nla_get_u8(tb[RTA_DST]);

	if (!tb[RTA_OIF]) {
		NL_SET_ERR_MSG(extack, "ifindex missing");
		return -EINVAL;
	}
	ifindex = nla_get_u32(tb[RTA_OIF]);

	*rtm = nlmsg_data(nlh);
	if ((*rtm)->rtm_family != AF_MCTP) {
		NL_SET_ERR_MSG(extack, "route family must be AF_MCTP");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "bad ifindex");
		return -ENODEV;
	}
	*mdev = mctp_dev_get_rtnl(dev);
	if (!*mdev)
		return -ENODEV;

	if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack, "no routes to loopback");
		return -EINVAL;
	}

	return 0;
}

static const struct nla_policy rta_metrics_policy[RTAX_MAX + 1] = {
	[RTAX_MTU]		= { .type = NLA_U32 },
};

static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RTA_MAX + 1];
	struct nlattr *tbx[RTAX_MAX + 1];
	mctp_eid_t daddr_start;
	struct mctp_dev *mdev;
	struct rtmsg *rtm;
	unsigned int mtu;
	int rc;

	rc = mctp_route_nlparse(skb, nlh, extack, tb,
				&rtm, &mdev, &daddr_start);
	if (rc < 0)
		return rc;

	if (rtm->rtm_type != RTN_UNICAST) {
		NL_SET_ERR_MSG(extack, "rtm_type must be RTN_UNICAST");
		return -EINVAL;
	}

	mtu = 0;
	if (tb[RTA_METRICS]) {
		rc = nla_parse_nested(tbx, RTAX_MAX, tb[RTA_METRICS],
				      rta_metrics_policy, NULL);
		if (rc < 0)
			return rc;
		if (tbx[RTAX_MTU])
			mtu = nla_get_u32(tbx[RTAX_MTU]);
	}

	rc = mctp_route_add(mdev, daddr_start, rtm->rtm_dst_len, mtu,
			    rtm->rtm_type);
	return rc;
}

static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RTA_MAX + 1];
	mctp_eid_t daddr_start;
	struct mctp_dev *mdev;
	struct rtmsg *rtm;
	int rc;

	rc = mctp_route_nlparse(skb, nlh, extack, tb,
				&rtm, &mdev, &daddr_start);
	if (rc < 0)
		return rc;

	/* we only have unicast routes */
	if (rtm->rtm_type != RTN_UNICAST)
		return -EINVAL;

	rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST);
	return rc;
}

static int mctp_fill_rtinfo(struct sk_buff *skb, struct mctp_route *rt,
			    u32 portid, u32 seq, int event, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *hdr;
	void *metrics;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
	if (!nlh)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->rtm_family = AF_MCTP;

	/* we use the _len fields as a number of EIDs, rather than
	 * a number of bits in the address
	 */
	hdr->rtm_dst_len = rt->max - rt->min;
	hdr->rtm_src_len = 0;
	hdr->rtm_tos = 0;
	hdr->rtm_table = RT_TABLE_DEFAULT;
	hdr->rtm_protocol = RTPROT_STATIC; /* everything is user-defined */
	hdr->rtm_scope = RT_SCOPE_LINK; /* TODO: scope in mctp_route? */
	hdr->rtm_type = rt->type;

	if (nla_put_u8(skb, RTA_DST, rt->min))
		goto cancel;

	metrics = nla_nest_start_noflag(skb, RTA_METRICS);
	if (!metrics)
		goto cancel;

	if (rt->mtu) {
		if (nla_put_u32(skb, RTAX_MTU, rt->mtu))
			goto cancel;
	}

	nla_nest_end(skb, metrics);

	if (rt->dev) {
		if (nla_put_u32(skb, RTA_OIF, rt->dev->dev->ifindex))
			goto cancel;
	}

	/* TODO: conditional neighbour physaddr? */

	nlmsg_end(skb, nlh);

	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int mctp_dump_rtinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mctp_route *rt;
	int s_idx, idx;

	/* TODO: allow filtering on route data, possibly under
	 * cb->strict_check
	 */

	/* TODO: change to struct overlay */
	s_idx = cb->args[0];
	idx = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
		if (idx++ < s_idx)
			continue;
		if (mctp_fill_rtinfo(skb, rt,
				     NETLINK_CB(cb->skb).portid,
				     cb->nlh->nlmsg_seq,
				     RTM_NEWROUTE, NLM_F_MULTI) < 0)
			break;
	}

	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

/* net namespace implementation */
static int __net_init mctp_routes_net_init(struct net *net)
{
	struct netns_mctp *ns = &net->mctp;

	INIT_LIST_HEAD(&ns->routes);
	INIT_HLIST_HEAD(&ns->binds);
	mutex_init(&ns->bind_lock);
	INIT_HLIST_HEAD(&ns->keys);
	spin_lock_init(&ns->keys_lock);
	WARN_ON(mctp_default_net_set(net, MCTP_INITIAL_DEFAULT_NET));
	return 0;
}

static void __net_exit mctp_routes_net_exit(struct net *net)
{
	struct mctp_route *rt;

	rcu_read_lock();
	list_for_each_entry_rcu(rt, &net->mctp.routes, list)
		mctp_route_release(rt);
	rcu_read_unlock();
}

static struct pernet_operations mctp_net_ops = {
	.init = mctp_routes_net_init,
	.exit = mctp_routes_net_exit,
};

static const struct rtnl_msg_handler mctp_route_rtnl_msg_handlers[] = {
	{THIS_MODULE, PF_MCTP, RTM_NEWROUTE, mctp_newroute, NULL, 0},
	{THIS_MODULE, PF_MCTP, RTM_DELROUTE, mctp_delroute, NULL, 0},
	{THIS_MODULE, PF_MCTP, RTM_GETROUTE, NULL, mctp_dump_rtinfo, 0},
};

int __init mctp_routes_init(void)
{
	int err;

	dev_add_pack(&mctp_packet_type);

	err = register_pernet_subsys(&mctp_net_ops);
	if (err)
		goto err_pernet;

	err = rtnl_register_many(mctp_route_rtnl_msg_handlers);
	if (err)
		goto err_rtnl;

	return 0;

err_rtnl:
	unregister_pernet_subsys(&mctp_net_ops);
err_pernet:
	dev_remove_pack(&mctp_packet_type);
	return err;
}

void mctp_routes_exit(void)
{
	rtnl_unregister_many(mctp_route_rtnl_msg_handlers);
	unregister_pernet_subsys(&mctp_net_ops);
	dev_remove_pack(&mctp_packet_type);
}

#if IS_ENABLED(CONFIG_MCTP_TEST)
#include "test/route-test.c"
#endif