// SPDX-License-Identifier: GPL-2.0
/*
 * Management Component Transport Protocol (MCTP) - routing
 * implementation.
 *
 * This is currently based on a simple routing table, with no dst cache. The
 * number of routes should stay fairly small, so the lookup cost is small.
 *
 * Copyright (c) 2021 Code Construct
 * Copyright (c) 2021 Google
 */

#include <linux/idr.h>
#include <linux/kconfig.h>
#include <linux/mctp.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

#include <kunit/static_stub.h>

#include <uapi/linux/if_arp.h>

#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/netlink.h>
#include <net/sock.h>

#include <trace/events/mctp.h>

static const unsigned int mctp_message_maxlen = 64 * 1024;
static const unsigned long mctp_key_lifetime = 6 * CONFIG_HZ;

static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev);

/* route output callbacks */
static int mctp_dst_discard(struct mctp_dst *dst, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static struct mctp_sock *mctp_lookup_bind_details(struct net *net,
						  struct sk_buff *skb,
						  u8 type, u8 dest,
						  u8 src, bool allow_net_any)
{
	struct mctp_skb_cb *cb = mctp_cb(skb);
	struct sock *sk;
	u8 hash;

	WARN_ON_ONCE(!rcu_read_lock_held());

	hash = mctp_bind_hash(type, dest, src);

	sk_for_each_rcu(sk, &net->mctp.binds[hash]) {
		struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);

		if (!allow_net_any && msk->bind_net == MCTP_NET_ANY)
			continue;

		if (msk->bind_net != MCTP_NET_ANY && msk->bind_net != cb->net)
			continue;

		if (msk->bind_type != type)
			continue;

		if (msk->bind_peer_set &&
		    !mctp_address_matches(msk->bind_peer_addr, src))
			continue;

		if (!mctp_address_matches(msk->bind_local_addr, dest))
			continue;

		return msk;
	}

	return NULL;
}

static struct mctp_sock *mctp_lookup_bind(struct net *net, struct sk_buff *skb)
{
	struct mctp_sock *msk;
	struct mctp_hdr *mh;
	u8 type;

	/* TODO: look up in skb->cb? */
	mh = mctp_hdr(skb);

	if (!skb_headlen(skb))
		return NULL;

	type = (*(u8 *)skb->data) & 0x7f;

	/* Look for binds in order of widening scope. A given destination or
	 * source address also implies matching on a particular network.
	 *
	 * - Matching destination and source
	 * - Matching destination
	 * - Matching source
	 * - Matching network, any address
	 * - Any network or address
	 */

	msk = mctp_lookup_bind_details(net, skb, type, mh->dest, mh->src,
				       false);
	if (msk)
		return msk;
	msk = mctp_lookup_bind_details(net, skb, type, MCTP_ADDR_ANY, mh->src,
				       false);
	if (msk)
		return msk;
	msk = mctp_lookup_bind_details(net, skb, type, mh->dest, MCTP_ADDR_ANY,
				       false);
	if (msk)
		return msk;
	msk = mctp_lookup_bind_details(net, skb, type, MCTP_ADDR_ANY,
				       MCTP_ADDR_ANY, false);
	if (msk)
		return msk;
	msk = mctp_lookup_bind_details(net, skb, type, MCTP_ADDR_ANY,
				       MCTP_ADDR_ANY, true);
	if (msk)
		return msk;

	return NULL;
}

/* A note on the key allocations.
 *
 * struct net->mctp.keys contains our set of currently-allocated keys for
 * MCTP tag management. The lookup tuple for these is the peer EID,
 * local EID and MCTP tag.
 *
 * In some cases, the peer EID may be MCTP_EID_ANY: for example, when a
 * broadcast message is sent, we may receive responses from any peer EID.
 * Because the broadcast dest address is equivalent to ANY, we create
 * a key with (local = local-eid, peer = ANY). This allows a match on the
 * incoming broadcast responses from any peer.
 *
 * We perform lookups when packets are received, and when tags are allocated
 * in two scenarios:
 *
 * - when a packet is sent, with a locally-owned tag: we need to find an
 *   unused tag value for the (local, peer) EID pair.
 *
 * - when a tag is manually allocated: we need to find an unused tag value
 *   for the peer EID, but don't have a specific local EID at that stage.
 *
 * in the latter case, on successful allocation, we end up with a tag with
 * (local = ANY, peer = peer-eid).
 *
 * So, the key set allows both a local EID of ANY, as well as a peer EID of
 * ANY in the lookup tuple. Both may be ANY if we prealloc for a broadcast.
 * The matching (in mctp_key_match()) during lookup allows the match value to
 * be ANY in either the dest or source addresses.
 *
 * When allocating (+ inserting) a tag, we need to check for conflicts amongst
 * the existing tag set. This requires matching either exactly on the local
 * and peer addresses, or either being ANY.
 */
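/* For example (EID and tag values here are illustrative only): a socket that
 * sent a broadcast request may hold a key of (net 1, local 8, peer ANY,
 * tag 0); an incoming response on net 1 with dest 8 and tag 0 then matches
 * that key regardless of its source EID. Conversely, a manually-allocated
 * key of (local ANY, peer 9, tag 1) matches replies from EID 9 arriving at
 * any of our local addresses.
 */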
static bool mctp_key_match(struct mctp_sk_key *key, unsigned int net,
			   mctp_eid_t local, mctp_eid_t peer, u8 tag)
{
	if (key->net != net)
		return false;

	if (!mctp_address_matches(key->local_addr, local))
		return false;

	if (!mctp_address_matches(key->peer_addr, peer))
		return false;

	if (key->tag != tag)
		return false;

	return true;
}

/* returns a key (with key->lock held, and refcounted), or NULL if no such
 * key exists.
 */
static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
					   unsigned int netid, mctp_eid_t peer,
					   unsigned long *irqflags)
	__acquires(&key->lock)
{
	struct mctp_sk_key *key, *ret;
	unsigned long flags;
	struct mctp_hdr *mh;
	u8 tag;

	mh = mctp_hdr(skb);
	tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

	ret = NULL;
	spin_lock_irqsave(&net->mctp.keys_lock, flags);

	hlist_for_each_entry(key, &net->mctp.keys, hlist) {
		if (!mctp_key_match(key, netid, mh->dest, peer, tag))
			continue;

		spin_lock(&key->lock);
		if (key->valid) {
			refcount_inc(&key->refs);
			ret = key;
			break;
		}
		spin_unlock(&key->lock);
	}

	if (ret) {
		spin_unlock(&net->mctp.keys_lock);
		*irqflags = flags;
	} else {
		spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
	}

	return ret;
}

static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
					  unsigned int net,
					  mctp_eid_t local, mctp_eid_t peer,
					  u8 tag, gfp_t gfp)
{
	struct mctp_sk_key *key;

	key = kzalloc_obj(*key, gfp);
	if (!key)
		return NULL;

	key->net = net;
	key->peer_addr = peer;
	key->local_addr = local;
	key->tag = tag;
	key->sk = &msk->sk;
	key->valid = true;
	spin_lock_init(&key->lock);
	refcount_set(&key->refs, 1);
	sock_hold(key->sk);

	return key;
}

void mctp_key_unref(struct mctp_sk_key *key)
{
	unsigned long flags;

	if (!refcount_dec_and_test(&key->refs))
		return;

	/* even though no refs exist here, the lock allows us to stay
	 * consistent with the locking requirement of mctp_dev_release_key
	 */
	spin_lock_irqsave(&key->lock, flags);
	mctp_dev_release_key(key->dev, key);
	spin_unlock_irqrestore(&key->lock, flags);

	sock_put(key->sk);
	kfree(key);
}

static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
{
	struct net *net = sock_net(&msk->sk);
	struct mctp_sk_key *tmp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);

	if (sock_flag(&msk->sk, SOCK_DEAD)) {
		rc = -EINVAL;
		goto out_unlock;
	}

	hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
		if (mctp_key_match(tmp, key->net, key->local_addr,
				   key->peer_addr, key->tag)) {
			spin_lock(&tmp->lock);
			if (tmp->valid)
				rc = -EEXIST;
			spin_unlock(&tmp->lock);
			if (rc)
				break;
		}
	}

	if (!rc) {
		refcount_inc(&key->refs);
		key->expiry = jiffies + mctp_key_lifetime;
		timer_reduce(&msk->key_expiry, key->expiry);

		hlist_add_head(&key->hlist, &net->mctp.keys);
		hlist_add_head(&key->sklist, &msk->keys);
	}

out_unlock:
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	return rc;
}

/* Helper for mctp_dst_input().
 * We're done with the key; unlock and unref the key.
 * For the usual case of automatic expiry we remove the key from lists.
 * In the case that manual allocation is set on a key we release the lock
 * and local ref, reset reassembly, but don't remove from lists.
 */
static void __mctp_key_done_in(struct mctp_sk_key *key, struct net *net,
			       unsigned long flags, unsigned long reason)
	__releases(&key->lock)
{
	struct sk_buff *skb;

	trace_mctp_key_release(key, reason);
	skb = key->reasm_head;
	key->reasm_head = NULL;

	if (!key->manual_alloc) {
		key->reasm_dead = true;
		key->valid = false;
		mctp_dev_release_key(key->dev, key);
	}
	spin_unlock_irqrestore(&key->lock, flags);

	if (!key->manual_alloc) {
		spin_lock_irqsave(&net->mctp.keys_lock, flags);
		if (!hlist_unhashed(&key->hlist)) {
			hlist_del_init(&key->hlist);
			hlist_del_init(&key->sklist);
			mctp_key_unref(key);
		}
		spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
	}

	/* and one for the local reference */
	mctp_key_unref(key);

	kfree_skb(skb);
}

#ifdef CONFIG_MCTP_FLOWS
static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key)
{
	struct mctp_flow *flow;

	flow = skb_ext_add(skb, SKB_EXT_MCTP);
	if (!flow)
		return;

	refcount_inc(&key->refs);
	flow->key = key;
}

static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev)
{
	struct mctp_sk_key *key;
	struct mctp_flow *flow;
	unsigned long flags;

	flow = skb_ext_find(skb, SKB_EXT_MCTP);
	if (!flow)
		return;

	key = flow->key;

	spin_lock_irqsave(&key->lock, flags);

	if (!key->dev)
		mctp_dev_set_key(dev, key);
	else
		WARN_ON(key->dev != dev);

	spin_unlock_irqrestore(&key->lock, flags);
}
#else
static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key) {}
static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev) {}
#endif
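/* Fragment sequence numbers occupy a two-bit header field (MCTP_HDR_SEQ_MASK),
 * so mctp_frag_queue() below expects them to wrap: for example, a
 * five-fragment message carries seq values 0, 1, 2, 3, 0. The reassembled
 * length is accumulated on reasm_head and capped at mctp_message_maxlen.
 */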
/* takes ownership of skb, both in success and failure cases */
static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
{
	struct mctp_hdr *hdr = mctp_hdr(skb);
	u8 exp_seq, this_seq;

	this_seq = (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT)
		& MCTP_HDR_SEQ_MASK;

	if (!key->reasm_head) {
		/* Since we're manipulating the shared frag_list, ensure it
		 * isn't shared with any other SKBs. In the cloned case,
		 * this will free the skb; callers can no longer access it
		 * safely.
		 */
		key->reasm_head = skb_unshare(skb, GFP_ATOMIC);
		if (!key->reasm_head)
			return -ENOMEM;

		key->reasm_tailp = &(skb_shinfo(key->reasm_head)->frag_list);
		key->last_seq = this_seq;
		return 0;
	}

	exp_seq = (key->last_seq + 1) & MCTP_HDR_SEQ_MASK;

	if (this_seq != exp_seq)
		goto err_free;

	if (key->reasm_head->len + skb->len > mctp_message_maxlen)
		goto err_free;

	skb->next = NULL;
	skb->sk = NULL;
	*key->reasm_tailp = skb;
	key->reasm_tailp = &skb->next;

	key->last_seq = this_seq;

	key->reasm_head->data_len += skb->len;
	key->reasm_head->len += skb->len;
	key->reasm_head->truesize += skb->truesize;

	return 0;

err_free:
	kfree_skb(skb);
	return -EINVAL;
}
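/* Local input path: a packet may be a single-fragment message (SOM and EOM
 * both set, queued straight to a socket), the start of a reassembly (SOM
 * only, creating or reusing a key), or a continuation matched purely by its
 * key; mctp_dst_input() below handles all three cases.
 */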
static int mctp_dst_input(struct mctp_dst *dst, struct sk_buff *skb)
{
	struct mctp_sk_key *key, *any_key = NULL;
	struct net *net = dev_net(skb->dev);
	struct mctp_sock *msk;
	struct mctp_hdr *mh;
	unsigned int netid;
	unsigned long f;
	u8 tag, flags;
	int rc;
	u8 ver;

	msk = NULL;
	rc = -EINVAL;

	/* We may be receiving a locally-routed packet; drop source sk
	 * accounting.
	 *
	 * From here, we will either queue the skb - either to a frag_queue, or
	 * to a receiving socket. When that succeeds, we clear the skb pointer;
	 * a non-NULL skb on exit will be otherwise unowned, and hence
	 * kfree_skb()-ed.
	 */
	skb_orphan(skb);

	if (skb->pkt_type == PACKET_OUTGOING)
		skb->pkt_type = PACKET_LOOPBACK;

	/* ensure we have enough data for a header and a type */
	if (skb->len < sizeof(struct mctp_hdr) + 1)
		goto out;

	/* grab header, advance data ptr */
	mh = mctp_hdr(skb);
	netid = mctp_cb(skb)->net;
	skb_pull(skb, sizeof(struct mctp_hdr));

	ver = mh->ver & MCTP_HDR_VER_MASK;
	if (ver < MCTP_VER_MIN || ver > MCTP_VER_MAX)
		goto out;

	flags = mh->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
	tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

	rcu_read_lock();

	/* lookup socket / reasm context, exactly matching (src,dest,tag).
	 * we hold a ref on the key, and key->lock held.
	 */
	key = mctp_lookup_key(net, skb, netid, mh->src, &f);

	if (flags & MCTP_HDR_FLAG_SOM) {
		if (key) {
			msk = container_of(key->sk, struct mctp_sock, sk);
		} else {
			/* first response to a broadcast? do a more general
			 * key lookup to find the socket, but don't use this
			 * key for reassembly - we'll create a more specific
			 * one for future packets if required (ie, !EOM).
			 *
			 * this lookup requires key->peer to be MCTP_ADDR_ANY,
			 * it doesn't match just any key->peer.
			 */
			any_key = mctp_lookup_key(net, skb, netid,
						  MCTP_ADDR_ANY, &f);
			if (any_key) {
				msk = container_of(any_key->sk,
						   struct mctp_sock, sk);
				spin_unlock_irqrestore(&any_key->lock, f);
			}
		}

		if (!key && !msk && (tag & MCTP_HDR_FLAG_TO))
			msk = mctp_lookup_bind(net, skb);

		if (!msk) {
			rc = -ENOENT;
			goto out_unlock;
		}

		/* single-packet message? deliver to socket, clean up any
		 * pending key.
		 */
		if (flags & MCTP_HDR_FLAG_EOM) {
			rc = sock_queue_rcv_skb(&msk->sk, skb);
			if (!rc)
				skb = NULL;
			if (key) {
				/* we've hit a pending reassembly; not much we
				 * can do but drop it
				 */
				__mctp_key_done_in(key, net, f,
						   MCTP_TRACE_KEY_REPLIED);
				key = NULL;
			}
			goto out_unlock;
		}

		/* broadcast response or a bind() - create a key for further
		 * packets for this message
		 */
		if (!key) {
			key = mctp_key_alloc(msk, netid, mh->dest, mh->src,
					     tag, GFP_ATOMIC);
			if (!key) {
				rc = -ENOMEM;
				goto out_unlock;
			}

			/* we can queue without the key lock here, as the
			 * key isn't observable yet
			 */
			mctp_frag_queue(key, skb);
			skb = NULL;

			/* if the key_add fails, we've raced with another
			 * SOM packet with the same src, dest and tag. There's
			 * no way to distinguish future packets, so all we
			 * can do is drop.
			 */
			rc = mctp_key_add(key, msk);
			if (!rc)
				trace_mctp_key_acquire(key);

			/* we don't need to release key->lock on exit, so
			 * clean up here and suppress the unlock via
			 * setting to NULL
			 */
			mctp_key_unref(key);
			key = NULL;

		} else {
			if (key->reasm_head || key->reasm_dead) {
				/* duplicate start? drop everything */
				__mctp_key_done_in(key, net, f,
						   MCTP_TRACE_KEY_INVALIDATED);
				rc = -EEXIST;
				key = NULL;
			} else {
				rc = mctp_frag_queue(key, skb);
				skb = NULL;
			}
		}

	} else if (key) {
		/* this packet continues a previous message; reassemble
		 * using the message-specific key
		 */

		/* we need to be continuing an existing reassembly... */
		if (!key->reasm_head) {
			rc = -EINVAL;
		} else {
			rc = mctp_frag_queue(key, skb);
			skb = NULL;
		}

		if (rc)
			goto out_unlock;

		/* end of message? deliver to socket, and we're done with
		 * the reassembly/response key
		 */
		if (flags & MCTP_HDR_FLAG_EOM) {
			rc = sock_queue_rcv_skb(key->sk, key->reasm_head);
			if (!rc)
				key->reasm_head = NULL;
			__mctp_key_done_in(key, net, f, MCTP_TRACE_KEY_REPLIED);
			key = NULL;
		}

	} else {
		/* not a start, no matching key */
		rc = -ENOENT;
	}

out_unlock:
	rcu_read_unlock();
	if (key) {
		spin_unlock_irqrestore(&key->lock, f);
		mctp_key_unref(key);
	}
	if (any_key)
		mctp_key_unref(any_key);
out:
	kfree_skb(skb);
	return rc;
}

static int mctp_dst_output(struct mctp_dst *dst, struct sk_buff *skb)
{
	char daddr_buf[MAX_ADDR_LEN];
	char *daddr = NULL;
	int rc;

	skb->protocol = htons(ETH_P_MCTP);
	skb->pkt_type = PACKET_OUTGOING;
	skb->dev = dst->dev->dev;

	if (skb->len > dst->mtu) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* direct route; use the hwaddr we stashed in sendmsg */
	if (dst->halen) {
		if (dst->halen != skb->dev->addr_len) {
			/* sanity check, sendmsg should have already caught this */
			kfree_skb(skb);
			return -EMSGSIZE;
		}
		daddr = dst->haddr;
	} else {
		/* If lookup fails let the device handle daddr==NULL */
		if (mctp_neigh_lookup(dst->dev, dst->nexthop, daddr_buf) == 0)
			daddr = daddr_buf;
	}

	rc = dev_hard_header(skb, skb->dev, ntohs(skb->protocol),
			     daddr, skb->dev->dev_addr, skb->len);
	if (rc < 0) {
		kfree_skb(skb);
		return -EHOSTUNREACH;
	}

	mctp_flow_prepare_output(skb, dst->dev);

	rc = dev_queue_xmit(skb);
	if (rc)
		rc = net_xmit_errno(rc);

	return rc;
}

/* route alloc/release */
static void mctp_route_release(struct mctp_route *rt)
{
	if (refcount_dec_and_test(&rt->refs)) {
		if (rt->dst_type == MCTP_ROUTE_DIRECT)
			mctp_dev_put(rt->dev);
		kfree_rcu(rt, rcu);
	}
}

/* returns a route with the refcount at 1 */
static struct mctp_route *mctp_route_alloc(void)
{
	struct mctp_route *rt;

	rt = kzalloc_obj(*rt, GFP_KERNEL);
	if (!rt)
		return NULL;

	INIT_LIST_HEAD(&rt->list);
	refcount_set(&rt->refs, 1);
	rt->output = mctp_dst_discard;

	return rt;
}

unsigned int mctp_default_net(struct net *net)
{
	return READ_ONCE(net->mctp.default_net);
}

int mctp_default_net_set(struct net *net, unsigned int index)
{
	if (index == 0)
		return -EINVAL;
	WRITE_ONCE(net->mctp.default_net, index);
	return 0;
}

/* tag management */
static void mctp_reserve_tag(struct net *net, struct mctp_sk_key *key,
			     struct mctp_sock *msk)
{
	struct netns_mctp *mns = &net->mctp;

	lockdep_assert_held(&mns->keys_lock);

	key->expiry = jiffies + mctp_key_lifetime;
	timer_reduce(&msk->key_expiry, key->expiry);

	/* we hold the net->keys_lock here, allowing updates to both
	 * the net and sk lists
	 */
	hlist_add_head_rcu(&key->hlist, &mns->keys);
	hlist_add_head_rcu(&key->sklist, &msk->keys);
	refcount_inc(&key->refs);
}
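/* Tag values are three bits wide, so mctp_alloc_local_tag() below tracks the
 * candidate values in an 8-bit mask. For example, if tags 0 and 3 are already
 * in use for a (local, peer) pair, the conflict walk clears those bits,
 * leaving tagbits == 0xf6, and __ffs() selects tag 1 as the lowest free value.
 */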
/* Allocate a locally-owned tag value for (local, peer), and reserve
 * it for the socket msk
 */
struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
					 unsigned int netid,
					 mctp_eid_t local, mctp_eid_t peer,
					 bool manual, u8 *tagp)
{
	struct net *net = sock_net(&msk->sk);
	struct netns_mctp *mns = &net->mctp;
	struct mctp_sk_key *key, *tmp;
	unsigned long flags;
	u8 tagbits;

	/* for NULL destination EIDs, we may get a response from any peer */
	if (peer == MCTP_ADDR_NULL)
		peer = MCTP_ADDR_ANY;

	/* be optimistic, alloc now */
	key = mctp_key_alloc(msk, netid, local, peer, 0, GFP_KERNEL);
	if (!key)
		return ERR_PTR(-ENOMEM);

	/* 8 possible tag values */
	tagbits = 0xff;

	spin_lock_irqsave(&mns->keys_lock, flags);

	/* Walk through the existing keys, looking for potential conflicting
	 * tags. If we find a conflict, clear that bit from tagbits
	 */
	hlist_for_each_entry(tmp, &mns->keys, hlist) {
		/* We can check the lookup fields (*_addr, tag) without the
		 * lock held, they don't change over the lifetime of the key.
		 */

		/* tags are net-specific */
		if (tmp->net != netid)
			continue;

		/* if we don't own the tag, it can't conflict */
		if (tmp->tag & MCTP_HDR_FLAG_TO)
			continue;

		/* Since we're avoiding conflicting entries, match peer and
		 * local addresses, including with a wildcard on ANY. See
		 * 'A note on key allocations' for background.
		 */
		if (peer != MCTP_ADDR_ANY &&
		    !mctp_address_matches(tmp->peer_addr, peer))
			continue;

		if (local != MCTP_ADDR_ANY &&
		    !mctp_address_matches(tmp->local_addr, local))
			continue;

		spin_lock(&tmp->lock);
		/* key must still be valid. If we find a match, clear the
		 * potential tag value
		 */
		if (tmp->valid)
			tagbits &= ~(1 << tmp->tag);
		spin_unlock(&tmp->lock);

		if (!tagbits)
			break;
	}

	if (tagbits) {
		key->tag = __ffs(tagbits);
		mctp_reserve_tag(net, key, msk);
		trace_mctp_key_acquire(key);

		key->manual_alloc = manual;
		*tagp = key->tag;
	}

	spin_unlock_irqrestore(&mns->keys_lock, flags);

	if (!tagbits) {
		mctp_key_unref(key);
		return ERR_PTR(-EBUSY);
	}

	return key;
}

static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk,
						    unsigned int netid,
						    mctp_eid_t daddr,
						    u8 req_tag, u8 *tagp)
{
	struct net *net = sock_net(&msk->sk);
	struct netns_mctp *mns = &net->mctp;
	struct mctp_sk_key *key, *tmp;
	unsigned long flags;

	req_tag &= ~(MCTP_TAG_PREALLOC | MCTP_TAG_OWNER);
	key = NULL;

	spin_lock_irqsave(&mns->keys_lock, flags);

	hlist_for_each_entry(tmp, &mns->keys, hlist) {
		if (tmp->net != netid)
			continue;

		if (tmp->tag != req_tag)
			continue;

		if (!mctp_address_matches(tmp->peer_addr, daddr))
			continue;

		if (!tmp->manual_alloc)
			continue;

		spin_lock(&tmp->lock);
		if (tmp->valid) {
			key = tmp;
			refcount_inc(&key->refs);
			spin_unlock(&tmp->lock);
			break;
		}
		spin_unlock(&tmp->lock);
	}
	spin_unlock_irqrestore(&mns->keys_lock, flags);

	if (!key)
		return ERR_PTR(-ENOENT);

	if (tagp)
		*tagp = key->tag;

	return key;
}
/* routing lookups */
static unsigned int mctp_route_netid(struct mctp_route *rt)
{
	return rt->dst_type == MCTP_ROUTE_DIRECT ?
		READ_ONCE(rt->dev->net) : rt->gateway.net;
}

static bool mctp_rt_match_eid(struct mctp_route *rt,
			      unsigned int net, mctp_eid_t eid)
{
	return mctp_route_netid(rt) == net &&
		rt->min <= eid && rt->max >= eid;
}

/* compares match, used for duplicate prevention */
static bool mctp_rt_compare_exact(struct mctp_route *rt1,
				  struct mctp_route *rt2)
{
	ASSERT_RTNL();
	return mctp_route_netid(rt1) == mctp_route_netid(rt2) &&
		rt1->min == rt2->min &&
		rt1->max == rt2->max;
}

static mctp_eid_t mctp_dev_saddr(struct mctp_dev *dev)
{
	mctp_eid_t addr = MCTP_ADDR_NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->addrs_lock, flags);
	if (dev->num_addrs) {
		/* use the outbound interface's first address as our source */
		addr = dev->addrs[0];
	}
	spin_unlock_irqrestore(&dev->addrs_lock, flags);

	return addr;
}

/* must only be called on a direct route, as the final output hop */
static void mctp_dst_from_route(struct mctp_dst *dst, mctp_eid_t eid,
				mctp_eid_t saddr, unsigned int mtu,
				struct mctp_route *route)
{
	mctp_dev_hold(route->dev);
	dst->nexthop = eid;
	dst->dev = route->dev;
	dst->mtu = READ_ONCE(dst->dev->dev->mtu);
	if (mtu)
		dst->mtu = min(dst->mtu, mtu);
	dst->halen = 0;
	dst->output = route->output;
	dst->saddr = saddr;
}

int mctp_dst_from_extaddr(struct mctp_dst *dst, struct net *net, int ifindex,
			  unsigned char halen, const unsigned char *haddr)
{
	struct net_device *netdev;
	struct mctp_dev *dev;
	int rc = -ENOENT;

	if (halen > sizeof(dst->haddr))
		return -EINVAL;

	rcu_read_lock();

	netdev = dev_get_by_index_rcu(net, ifindex);
	if (!netdev)
		goto out_unlock;

	if (netdev->addr_len != halen) {
		rc = -EINVAL;
		goto out_unlock;
	}

	dev = __mctp_dev_get(netdev);
	if (!dev)
		goto out_unlock;

	dst->dev = dev;
	dst->mtu = READ_ONCE(netdev->mtu);
	dst->halen = halen;
	dst->output = mctp_dst_output;
	dst->nexthop = 0;
	dst->saddr = mctp_dev_saddr(dev);
	memcpy(dst->haddr, haddr, halen);

	rc = 0;

out_unlock:
	rcu_read_unlock();
	return rc;
}

void mctp_dst_release(struct mctp_dst *dst)
{
	mctp_dev_put(dst->dev);
}

static struct mctp_route *mctp_route_lookup_single(struct net *net,
						   unsigned int dnet,
						   mctp_eid_t daddr)
{
	struct mctp_route *rt;

	list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
		if (mctp_rt_match_eid(rt, dnet, daddr))
			return rt;
	}

	return NULL;
}
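/* Gateway routes are resolved iteratively: each MCTP_ROUTE_GATEWAY hop
 * substitutes its gateway EID as the new destination, until a direct route
 * (or the max_depth limit) is reached. For example, with a route to EID 8 via
 * gateway EID 9, plus a direct route to 9 on an interface, a lookup of 8
 * produces a dst with nexthop 9 on that interface, and the MTU clamped to the
 * smallest non-zero value seen along the chain.
 */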
/* populates *dst on successful lookup, if set */
int mctp_route_lookup(struct net *net, unsigned int dnet,
		      mctp_eid_t daddr, struct mctp_dst *dst)
{
	const unsigned int max_depth = 32;
	unsigned int depth, mtu = 0;
	int rc = -EHOSTUNREACH;

	rcu_read_lock();

	for (depth = 0; depth < max_depth; depth++) {
		struct mctp_route *rt;

		rt = mctp_route_lookup_single(net, dnet, daddr);
		if (!rt)
			break;

		/* clamp mtu to the smallest in the path, allowing 0
		 * to specify no restrictions
		 */
		if (mtu && rt->mtu)
			mtu = min(mtu, rt->mtu);
		else
			mtu = mtu ?: rt->mtu;

		if (rt->dst_type == MCTP_ROUTE_DIRECT) {
			mctp_eid_t saddr = mctp_dev_saddr(rt->dev);

			/* cannot do gateway-ed routes without a src */
			if (saddr == MCTP_ADDR_NULL && depth != 0)
				break;

			if (dst)
				mctp_dst_from_route(dst, daddr, saddr, mtu, rt);
			rc = 0;
			break;

		} else if (rt->dst_type == MCTP_ROUTE_GATEWAY) {
			daddr = rt->gateway.eid;
		}
	}

	rcu_read_unlock();

	return rc;
}

static int mctp_dst_input_null(struct net *net, struct net_device *dev,
			       struct mctp_dst *dst)
{
	rcu_read_lock();
	dst->dev = __mctp_dev_get(dev);
	rcu_read_unlock();

	if (!dst->dev)
		return -EHOSTUNREACH;

	dst->mtu = READ_ONCE(dev->mtu);
	dst->halen = 0;
	dst->output = mctp_dst_input;
	dst->nexthop = 0;

	return 0;
}

static int mctp_do_fragment_route(struct mctp_dst *dst, struct sk_buff *skb,
				  unsigned int mtu, u8 tag)
{
	const unsigned int hlen = sizeof(struct mctp_hdr);
	struct mctp_hdr *hdr, *hdr2;
	unsigned int pos, size, headroom;
	struct sk_buff *skb2;
	int rc;
	u8 seq;

	hdr = mctp_hdr(skb);
	seq = 0;
	rc = 0;

	if (mtu < hlen + 1) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* within MTU? avoid the copy, send original skb */
	if (skb->len <= mtu) {
		hdr->flags_seq_tag = MCTP_HDR_FLAG_SOM |
			MCTP_HDR_FLAG_EOM | tag;
		return dst->output(dst, skb);
	}

	/* keep same headroom as the original skb */
	headroom = skb_headroom(skb);

	/* we've got the header */
	skb_pull(skb, hlen);

	for (pos = 0; pos < skb->len;) {
		/* size of message payload */
		size = min(mtu - hlen, skb->len - pos);

		skb2 = alloc_skb(headroom + hlen + size, GFP_KERNEL);
		if (!skb2) {
			rc = -ENOMEM;
			break;
		}

		/* generic skb copy */
		skb2->protocol = skb->protocol;
		skb2->priority = skb->priority;
		skb2->dev = skb->dev;
		memcpy(skb2->cb, skb->cb, sizeof(skb2->cb));

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/* establish packet */
		skb_reserve(skb2, headroom);
		skb_reset_network_header(skb2);
		skb_put(skb2, hlen + size);
		skb2->transport_header = skb2->network_header + hlen;

		/* copy header fields, calculate SOM/EOM flags & seq */
		hdr2 = mctp_hdr(skb2);
		hdr2->ver = hdr->ver;
		hdr2->dest = hdr->dest;
		hdr2->src = hdr->src;
		hdr2->flags_seq_tag = tag &
			(MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

		if (pos == 0)
			hdr2->flags_seq_tag |= MCTP_HDR_FLAG_SOM;

		if (pos + size == skb->len)
			hdr2->flags_seq_tag |= MCTP_HDR_FLAG_EOM;

		hdr2->flags_seq_tag |= seq << MCTP_HDR_SEQ_SHIFT;

		/* copy message payload */
		skb_copy_bits(skb, pos, skb_transport_header(skb2), size);

		/* we need to copy the extensions, for MCTP flow data */
		skb_ext_copy(skb2, skb);

		/* do route */
		rc = dst->output(dst, skb2);
		if (rc)
			break;

		seq = (seq + 1) & MCTP_HDR_SEQ_MASK;
		pos += size;
	}

	consume_skb(skb);
	return rc;
}

int mctp_local_output(struct sock *sk, struct mctp_dst *dst,
		      struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag)
{
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct mctp_sk_key *key;
	struct mctp_hdr *hdr;
	unsigned int netid;
	u8 tag;

	KUNIT_STATIC_STUB_REDIRECT(mctp_local_output, sk, dst, skb, daddr,
				   req_tag);

	netid = READ_ONCE(dst->dev->net);

	if (req_tag & MCTP_TAG_OWNER) {
		if (req_tag & MCTP_TAG_PREALLOC)
			key = mctp_lookup_prealloc_tag(msk, netid, daddr,
						       req_tag, &tag);
		else
			key = mctp_alloc_local_tag(msk, netid, dst->saddr,
						   daddr, false, &tag);

		if (IS_ERR(key)) {
			kfree_skb(skb);
			return PTR_ERR(key);
		}
		mctp_skb_set_flow(skb, key);
		/* done with the key in this scope */
		mctp_key_unref(key);
		tag |= MCTP_HDR_FLAG_TO;
	} else {
		key = NULL;
		tag = req_tag & MCTP_TAG_MASK;
	}

	skb->pkt_type = PACKET_OUTGOING;
	skb->protocol = htons(ETH_P_MCTP);
	skb->priority = 0;
	skb_reset_transport_header(skb);
	skb_push(skb, sizeof(struct mctp_hdr));
	skb_reset_network_header(skb);
	skb->dev = dst->dev->dev;

	/* set up common header fields */
	hdr = mctp_hdr(skb);
	hdr->ver = 1;
	hdr->dest = daddr;
	hdr->src = dst->saddr;

	/* route output functions consume the skb, even on error */
	return mctp_do_fragment_route(dst, skb, dst->mtu, tag);
}

/* route management */

/* mctp_route_add(): Add the provided route, previously allocated via
 * mctp_route_alloc(). On success, takes ownership of @rt, which includes a
 * hold on rt->dev for usage in the route table. On failure a caller will want
 * to mctp_route_release().
 *
 * We expect that the caller has set rt->type, rt->dst_type, rt->min, rt->max,
 * rt->mtu and either rt->dev (with a reference held appropriately) or
 * rt->gateway. Other fields will be populated.
 */
static int mctp_route_add(struct net *net, struct mctp_route *rt)
{
	struct mctp_route *ert;

	if (!mctp_address_unicast(rt->min) || !mctp_address_unicast(rt->max))
		return -EINVAL;

	if (rt->dst_type == MCTP_ROUTE_DIRECT && !rt->dev)
		return -EINVAL;

	if (rt->dst_type == MCTP_ROUTE_GATEWAY && !rt->gateway.eid)
		return -EINVAL;

	switch (rt->type) {
	case RTN_LOCAL:
		rt->output = mctp_dst_input;
		break;
	case RTN_UNICAST:
		rt->output = mctp_dst_output;
		break;
	default:
		return -EINVAL;
	}

	ASSERT_RTNL();

	/* Prevent duplicate identical routes. */
	list_for_each_entry(ert, &net->mctp.routes, list) {
		if (mctp_rt_compare_exact(rt, ert))
			return -EEXIST;
	}

	list_add_rcu(&rt->list, &net->mctp.routes);

	return 0;
}

static int mctp_route_remove(struct net *net, unsigned int netid,
			     mctp_eid_t daddr_start, unsigned int daddr_extent,
			     unsigned char type)
{
	struct mctp_route *rt, *tmp;
	mctp_eid_t daddr_end;
	bool dropped;

	if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255)
		return -EINVAL;

	daddr_end = daddr_start + daddr_extent;
	dropped = false;

	ASSERT_RTNL();

	list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
		if (mctp_route_netid(rt) == netid &&
		    rt->min == daddr_start && rt->max == daddr_end &&
		    rt->type == type) {
			list_del_rcu(&rt->list);
			/* TODO: immediate RTM_DELROUTE */
			mctp_route_release(rt);
			dropped = true;
		}
	}

	return dropped ? 0 : -ENOENT;
}

int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
	struct mctp_route *rt;
	int rc;

	rt = mctp_route_alloc();
	if (!rt)
		return -ENOMEM;

	rt->min = addr;
	rt->max = addr;
	rt->dst_type = MCTP_ROUTE_DIRECT;
	rt->dev = mdev;
	rt->type = RTN_LOCAL;

	mctp_dev_hold(rt->dev);

	rc = mctp_route_add(dev_net(mdev->dev), rt);
	if (rc)
		mctp_route_release(rt);

	return rc;
}

int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
	return mctp_route_remove(dev_net(mdev->dev), mdev->net,
				 addr, 0, RTN_LOCAL);
}

/* removes all entries for a given device */
void mctp_route_remove_dev(struct mctp_dev *mdev)
{
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
		if (rt->dst_type == MCTP_ROUTE_DIRECT && rt->dev == mdev) {
			list_del_rcu(&rt->list);
			/* TODO: immediate RTM_DELROUTE */
			mctp_route_release(rt);
		}
	}
}

/* Incoming packet-handling */

static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
				struct packet_type *pt,
				struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct mctp_dev *mdev;
	struct mctp_skb_cb *cb;
	struct mctp_dst dst;
	struct mctp_hdr *mh;
	int rc;
	u8 ver;

	rcu_read_lock();
	mdev = __mctp_dev_get(dev);
	rcu_read_unlock();
	if (!mdev) {
		/* basic non-data sanity checks */
		goto err_drop;
	}

	if (!pskb_may_pull(skb, sizeof(struct mctp_hdr)))
		goto err_drop;

	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);

	/* We have enough for a header; decode and route */
	mh = mctp_hdr(skb);
	ver = mh->ver & MCTP_HDR_VER_MASK;
	if (ver < MCTP_VER_MIN || ver > MCTP_VER_MAX)
		goto err_drop;

	/* source must be valid unicast or null; drop reserved ranges and
	 * broadcast
	 */
	if (!(mctp_address_unicast(mh->src) || mctp_address_null(mh->src)))
		goto err_drop;

	/* dest address: as above, but allow broadcast */
	if (!(mctp_address_unicast(mh->dest) || mctp_address_null(mh->dest) ||
	      mctp_address_broadcast(mh->dest)))
		goto err_drop;

	/* MCTP drivers must populate halen/haddr */
	if (dev->type == ARPHRD_MCTP) {
		cb = mctp_cb(skb);
	} else {
		cb = __mctp_cb(skb);
		cb->halen = 0;
	}
	cb->net = READ_ONCE(mdev->net);
	cb->ifindex = dev->ifindex;

	rc = mctp_route_lookup(net, cb->net, mh->dest, &dst);

	/* NULL EID, but addressed to our physical address */
	if (rc && mh->dest == MCTP_ADDR_NULL && skb->pkt_type == PACKET_HOST)
		rc = mctp_dst_input_null(net, dev, &dst);

	if (rc)
		goto err_drop;

	dst.output(&dst, skb);
	mctp_dst_release(&dst);
	mctp_dev_put(mdev);

	return NET_RX_SUCCESS;

err_drop:
	kfree_skb(skb);
	mctp_dev_put(mdev);
	return NET_RX_DROP;
}

static struct packet_type mctp_packet_type = {
	.type = cpu_to_be16(ETH_P_MCTP),
	.func = mctp_pkttype_receive,
};
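/* Routes are configured from userspace over rtnetlink (RTM_NEWROUTE and
 * RTM_DELROUTE with rtm_family AF_MCTP). With iproute2's mctp support this is
 * typically something like "ip mctp route add 9 via mctp0" for a direct
 * route; the command syntax is that of the iproute2 front-end and is shown
 * here only for illustration.
 */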
/* netlink interface */

static const struct nla_policy rta_mctp_policy[RTA_MAX + 1] = {
	[RTA_DST] = { .type = NLA_U8 },
	[RTA_METRICS] = { .type = NLA_NESTED },
	[RTA_OIF] = { .type = NLA_U32 },
	[RTA_GATEWAY] = NLA_POLICY_EXACT_LEN(sizeof(struct mctp_fq_addr)),
};

static const struct nla_policy rta_metrics_policy[RTAX_MAX + 1] = {
	[RTAX_MTU] = { .type = NLA_U32 },
};

/* base parsing; common to both _lookup and _populate variants.
 *
 * For gateway routes (which have a RTA_GATEWAY, and no RTA_OIF), we populate
 * *gatewayp. For direct routes (RTA_OIF, no RTA_GATEWAY), we populate *mdev.
 */
static int mctp_route_nlparse_common(struct net *net, struct nlmsghdr *nlh,
				     struct netlink_ext_ack *extack,
				     struct nlattr **tb, struct rtmsg **rtm,
				     struct mctp_dev **mdev,
				     struct mctp_fq_addr *gatewayp,
				     mctp_eid_t *daddr_start)
{
	struct mctp_fq_addr *gateway = NULL;
	unsigned int ifindex = 0;
	struct net_device *dev;
	int rc;

	rc = nlmsg_parse(nlh, sizeof(struct rtmsg), tb, RTA_MAX,
			 rta_mctp_policy, extack);
	if (rc < 0) {
		NL_SET_ERR_MSG(extack, "incorrect format");
		return rc;
	}

	if (!tb[RTA_DST]) {
		NL_SET_ERR_MSG(extack, "dst EID missing");
		return -EINVAL;
	}
	*daddr_start = nla_get_u8(tb[RTA_DST]);

	if (tb[RTA_OIF])
		ifindex = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_GATEWAY])
		gateway = nla_data(tb[RTA_GATEWAY]);

	if (ifindex && gateway) {
		NL_SET_ERR_MSG(extack,
			       "cannot specify both ifindex and gateway");
		return -EINVAL;

	} else if (ifindex) {
		dev = __dev_get_by_index(net, ifindex);
		if (!dev) {
			NL_SET_ERR_MSG(extack, "bad ifindex");
			return -ENODEV;
		}
		*mdev = mctp_dev_get_rtnl(dev);
		if (!*mdev)
			return -ENODEV;
		gatewayp->eid = 0;

	} else if (gateway) {
		if (!mctp_address_unicast(gateway->eid)) {
			NL_SET_ERR_MSG(extack, "bad gateway");
			return -EINVAL;
		}

		gatewayp->eid = gateway->eid;
		gatewayp->net = gateway->net != MCTP_NET_ANY ?
			gateway->net :
			READ_ONCE(net->mctp.default_net);
		*mdev = NULL;

	} else {
		NL_SET_ERR_MSG(extack, "no route output provided");
		return -EINVAL;
	}

	*rtm = nlmsg_data(nlh);
	if ((*rtm)->rtm_family != AF_MCTP) {
		NL_SET_ERR_MSG(extack, "route family must be AF_MCTP");
		return -EINVAL;
	}

	if ((*rtm)->rtm_type != RTN_UNICAST) {
		NL_SET_ERR_MSG(extack, "rtm_type must be RTN_UNICAST");
		return -EINVAL;
	}

	return 0;
}

/* Route parsing for lookup operations; we only need the "route target"
 * components (ie, network and dest-EID range).
 */
static int mctp_route_nlparse_lookup(struct net *net, struct nlmsghdr *nlh,
				     struct netlink_ext_ack *extack,
				     unsigned char *type, unsigned int *netid,
				     mctp_eid_t *daddr_start,
				     unsigned int *daddr_extent)
{
	struct nlattr *tb[RTA_MAX + 1];
	struct mctp_fq_addr gw;
	struct mctp_dev *mdev;
	struct rtmsg *rtm;
	int rc;

	rc = mctp_route_nlparse_common(net, nlh, extack, tb, &rtm,
				       &mdev, &gw, daddr_start);
	if (rc)
		return rc;

	if (mdev) {
		*netid = mdev->net;
	} else if (gw.eid) {
		*netid = gw.net;
	} else {
		/* bug: _nlparse_common should not allow this */
		return -1;
	}

	*type = rtm->rtm_type;
	*daddr_extent = rtm->rtm_dst_len;

	return 0;
}
/* Full route parse for RTM_NEWROUTE: populate @rt. On success,
 * MCTP_ROUTE_DIRECT routes (ie, those with a direct dev) will hold a reference
 * to that dev.
 */
static int mctp_route_nlparse_populate(struct net *net, struct nlmsghdr *nlh,
				       struct netlink_ext_ack *extack,
				       struct mctp_route *rt)
{
	struct nlattr *tbx[RTAX_MAX + 1];
	struct nlattr *tb[RTA_MAX + 1];
	unsigned int daddr_extent;
	struct mctp_fq_addr gw;
	mctp_eid_t daddr_start;
	struct mctp_dev *dev;
	struct rtmsg *rtm;
	u32 mtu = 0;
	int rc;

	rc = mctp_route_nlparse_common(net, nlh, extack, tb, &rtm,
				       &dev, &gw, &daddr_start);
	if (rc)
		return rc;

	daddr_extent = rtm->rtm_dst_len;

	if (daddr_extent > 0xff || daddr_extent + daddr_start >= 255) {
		NL_SET_ERR_MSG(extack, "invalid eid range");
		return -EINVAL;
	}

	if (tb[RTA_METRICS]) {
		rc = nla_parse_nested(tbx, RTAX_MAX, tb[RTA_METRICS],
				      rta_metrics_policy, NULL);
		if (rc < 0) {
			NL_SET_ERR_MSG(extack, "incorrect RTA_METRICS format");
			return rc;
		}
		if (tbx[RTAX_MTU])
			mtu = nla_get_u32(tbx[RTAX_MTU]);
	}

	rt->type = rtm->rtm_type;
	rt->min = daddr_start;
	rt->max = daddr_start + daddr_extent;
	rt->mtu = mtu;
	if (gw.eid) {
		rt->dst_type = MCTP_ROUTE_GATEWAY;
		rt->gateway.eid = gw.eid;
		rt->gateway.net = gw.net;
	} else {
		rt->dst_type = MCTP_ROUTE_DIRECT;
		rt->dev = dev;
		mctp_dev_hold(rt->dev);
	}

	return 0;
}

static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct mctp_route *rt;
	int rc;

	rt = mctp_route_alloc();
	if (!rt)
		return -ENOMEM;

	rc = mctp_route_nlparse_populate(net, nlh, extack, rt);
	if (rc < 0)
		goto err_free;

	if (rt->dst_type == MCTP_ROUTE_DIRECT &&
	    rt->dev->dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack, "no routes to loopback");
		rc = -EINVAL;
		goto err_free;
	}

	rc = mctp_route_add(net, rt);
	if (!rc)
		return 0;

err_free:
	mctp_route_release(rt);
	return rc;
}

static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	unsigned int netid, daddr_extent;
	unsigned char type = RTN_UNSPEC;
	mctp_eid_t daddr_start;
	int rc;

	rc = mctp_route_nlparse_lookup(net, nlh, extack, &type, &netid,
				       &daddr_start, &daddr_extent);
	if (rc < 0)
		return rc;

	/* we only have unicast routes */
	if (type != RTN_UNICAST)
		return -EINVAL;

	rc = mctp_route_remove(net, netid, daddr_start, daddr_extent, type);
	return rc;
}
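/* Dumped routes use rtm_dst_len as an EID count rather than a prefix length:
 * for example, a route covering EIDs 8 through 11 is reported as RTA_DST = 8
 * with rtm_dst_len = 3, mirroring how mctp_route_nlparse_populate() interprets
 * the attribute on the way in.
 */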
static int mctp_fill_rtinfo(struct sk_buff *skb, struct mctp_route *rt,
			    u32 portid, u32 seq, int event, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *hdr;
	void *metrics;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
	if (!nlh)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	memset(hdr, 0, sizeof(*hdr));
	hdr->rtm_family = AF_MCTP;

	/* we use the _len fields as a number of EIDs, rather than
	 * a number of bits in the address
	 */
	hdr->rtm_dst_len = rt->max - rt->min;
	hdr->rtm_src_len = 0;
	hdr->rtm_tos = 0;
	hdr->rtm_table = RT_TABLE_DEFAULT;
	hdr->rtm_protocol = RTPROT_STATIC; /* everything is user-defined */
	hdr->rtm_type = rt->type;

	if (nla_put_u8(skb, RTA_DST, rt->min))
		goto cancel;

	metrics = nla_nest_start_noflag(skb, RTA_METRICS);
	if (!metrics)
		goto cancel;

	if (rt->mtu) {
		if (nla_put_u32(skb, RTAX_MTU, rt->mtu))
			goto cancel;
	}

	nla_nest_end(skb, metrics);

	if (rt->dst_type == MCTP_ROUTE_DIRECT) {
		hdr->rtm_scope = RT_SCOPE_LINK;
		if (nla_put_u32(skb, RTA_OIF, rt->dev->dev->ifindex))
			goto cancel;
	} else if (rt->dst_type == MCTP_ROUTE_GATEWAY) {
		hdr->rtm_scope = RT_SCOPE_UNIVERSE;
		if (nla_put(skb, RTA_GATEWAY,
			    sizeof(rt->gateway), &rt->gateway))
			goto cancel;
	}

	nlmsg_end(skb, nlh);

	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int mctp_dump_rtinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mctp_route *rt;
	int s_idx, idx;

	/* TODO: allow filtering on route data, possibly under
	 * cb->strict_check
	 */

	/* TODO: change to struct overlay */
	s_idx = cb->args[0];
	idx = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
		if (idx++ < s_idx)
			continue;
		if (mctp_fill_rtinfo(skb, rt,
				     NETLINK_CB(cb->skb).portid,
				     cb->nlh->nlmsg_seq,
				     RTM_NEWROUTE, NLM_F_MULTI) < 0)
			break;
	}

	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

/* net namespace implementation */
static int __net_init mctp_routes_net_init(struct net *net)
{
	struct netns_mctp *ns = &net->mctp;

	INIT_LIST_HEAD(&ns->routes);
	hash_init(ns->binds);
	mutex_init(&ns->bind_lock);
	INIT_HLIST_HEAD(&ns->keys);
	spin_lock_init(&ns->keys_lock);
	WARN_ON(mctp_default_net_set(net, MCTP_INITIAL_DEFAULT_NET));
	return 0;
}

static void __net_exit mctp_routes_net_exit(struct net *net)
{
	struct mctp_route *rt;

	rcu_read_lock();
	list_for_each_entry_rcu(rt, &net->mctp.routes, list)
		mctp_route_release(rt);
	rcu_read_unlock();
}

static struct pernet_operations mctp_net_ops = {
	.init = mctp_routes_net_init,
	.exit = mctp_routes_net_exit,
};

static const struct rtnl_msg_handler mctp_route_rtnl_msg_handlers[] = {
	{THIS_MODULE, PF_MCTP, RTM_NEWROUTE, mctp_newroute, NULL, 0},
	{THIS_MODULE, PF_MCTP, RTM_DELROUTE, mctp_delroute, NULL, 0},
	{THIS_MODULE, PF_MCTP, RTM_GETROUTE, NULL, mctp_dump_rtinfo, 0},
};

int __init mctp_routes_init(void)
{
	int err;

	dev_add_pack(&mctp_packet_type);

	err = register_pernet_subsys(&mctp_net_ops);
	if (err)
		goto err_pernet;

	err = rtnl_register_many(mctp_route_rtnl_msg_handlers);
	if (err)
		goto err_rtnl;

	return 0;

err_rtnl:
	unregister_pernet_subsys(&mctp_net_ops);
err_pernet:
	dev_remove_pack(&mctp_packet_type);
	return err;
}

void mctp_routes_exit(void)
{
	rtnl_unregister_many(mctp_route_rtnl_msg_handlers);
	unregister_pernet_subsys(&mctp_net_ops);
	dev_remove_pack(&mctp_packet_type);
}

#if IS_ENABLED(CONFIG_MCTP_TEST)
#include "test/route-test.c"
#endif