// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *		6WIND, Paris, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/rhashtable.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
#include <net/ip_tunnels.h>

#include <linux/nospec.h>

struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr_table	*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and protected
   with the weak lock mrt_lock. The queue of unresolved entries is
   protected with the strong spinlock mfc_unres_lock.

   In this case the data path is entirely free of exclusive locks.
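
   Concretely: the resolved MFC hash is read under RCU, the vif table
   under read_lock(&mrt_lock); only control-plane updates take the
   write lock or mfc_unres_lock.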
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct net_device *dev, struct sk_buff *skb,
			   struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list, \
				lockdep_rtnl_is_held() || \
				list_empty(&net->ipv6.mr6_tables))

static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	struct mr_table *ret;

	if (!mrt)
		ret = list_entry_rcu(net->ipv6.mr6_tables.next,
				     struct mr_table, list);
	else
		ret = list_entry_rcu(mrt->list.next,
				     struct mr_table, list);

	if (&ret->list == &net->ipv6.mr6_tables)
		return NULL;
	return ret;
}

static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi6_to_flowi(flp6));

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ip6mr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.owner		= THIS_MODULE,
};

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	rtnl_lock();
	ip6mr_free_table(mrt);
	rtnl_unlock();
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	ASSERT_RTNL();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
}

static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
			    struct netlink_ext_ack *extack)
{
	return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR, extack);
}

static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
}

bool ip6mr_rule_default(const struct fib_rule *rule)
{
	return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
	       rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
}
EXPORT_SYMBOL(ip6mr_rule_default);
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	if (!mrt)
		return net->ipv6.mrt6;
	return NULL;
}

static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv6.mrt6 = mrt;
	return 0;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	ASSERT_RTNL();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
}

static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
			    struct netlink_ext_ack *extack)
{
	return 0;
}

static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return 0;
}
#endif

static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct mfc6_cache_cmp_arg *cmparg = arg->key;
	struct mfc6_cache *c = (struct mfc6_cache *)ptr;

	return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
	       !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
}

static const struct rhashtable_params ip6mr_rht_params = {
	.head_offset = offsetof(struct mr_mfc, mnode),
	.key_offset = offsetof(struct mfc6_cache, cmparg),
	.key_len = sizeof(struct mfc6_cache_cmp_arg),
	.nelem_hint = 3,
	.obj_cmpfn = ip6mr_hash_cmp,
	.automatic_shrinking = true,
};

static void ip6mr_new_table_set(struct mr_table *mrt,
				struct net *net)
{
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
}

static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
	.mf6c_origin = IN6ADDR_ANY_INIT,
	.mf6c_mcastgrp = IN6ADDR_ANY_INIT,
};

static struct mr_table_ops ip6mr_mr_table_ops = {
	.rht_params = &ip6mr_rht_params,
	.cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
};

static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
			      ipmr_expire_process, ip6mr_new_table_set);
}

static void ip6mr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
				 MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}

#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing
 * /proc/ip6_mr_cache /proc/ip6_mr_vif
 */

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return mr_vif_seq_start(seq, pos);
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct mr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct mr_mfc_iter *it = seq->private;
		struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->_c.mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->_c.mfc_un.res.pkt,
				   mfc->_c.mfc_un.res.bytes,
				   mfc->_c.mfc_un.res.wrong_if);
			for (n = mfc->_c.mfc_un.res.minvif;
			     n < mfc->_c.mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->_c.mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d", n,
						   mfc->_c.mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = mr_mfc_seq_next,
	.stop  = mr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
#endif

#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to a multicast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif_table[reg_vif_num].dev;
	dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (!reg_dev)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
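
	/* Re-receive the decapsulated inner packet, this time on the
	 * pim6reg device.
	 */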
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}

static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark	= skb->mark,
	};

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto tx_err;

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;

tx_err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev, NULL))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#endif

static int call_ip6mr_vif_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct vif_device *vif,
					  mifi_t vif_index, u32 tb_id)
{
	return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     vif, vif_index, tb_id,
				     &net->ipv6.ipmr_seq);
}

static int call_ip6mr_mfc_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct mfc6_cache *mfc, u32 tb_id)
{
	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
}

/* Delete a VIF entry */
static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
		       struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	if (VIF_EXISTS(mrt, vifi))
		call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
					       FIB_EVENT_VIF_DEL, v, vifi,
					       mrt->id);

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		atomic_dec(&in6_dev->cnf.mc_forwarding);
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if ((v->flags & MIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put_track(dev, &v->dev_tracker);
	return 0;
}

static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
{
	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

	kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}


/* Timer handler for the queue of unresolved entries. */

static void ipmr_do_expire_process(struct mr_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mr_mfc *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}

static void ipmr_expire_process(struct timer_list *t)
{
	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under the write-locked mrt_lock.
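 * It also recomputes the [minvif, maxvif) window so the forwarding
 * loop only has to scan mifs that carry a live TTL threshold.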
 */

static void ip6mr_update_thresholds(struct mr_table *mrt,
				    struct mr_mfc *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}

static int mif6_add(struct net *net, struct mr_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		atomic_inc(&in6_dev->cnf.mc_forwarding);
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/* Fill in the VIF structures */
	vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
			vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
			MIFF_REGISTER);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
	netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC);
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
				       v, vifi, mrt->id);
	return 0;
}

static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	return mr_mfc_find(mrt, &arg);
}

/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = in6addr_any,
		.mf6c_mcastgrp = *mcastgrp,
	};

	if (ipv6_addr_any(mcastgrp))
		return mr_mfc_find_any_parent(mrt, mifi);
	return mr_mfc_find_any(mrt, mifi, &arg);
}

/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc6_cache *
ip6mr_cache_find_parent(struct mr_table *mrt,
			const struct in6_addr *origin,
			const struct in6_addr *mcastgrp,
			int parent)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	return mr_mfc_find_parent(mrt, &arg, parent);
}

/* Allocate a multicast cache entry */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (!c)
		return NULL;
	c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
	c->_c.mfc_un.res.minvif = MAXMIFS;
	c->_c.free = ip6mr_cache_free_rcu;
	refcount_set(&c->_c.mfc_un.res.refcount, 1);
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (!c)
		return NULL;
	skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
	c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));

			if (mr_fill_mroute(mrt, skb, &c->_c,
					   nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb->dev, skb, c);
	}
}

/*
 *	Bounce a cache query up to pim6sd and netlink.
 *
 *	Called under mrt_lock.
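 *
 *	Either the full packet (MRT6MSG_WHOLEPKT / MRT6MSG_WRMIFWHOLE) or
 *	just a copy of the IPv6 header, prefixed with a struct mrt6msg,
 *	is queued to the daemon's mroute6 socket.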
 */

static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sock *mroute6_sk;
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT || assert == MRT6MSG_WRMIFWHOLE)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT || assert == MRT6MSG_WRMIFWHOLE) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		if (assert == MRT6MSG_WRMIFWHOLE)
			msg->im6_mif = mifi;
		else
			msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/*
		 *	Copy the IP header
		 */

		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

		/*
		 *	Add our header
		 */
		skb_put(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);

		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	rcu_read_lock();
	mroute6_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute6_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	mrt6msg_netlink_event(mrt, skb);

	/* Deliver to user space multicast routing algorithms */
	ret = sock_queue_rcv_skb(mroute6_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}

/* Queue a packet for resolution. It gets a locked cache entry!
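 * The per-entry queue is capped: once it holds more than three skbs,
 * further packets are dropped with -ENOBUFS until the daemon installs
 * a real route.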
 */
static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
				  struct sk_buff *skb, struct net_device *dev)
{
	struct mfc6_cache *c;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		c = ip6mr_cache_alloc_unres();
		if (!c) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->_c.mfc_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->_c.list, &mrt->mfc_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/* See if we can append the packet */
	if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		if (dev) {
			skb->dev = dev;
			skb->skb_iif = dev->ifindex;
		}
		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	struct mfc6_cache *c;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
	list_del_rcu(&c->_c.list);

	call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
				       FIB_EVENT_ENTRY_DEL, c, mrt->id);
	mr6_netlink_event(mrt, c, RTM_DELROUTE);
	mr_cache_put(&c->_c);
	return 0;
}

static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, 1, NULL);
		}
	}

	return NOTIFY_DONE;
}

static unsigned int ip6mr_seq_read(struct net *net)
{
	ASSERT_RTNL();

	return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
}

static int ip6mr_dump(struct net *net, struct notifier_block *nb,
		      struct netlink_ext_ack *extack)
{
	return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
		       ip6mr_mr_table_iter, &mrt_lock, extack);
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.fib_seq_read	= ip6mr_seq_read,
	.fib_dump	= ip6mr_dump,
	.owner		= THIS_MODULE,
};

static int __net_init ip6mr_notifier_init(struct net *net)
{
	struct fib_notifier_ops *ops;

	net->ipv6.ipmr_seq = 0;

	ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	net->ipv6.ip6mr_notifier_ops = ops;

	return 0;
}

static void __net_exit ip6mr_notifier_exit(struct net *net)
{
	fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
	net->ipv6.ip6mr_notifier_ops = NULL;
}

/* Setup for IP multicast routing */
static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_notifier_init(net);
	if (err)
		return err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto ip6mr_rules_fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create_net("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_seq_ops,
			     sizeof(struct mr_vif_iter)))
		goto proc_vif_fail;
	if (!proc_create_net("ip6_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
			     sizeof(struct mr_mfc_iter)))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	rtnl_lock();
	ip6mr_rules_exit(net);
	rtnl_unlock();
#endif
ip6mr_rules_fail:
	ip6mr_notifier_exit(net);
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_notifier_exit(net);
}

static void __net_exit ip6mr_net_exit_batch(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		ip6mr_rules_exit(net);
	rtnl_unlock();
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
	.exit_batch = ip6mr_net_exit_batch,
};

int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
				   NULL, ip6mr_rtm_dumproute, 0);
	if (err == 0)
		return 0;

#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
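
/* Tear down in the reverse order of ip6_mr_init() */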
void ip6_mr_cleanup(void)
{
	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}

static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	unsigned char ttls[MAXMIFS];
	struct mfc6_cache *uc, *c;
	struct mr_mfc *_uc;
	bool found;
	int i, err;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
	rcu_read_unlock();
	if (c) {
		write_lock_bh(&mrt_lock);
		c->_c.mfc_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, &c->_c, ttls);
		if (!mrtsock)
			c->_c.mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
					       c, mrt->id);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->_c.mfc_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, &c->_c, ttls);
	if (!mrtsock)
		c->_c.mfc_flags |= MFC_STATIC;

	err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
				  ip6mr_rht_params);
	if (err) {
		pr_err("ip6mr: rhtable insert error %d\n", err);
		ip6mr_cache_free(c);
		return err;
	}
	list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);

	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
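	 * Any skbs parked on a matching unresolved entry are replayed
	 * through ip6mr_cache_resolve() against the new route.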
1478 */ 1479 found = false; 1480 spin_lock_bh(&mfc_unres_lock); 1481 list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) { 1482 uc = (struct mfc6_cache *)_uc; 1483 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && 1484 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) { 1485 list_del(&_uc->list); 1486 atomic_dec(&mrt->cache_resolve_queue_len); 1487 found = true; 1488 break; 1489 } 1490 } 1491 if (list_empty(&mrt->mfc_unres_queue)) 1492 del_timer(&mrt->ipmr_expire_timer); 1493 spin_unlock_bh(&mfc_unres_lock); 1494 1495 if (found) { 1496 ip6mr_cache_resolve(net, mrt, uc, c); 1497 ip6mr_cache_free(uc); 1498 } 1499 call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, 1500 c, mrt->id); 1501 mr6_netlink_event(mrt, c, RTM_NEWROUTE); 1502 return 0; 1503 } 1504 1505 /* 1506 * Close the multicast socket, and clear the vif tables etc 1507 */ 1508 1509 static void mroute_clean_tables(struct mr_table *mrt, int flags) 1510 { 1511 struct mr_mfc *c, *tmp; 1512 LIST_HEAD(list); 1513 int i; 1514 1515 /* Shut down all active vif entries */ 1516 if (flags & (MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC)) { 1517 for (i = 0; i < mrt->maxvif; i++) { 1518 if (((mrt->vif_table[i].flags & VIFF_STATIC) && 1519 !(flags & MRT6_FLUSH_MIFS_STATIC)) || 1520 (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT6_FLUSH_MIFS))) 1521 continue; 1522 mif6_delete(mrt, i, 0, &list); 1523 } 1524 unregister_netdevice_many(&list); 1525 } 1526 1527 /* Wipe the cache */ 1528 if (flags & (MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC)) { 1529 list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) { 1530 if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) || 1531 (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC))) 1532 continue; 1533 rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params); 1534 list_del_rcu(&c->list); 1535 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net), 1536 FIB_EVENT_ENTRY_DEL, 1537 (struct mfc6_cache *)c, mrt->id); 1538 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE); 1539 mr_cache_put(c); 1540 } 1541 } 1542 1543 if (flags & MRT6_FLUSH_MFC) { 1544 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { 1545 spin_lock_bh(&mfc_unres_lock); 1546 list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) { 1547 list_del(&c->list); 1548 mr6_netlink_event(mrt, (struct mfc6_cache *)c, 1549 RTM_DELROUTE); 1550 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c); 1551 } 1552 spin_unlock_bh(&mfc_unres_lock); 1553 } 1554 } 1555 } 1556 1557 static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk) 1558 { 1559 int err = 0; 1560 struct net *net = sock_net(sk); 1561 1562 rtnl_lock(); 1563 write_lock_bh(&mrt_lock); 1564 if (rtnl_dereference(mrt->mroute_sk)) { 1565 err = -EADDRINUSE; 1566 } else { 1567 rcu_assign_pointer(mrt->mroute_sk, sk); 1568 sock_set_flag(sk, SOCK_RCU_FREE); 1569 atomic_inc(&net->ipv6.devconf_all->mc_forwarding); 1570 } 1571 write_unlock_bh(&mrt_lock); 1572 1573 if (!err) 1574 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, 1575 NETCONFA_MC_FORWARDING, 1576 NETCONFA_IFINDEX_ALL, 1577 net->ipv6.devconf_all); 1578 rtnl_unlock(); 1579 1580 return err; 1581 } 1582 1583 int ip6mr_sk_done(struct sock *sk) 1584 { 1585 struct net *net = sock_net(sk); 1586 struct ipv6_devconf *devconf; 1587 struct mr_table *mrt; 1588 int err = -EACCES; 1589 1590 if (sk->sk_type != SOCK_RAW || 1591 inet_sk(sk)->inet_num != IPPROTO_ICMPV6) 1592 return err; 1593 1594 devconf = net->ipv6.devconf_all; 1595 if (!devconf || !atomic_read(&devconf->mc_forwarding)) 1596 
		return err;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			write_lock_bh(&mrt_lock);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			/* Note that mroute_sk had SOCK_RCU_FREE set,
			 * so the RCU grace period before sk freeing
			 * is guaranteed by sk_destruct()
			 */
			atomic_dec(&devconf->mc_forwarding);
			write_unlock_bh(&mrt_lock);
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);

			mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}

bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return NULL;

	return rcu_access_pointer(mrt->mroute_sk);
}
EXPORT_SYMBOL(mroute6_is_socket);

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
			  unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;
	bool do_wrmifwhole;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_sockptr(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif,
			       sk == rtnl_dereference(mrt->mroute_sk));
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_sockptr(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, 0, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
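	 *	The daemon installs and removes the (S,G) and proxy routes;
	 *	the kernel only creates the transient unresolved entries
	 *	that prompt those installs.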
1708 */ 1709 case MRT6_ADD_MFC: 1710 case MRT6_DEL_MFC: 1711 parent = -1; 1712 fallthrough; 1713 case MRT6_ADD_MFC_PROXY: 1714 case MRT6_DEL_MFC_PROXY: 1715 if (optlen < sizeof(mfc)) 1716 return -EINVAL; 1717 if (copy_from_sockptr(&mfc, optval, sizeof(mfc))) 1718 return -EFAULT; 1719 if (parent == 0) 1720 parent = mfc.mf6cc_parent; 1721 rtnl_lock(); 1722 if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY) 1723 ret = ip6mr_mfc_delete(mrt, &mfc, parent); 1724 else 1725 ret = ip6mr_mfc_add(net, mrt, &mfc, 1726 sk == 1727 rtnl_dereference(mrt->mroute_sk), 1728 parent); 1729 rtnl_unlock(); 1730 return ret; 1731 1732 case MRT6_FLUSH: 1733 { 1734 int flags; 1735 1736 if (optlen != sizeof(flags)) 1737 return -EINVAL; 1738 if (copy_from_sockptr(&flags, optval, sizeof(flags))) 1739 return -EFAULT; 1740 rtnl_lock(); 1741 mroute_clean_tables(mrt, flags); 1742 rtnl_unlock(); 1743 return 0; 1744 } 1745 1746 /* 1747 * Control PIM assert (to activate pim will activate assert) 1748 */ 1749 case MRT6_ASSERT: 1750 { 1751 int v; 1752 1753 if (optlen != sizeof(v)) 1754 return -EINVAL; 1755 if (copy_from_sockptr(&v, optval, sizeof(v))) 1756 return -EFAULT; 1757 mrt->mroute_do_assert = v; 1758 return 0; 1759 } 1760 1761 #ifdef CONFIG_IPV6_PIMSM_V2 1762 case MRT6_PIM: 1763 { 1764 int v; 1765 1766 if (optlen != sizeof(v)) 1767 return -EINVAL; 1768 if (copy_from_sockptr(&v, optval, sizeof(v))) 1769 return -EFAULT; 1770 1771 do_wrmifwhole = (v == MRT6MSG_WRMIFWHOLE); 1772 v = !!v; 1773 rtnl_lock(); 1774 ret = 0; 1775 if (v != mrt->mroute_do_pim) { 1776 mrt->mroute_do_pim = v; 1777 mrt->mroute_do_assert = v; 1778 mrt->mroute_do_wrvifwhole = do_wrmifwhole; 1779 } 1780 rtnl_unlock(); 1781 return ret; 1782 } 1783 1784 #endif 1785 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES 1786 case MRT6_TABLE: 1787 { 1788 u32 v; 1789 1790 if (optlen != sizeof(u32)) 1791 return -EINVAL; 1792 if (copy_from_sockptr(&v, optval, sizeof(v))) 1793 return -EFAULT; 1794 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */ 1795 if (v != RT_TABLE_DEFAULT && v >= 100000000) 1796 return -EINVAL; 1797 if (sk == rcu_access_pointer(mrt->mroute_sk)) 1798 return -EBUSY; 1799 1800 rtnl_lock(); 1801 ret = 0; 1802 mrt = ip6mr_new_table(net, v); 1803 if (IS_ERR(mrt)) 1804 ret = PTR_ERR(mrt); 1805 else 1806 raw6_sk(sk)->ip6mr_table = v; 1807 rtnl_unlock(); 1808 return ret; 1809 } 1810 #endif 1811 /* 1812 * Spurious command, or MRT6_VERSION which you cannot 1813 * set. 1814 */ 1815 default: 1816 return -ENOPROTOOPT; 1817 } 1818 } 1819 1820 /* 1821 * Getsock opt support for the multicast routing system. 1822 */ 1823 1824 int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, 1825 int __user *optlen) 1826 { 1827 int olr; 1828 int val; 1829 struct net *net = sock_net(sk); 1830 struct mr_table *mrt; 1831 1832 if (sk->sk_type != SOCK_RAW || 1833 inet_sk(sk)->inet_num != IPPROTO_ICMPV6) 1834 return -EOPNOTSUPP; 1835 1836 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? 
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct vif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.mifi];
		if (VIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct vif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.mifi];
		if (VIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(net, sk, skb);
}

/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if an mrouter runs a multicast
	 * program, that program should receive packets regardless of the
	 * interface it has joined on; otherwise it would have to join on
	 * all interfaces. On the other hand, a multihomed host (or router,
	 * but not mrouter) cannot join on more than one interface, as that
	 * would result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers?
	 */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}

static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct net_device *dev, struct sk_buff *skb,
			   struct mfc6_cache *c)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ip6mr_find_vif(mrt, dev);

	vif = c->_c.mfc_parent;
	c->_c.mfc_un.res.pkt++;
	c->_c.mfc_un.res.bytes += skb->len;
	c->_c.mfc_un.res.lastuse = jiffies;

	if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
		struct mfc6_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		rcu_read_lock();
		cache_proxy = mr_mfc_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
			rcu_read_unlock();
			goto forward;
		}
		rcu_read_unlock();
	}

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != dev) {
		c->_c.mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from the RPT to
		       the SPT, so we cannot check that the packet arrived
		       on an oif. That is bad, but otherwise we would need
		       to move a pretty large chunk of pimd into the
		       kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       c->_c.mfc_un.res.last_assert +
			       MFC_ASSERT_THRESH)) {
			c->_c.mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
			if (mrt->mroute_do_wrvifwhole)
				ip6mr_cache_report(mrt, skb, true_vifi,
						   MRT6MSG_WRMIFWHOLE);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	if (ipv6_addr_any(&c->mf6c_origin) &&
	    ipv6_addr_any(&c->mf6c_mcastgrp)) {
		if (true_vifi >= 0 &&
		    true_vifi != c->_c.mfc_parent &&
		    ipv6_hdr(skb)->hop_limit >
				c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
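			 * Packets taken from any downstream mif are thus
			 * funneled toward the mfc_parent (upstream) mif and
			 * nowhere else.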
	for (ct = c->_c.mfc_un.res.maxvif - 1;
	     ct >= c->_c.mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
		    ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 =
					skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2, psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, psend);
		return;
	}

dont_forward:
	kfree_skb(skb);
}


/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;
	struct net_device *dev;

	/* skb->dev passed in is the master dev for vrfs.
	 * Get the proper interface that does have a vif associated with it.
	 */
	dev = skb->dev;
	if (netif_is_l3_master(skb->dev)) {
		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
		if (!dev) {
			kfree_skb(skb);
			return -ENODEV;
		}
	}

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (!cache) {
		int vif = ip6mr_find_vif(mrt, dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt,
						     &ipv6_hdr(skb)->daddr,
						     vif);
	}

	/*
	 *	No usable cache entry
	 */
	if (!cache) {
		int vif;

		vif = ip6mr_find_vif(mrt, dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, dev, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}

int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
		    u32 portid)
{
	int err;
	struct mr_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
	if (!cache && skb->dev) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
						     vif);
	}

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		dev = skb->dev;
		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
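		/* No cache entry yet: build a minimal skb carrying only an
		 * IPv6 header with the (src, dst) pair of interest and queue
		 * it as unresolved, so that the routing daemon receives an
		 * upcall and can resolve the route.  Everything else in the
		 * synthetic header, including the version field, is zeroed.
		 */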
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		iph->saddr = rt->rt6i_src.addr;
		iph->daddr = rt->rt6i_dst.addr;

		err = ip6mr_cache_unresolved(mrt, vif, skb2, dev);
		read_unlock(&mrt_lock);

		return err;
	}

	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
	read_unlock(&mrt_lock);
	return err;
}

static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mfc6_cache *c,
			     int cmd, int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len  = 128;
	rtm->rtm_src_len  = 128;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->_c.mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
		goto nla_put_failure;
	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      u32 portid, u32 seq, struct mr_mfc *c,
			      int cmd, int flags)
{
	return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
				 cmd, flags);
}

static int mr6_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		;

	return len;
}
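
/* Notify userspace listeners on the RTNLGRP_IPV6_MROUTE group (for
 * instance a monitoring "ip -6 mroute" process) of MFC changes; cmd is
 * RTM_NEWROUTE or RTM_DELROUTE depending on whether the entry was added
 * or removed.
 */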
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS,
				    mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}

static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtgenmsg))
		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
					/* IP6MRA_CREPORT_SRC_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_DST_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_PKT */
		+ nla_total_size(payloadlen)
		;

	return len;
}

static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct mrt6msg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct mrt6msg);
	msg = (struct mrt6msg *)skb_transport_header(pkt);

	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
			     &msg->im6_src) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
			     &msg->im6_dst))
		goto nla_put_failure;

	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
}

static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct fib_dump_filter filter = {};
	int err;

	if (cb->strict_check) {
		err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh,
					    &filter, cb);
		if (err < 0)
			return err;
	}

	if (filter.table_id) {
		struct mr_table *mrt;

		mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
		if (!mrt) {
			if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR)
				return skb->len;

			NL_SET_ERR_MSG_MOD(cb->extack,
					   "MR table does not exist");
			return -ENOENT;
		}
		err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute,
				    &mfc_unres_lock, &filter);
		return skb->len ? : err;
	}

	return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
				_ip6mr_fill_mroute, &mfc_unres_lock, &filter);
}
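
/* Example (userspace sketch, error handling omitted): the dump handler
 * above serves requests such as the one "ip -6 mroute show" issues - an
 * RTM_GETROUTE dump with rtm_family set to RTNL_FAMILY_IP6MR on a
 * NETLINK_ROUTE socket ("nl_sock" is assumed to be set up already):
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct rtmsg rtm;
 *	} req = {
 *		.nlh.nlmsg_len	 = NLMSG_LENGTH(sizeof(struct rtmsg)),
 *		.nlh.nlmsg_type	 = RTM_GETROUTE,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		.rtm.rtm_family	 = RTNL_FAMILY_IP6MR,
 *	};
 *
 *	send(nl_sock, &req, req.nlh.nlmsg_len, 0);
 *	// each RTM_NEWROUTE reply was built by ip6mr_fill_mroute() above
 */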