1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Linux IPv6 multicast routing support for BSD pim6sd 4 * Based on net/ipv4/ipmr.c. 5 * 6 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr> 7 * LSIIT Laboratory, Strasbourg, France 8 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com> 9 * 6WIND, Paris, France 10 * Copyright (C)2007,2008 USAGI/WIDE Project 11 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> 12 */ 13 14 #include <linux/uaccess.h> 15 #include <linux/types.h> 16 #include <linux/sched.h> 17 #include <linux/errno.h> 18 #include <linux/mm.h> 19 #include <linux/kernel.h> 20 #include <linux/fcntl.h> 21 #include <linux/stat.h> 22 #include <linux/socket.h> 23 #include <linux/inet.h> 24 #include <linux/netdevice.h> 25 #include <linux/inetdevice.h> 26 #include <linux/proc_fs.h> 27 #include <linux/seq_file.h> 28 #include <linux/init.h> 29 #include <linux/compat.h> 30 #include <linux/rhashtable.h> 31 #include <net/protocol.h> 32 #include <linux/skbuff.h> 33 #include <net/raw.h> 34 #include <linux/notifier.h> 35 #include <linux/if_arp.h> 36 #include <net/checksum.h> 37 #include <net/netlink.h> 38 #include <net/fib_rules.h> 39 40 #include <net/ipv6.h> 41 #include <net/ip6_route.h> 42 #include <linux/mroute6.h> 43 #include <linux/pim.h> 44 #include <net/addrconf.h> 45 #include <linux/netfilter_ipv6.h> 46 #include <linux/export.h> 47 #include <net/ip6_checksum.h> 48 #include <linux/netconf.h> 49 #include <net/ip_tunnels.h> 50 51 #include <linux/nospec.h> 52 53 struct ip6mr_rule { 54 struct fib_rule common; 55 }; 56 57 struct ip6mr_result { 58 struct mr_table *mrt; 59 }; 60 61 /* Big lock, protecting vif table, mrt cache and mroute socket state. 62 Note that the changes are semaphored via rtnl_lock. 63 */ 64 65 static DEFINE_RWLOCK(mrt_lock); 66 67 /* Multicast router control variables */ 68 69 /* Special spinlock for queue of unresolved entries */ 70 static DEFINE_SPINLOCK(mfc_unres_lock); 71 72 /* We return to original Alan's scheme. Hash table of resolved 73 entries is changed only in process context and protected 74 with weak lock mrt_lock. Queue of unresolved entries is protected 75 with strong spinlock mfc_unres_lock. 76 77 In this case data path is free of exclusive locks at all. 78 */ 79 80 static struct kmem_cache *mrt_cachep __read_mostly; 81 82 static struct mr_table *ip6mr_new_table(struct net *net, u32 id); 83 static void ip6mr_free_table(struct mr_table *mrt); 84 85 static void ip6_mr_forward(struct net *net, struct mr_table *mrt, 86 struct net_device *dev, struct sk_buff *skb, 87 struct mfc6_cache *cache); 88 static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, 89 mifi_t mifi, int assert); 90 static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc, 91 int cmd); 92 static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt); 93 static int ip6mr_rtm_dumproute(struct sk_buff *skb, 94 struct netlink_callback *cb); 95 static void mroute_clean_tables(struct mr_table *mrt, int flags); 96 static void ipmr_expire_process(struct timer_list *t); 97 98 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES 99 #define ip6mr_for_each_table(mrt, net) \ 100 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list, \ 101 lockdep_rtnl_is_held()) 102 103 static struct mr_table *ip6mr_mr_table_iter(struct net *net, 104 struct mr_table *mrt) 105 { 106 struct mr_table *ret; 107 108 if (!mrt) 109 ret = list_entry_rcu(net->ipv6.mr6_tables.next, 110 struct mr_table, list); 111 else 112 ret = list_entry_rcu(mrt->list.next, 113 struct mr_table, list); 114 115 if (&ret->list == &net->ipv6.mr6_tables) 116 return NULL; 117 return ret; 118 } 119 120 static struct mr_table *ip6mr_get_table(struct net *net, u32 id) 121 { 122 struct mr_table *mrt; 123 124 ip6mr_for_each_table(mrt, net) { 125 if (mrt->id == id) 126 return mrt; 127 } 128 return NULL; 129 } 130 131 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6, 132 struct mr_table **mrt) 133 { 134 int err; 135 struct ip6mr_result res; 136 struct fib_lookup_arg arg = { 137 .result = &res, 138 .flags = FIB_LOOKUP_NOREF, 139 }; 140 141 /* update flow if oif or iif point to device enslaved to l3mdev */ 142 l3mdev_update_flow(net, flowi6_to_flowi(flp6)); 143 144 err = fib_rules_lookup(net->ipv6.mr6_rules_ops, 145 flowi6_to_flowi(flp6), 0, &arg); 146 if (err < 0) 147 return err; 148 *mrt = res.mrt; 149 return 0; 150 } 151 152 static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp, 153 int flags, struct fib_lookup_arg *arg) 154 { 155 struct ip6mr_result *res = arg->result; 156 struct mr_table *mrt; 157 158 switch (rule->action) { 159 case FR_ACT_TO_TBL: 160 break; 161 case FR_ACT_UNREACHABLE: 162 return -ENETUNREACH; 163 case FR_ACT_PROHIBIT: 164 return -EACCES; 165 case FR_ACT_BLACKHOLE: 166 default: 167 return -EINVAL; 168 } 169 170 arg->table = fib_rule_get_table(rule, arg); 171 172 mrt = ip6mr_get_table(rule->fr_net, arg->table); 173 if (!mrt) 174 return -EAGAIN; 175 res->mrt = mrt; 176 return 0; 177 } 178 179 static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags) 180 { 181 return 1; 182 } 183 184 static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = { 185 FRA_GENERIC_POLICY, 186 }; 187 188 static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, 189 struct fib_rule_hdr *frh, struct nlattr **tb, 190 struct netlink_ext_ack *extack) 191 { 192 return 0; 193 } 194 195 static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, 196 struct nlattr **tb) 197 { 198 return 1; 199 } 200 201 static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, 202 struct fib_rule_hdr *frh) 203 { 204 frh->dst_len = 0; 205 frh->src_len = 0; 206 frh->tos = 0; 207 return 0; 208 } 209 210 static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = { 211 .family = RTNL_FAMILY_IP6MR, 212 .rule_size = sizeof(struct ip6mr_rule), 213 .addr_size = sizeof(struct in6_addr), 214 .action = ip6mr_rule_action, 215 .match = ip6mr_rule_match, 216 .configure = ip6mr_rule_configure, 217 .compare = ip6mr_rule_compare, 218 .fill = ip6mr_rule_fill, 219 .nlgroup = RTNLGRP_IPV6_RULE, 220 .policy = ip6mr_rule_policy, 221 .owner = THIS_MODULE, 222 }; 223 224 static int __net_init ip6mr_rules_init(struct net *net) 225 { 226 struct fib_rules_ops *ops; 227 struct mr_table *mrt; 228 int err; 229 230 ops = fib_rules_register(&ip6mr_rules_ops_template, net); 231 if (IS_ERR(ops)) 232 return PTR_ERR(ops); 233 234 INIT_LIST_HEAD(&net->ipv6.mr6_tables); 235 236 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT); 237 if (IS_ERR(mrt)) { 238 err = PTR_ERR(mrt); 239 goto err1; 240 } 241 242 err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0); 243 if (err < 0) 244 goto err2; 245 246 net->ipv6.mr6_rules_ops = ops; 247 return 0; 248 249 err2: 250 ip6mr_free_table(mrt); 251 err1: 252 fib_rules_unregister(ops); 253 return err; 254 } 255 256 static void __net_exit ip6mr_rules_exit(struct net *net) 257 { 258 struct mr_table *mrt, *next; 259 260 rtnl_lock(); 261 list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) { 262 list_del(&mrt->list); 263 ip6mr_free_table(mrt); 264 } 265 fib_rules_unregister(net->ipv6.mr6_rules_ops); 266 rtnl_unlock(); 267 } 268 269 static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb, 270 struct netlink_ext_ack *extack) 271 { 272 return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR, extack); 273 } 274 275 static unsigned int ip6mr_rules_seq_read(struct net *net) 276 { 277 return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR); 278 } 279 280 bool ip6mr_rule_default(const struct fib_rule *rule) 281 { 282 return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL && 283 rule->table == RT6_TABLE_DFLT && !rule->l3mdev; 284 } 285 EXPORT_SYMBOL(ip6mr_rule_default); 286 #else 287 #define ip6mr_for_each_table(mrt, net) \ 288 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL) 289 290 static struct mr_table *ip6mr_mr_table_iter(struct net *net, 291 struct mr_table *mrt) 292 { 293 if (!mrt) 294 return net->ipv6.mrt6; 295 return NULL; 296 } 297 298 static struct mr_table *ip6mr_get_table(struct net *net, u32 id) 299 { 300 return net->ipv6.mrt6; 301 } 302 303 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6, 304 struct mr_table **mrt) 305 { 306 *mrt = net->ipv6.mrt6; 307 return 0; 308 } 309 310 static int __net_init ip6mr_rules_init(struct net *net) 311 { 312 struct mr_table *mrt; 313 314 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT); 315 if (IS_ERR(mrt)) 316 return PTR_ERR(mrt); 317 net->ipv6.mrt6 = mrt; 318 return 0; 319 } 320 321 static void __net_exit ip6mr_rules_exit(struct net *net) 322 { 323 rtnl_lock(); 324 ip6mr_free_table(net->ipv6.mrt6); 325 net->ipv6.mrt6 = NULL; 326 rtnl_unlock(); 327 } 328 329 static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb, 330 struct netlink_ext_ack *extack) 331 { 332 return 0; 333 } 334 335 static unsigned int ip6mr_rules_seq_read(struct net *net) 336 { 337 return 0; 338 } 339 #endif 340 341 static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg, 342 const void *ptr) 343 { 344 const struct mfc6_cache_cmp_arg *cmparg = arg->key; 345 struct mfc6_cache *c = (struct mfc6_cache *)ptr; 346 347 return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) || 348 !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin); 349 } 350 351 static const struct rhashtable_params ip6mr_rht_params = { 352 .head_offset = offsetof(struct mr_mfc, mnode), 353 .key_offset = offsetof(struct mfc6_cache, cmparg), 354 .key_len = sizeof(struct mfc6_cache_cmp_arg), 355 .nelem_hint = 3, 356 .obj_cmpfn = ip6mr_hash_cmp, 357 .automatic_shrinking = true, 358 }; 359 360 static void ip6mr_new_table_set(struct mr_table *mrt, 361 struct net *net) 362 { 363 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES 364 list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables); 365 #endif 366 } 367 368 static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = { 369 .mf6c_origin = IN6ADDR_ANY_INIT, 370 .mf6c_mcastgrp = IN6ADDR_ANY_INIT, 371 }; 372 373 static struct mr_table_ops ip6mr_mr_table_ops = { 374 .rht_params = &ip6mr_rht_params, 375 .cmparg_any = &ip6mr_mr_table_ops_cmparg_any, 376 }; 377 378 static struct mr_table *ip6mr_new_table(struct net *net, u32 id) 379 { 380 struct mr_table *mrt; 381 382 mrt = ip6mr_get_table(net, id); 383 if (mrt) 384 return mrt; 385 386 return mr_table_alloc(net, id, &ip6mr_mr_table_ops, 387 ipmr_expire_process, ip6mr_new_table_set); 388 } 389 390 static void ip6mr_free_table(struct mr_table *mrt) 391 { 392 del_timer_sync(&mrt->ipmr_expire_timer); 393 mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC | 394 MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC); 395 rhltable_destroy(&mrt->mfc_hash); 396 kfree(mrt); 397 } 398 399 #ifdef CONFIG_PROC_FS 400 /* The /proc interfaces to multicast routing 401 * /proc/ip6_mr_cache /proc/ip6_mr_vif 402 */ 403 404 static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos) 405 __acquires(mrt_lock) 406 { 407 struct mr_vif_iter *iter = seq->private; 408 struct net *net = seq_file_net(seq); 409 struct mr_table *mrt; 410 411 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); 412 if (!mrt) 413 return ERR_PTR(-ENOENT); 414 415 iter->mrt = mrt; 416 417 read_lock(&mrt_lock); 418 return mr_vif_seq_start(seq, pos); 419 } 420 421 static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v) 422 __releases(mrt_lock) 423 { 424 read_unlock(&mrt_lock); 425 } 426 427 static int ip6mr_vif_seq_show(struct seq_file *seq, void *v) 428 { 429 struct mr_vif_iter *iter = seq->private; 430 struct mr_table *mrt = iter->mrt; 431 432 if (v == SEQ_START_TOKEN) { 433 seq_puts(seq, 434 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n"); 435 } else { 436 const struct vif_device *vif = v; 437 const char *name = vif->dev ? vif->dev->name : "none"; 438 439 seq_printf(seq, 440 "%2td %-10s %8ld %7ld %8ld %7ld %05X\n", 441 vif - mrt->vif_table, 442 name, vif->bytes_in, vif->pkt_in, 443 vif->bytes_out, vif->pkt_out, 444 vif->flags); 445 } 446 return 0; 447 } 448 449 static const struct seq_operations ip6mr_vif_seq_ops = { 450 .start = ip6mr_vif_seq_start, 451 .next = mr_vif_seq_next, 452 .stop = ip6mr_vif_seq_stop, 453 .show = ip6mr_vif_seq_show, 454 }; 455 456 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) 457 { 458 struct net *net = seq_file_net(seq); 459 struct mr_table *mrt; 460 461 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); 462 if (!mrt) 463 return ERR_PTR(-ENOENT); 464 465 return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock); 466 } 467 468 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) 469 { 470 int n; 471 472 if (v == SEQ_START_TOKEN) { 473 seq_puts(seq, 474 "Group " 475 "Origin " 476 "Iif Pkts Bytes Wrong Oifs\n"); 477 } else { 478 const struct mfc6_cache *mfc = v; 479 const struct mr_mfc_iter *it = seq->private; 480 struct mr_table *mrt = it->mrt; 481 482 seq_printf(seq, "%pI6 %pI6 %-3hd", 483 &mfc->mf6c_mcastgrp, &mfc->mf6c_origin, 484 mfc->_c.mfc_parent); 485 486 if (it->cache != &mrt->mfc_unres_queue) { 487 seq_printf(seq, " %8lu %8lu %8lu", 488 mfc->_c.mfc_un.res.pkt, 489 mfc->_c.mfc_un.res.bytes, 490 mfc->_c.mfc_un.res.wrong_if); 491 for (n = mfc->_c.mfc_un.res.minvif; 492 n < mfc->_c.mfc_un.res.maxvif; n++) { 493 if (VIF_EXISTS(mrt, n) && 494 mfc->_c.mfc_un.res.ttls[n] < 255) 495 seq_printf(seq, 496 " %2d:%-3d", n, 497 mfc->_c.mfc_un.res.ttls[n]); 498 } 499 } else { 500 /* unresolved mfc_caches don't contain 501 * pkt, bytes and wrong_if values 502 */ 503 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul); 504 } 505 seq_putc(seq, '\n'); 506 } 507 return 0; 508 } 509 510 static const struct seq_operations ipmr_mfc_seq_ops = { 511 .start = ipmr_mfc_seq_start, 512 .next = mr_mfc_seq_next, 513 .stop = mr_mfc_seq_stop, 514 .show = ipmr_mfc_seq_show, 515 }; 516 #endif 517 518 #ifdef CONFIG_IPV6_PIMSM_V2 519 520 static int pim6_rcv(struct sk_buff *skb) 521 { 522 struct pimreghdr *pim; 523 struct ipv6hdr *encap; 524 struct net_device *reg_dev = NULL; 525 struct net *net = dev_net(skb->dev); 526 struct mr_table *mrt; 527 struct flowi6 fl6 = { 528 .flowi6_iif = skb->dev->ifindex, 529 .flowi6_mark = skb->mark, 530 }; 531 int reg_vif_num; 532 533 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) 534 goto drop; 535 536 pim = (struct pimreghdr *)skb_transport_header(skb); 537 if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) || 538 (pim->flags & PIM_NULL_REGISTER) || 539 (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 540 sizeof(*pim), IPPROTO_PIM, 541 csum_partial((void *)pim, sizeof(*pim), 0)) && 542 csum_fold(skb_checksum(skb, 0, skb->len, 0)))) 543 goto drop; 544 545 /* check if the inner packet is destined to mcast group */ 546 encap = (struct ipv6hdr *)(skb_transport_header(skb) + 547 sizeof(*pim)); 548 549 if (!ipv6_addr_is_multicast(&encap->daddr) || 550 encap->payload_len == 0 || 551 ntohs(encap->payload_len) + sizeof(*pim) > skb->len) 552 goto drop; 553 554 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0) 555 goto drop; 556 reg_vif_num = mrt->mroute_reg_vif_num; 557 558 read_lock(&mrt_lock); 559 if (reg_vif_num >= 0) 560 reg_dev = mrt->vif_table[reg_vif_num].dev; 561 if (reg_dev) 562 dev_hold(reg_dev); 563 read_unlock(&mrt_lock); 564 565 if (!reg_dev) 566 goto drop; 567 568 skb->mac_header = skb->network_header; 569 skb_pull(skb, (u8 *)encap - skb->data); 570 skb_reset_network_header(skb); 571 skb->protocol = htons(ETH_P_IPV6); 572 skb->ip_summed = CHECKSUM_NONE; 573 574 skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev)); 575 576 netif_rx(skb); 577 578 dev_put(reg_dev); 579 return 0; 580 drop: 581 kfree_skb(skb); 582 return 0; 583 } 584 585 static const struct inet6_protocol pim6_protocol = { 586 .handler = pim6_rcv, 587 }; 588 589 /* Service routines creating virtual interfaces: PIMREG */ 590 591 static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, 592 struct net_device *dev) 593 { 594 struct net *net = dev_net(dev); 595 struct mr_table *mrt; 596 struct flowi6 fl6 = { 597 .flowi6_oif = dev->ifindex, 598 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX, 599 .flowi6_mark = skb->mark, 600 }; 601 602 if (!pskb_inet_may_pull(skb)) 603 goto tx_err; 604 605 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0) 606 goto tx_err; 607 608 read_lock(&mrt_lock); 609 dev->stats.tx_bytes += skb->len; 610 dev->stats.tx_packets++; 611 ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT); 612 read_unlock(&mrt_lock); 613 kfree_skb(skb); 614 return NETDEV_TX_OK; 615 616 tx_err: 617 dev->stats.tx_errors++; 618 kfree_skb(skb); 619 return NETDEV_TX_OK; 620 } 621 622 static int reg_vif_get_iflink(const struct net_device *dev) 623 { 624 return 0; 625 } 626 627 static const struct net_device_ops reg_vif_netdev_ops = { 628 .ndo_start_xmit = reg_vif_xmit, 629 .ndo_get_iflink = reg_vif_get_iflink, 630 }; 631 632 static void reg_vif_setup(struct net_device *dev) 633 { 634 dev->type = ARPHRD_PIMREG; 635 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8; 636 dev->flags = IFF_NOARP; 637 dev->netdev_ops = ®_vif_netdev_ops; 638 dev->needs_free_netdev = true; 639 dev->features |= NETIF_F_NETNS_LOCAL; 640 } 641 642 static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt) 643 { 644 struct net_device *dev; 645 char name[IFNAMSIZ]; 646 647 if (mrt->id == RT6_TABLE_DFLT) 648 sprintf(name, "pim6reg"); 649 else 650 sprintf(name, "pim6reg%u", mrt->id); 651 652 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup); 653 if (!dev) 654 return NULL; 655 656 dev_net_set(dev, net); 657 658 if (register_netdevice(dev)) { 659 free_netdev(dev); 660 return NULL; 661 } 662 663 if (dev_open(dev, NULL)) 664 goto failure; 665 666 dev_hold(dev); 667 return dev; 668 669 failure: 670 unregister_netdevice(dev); 671 return NULL; 672 } 673 #endif 674 675 static int call_ip6mr_vif_entry_notifiers(struct net *net, 676 enum fib_event_type event_type, 677 struct vif_device *vif, 678 mifi_t vif_index, u32 tb_id) 679 { 680 return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type, 681 vif, vif_index, tb_id, 682 &net->ipv6.ipmr_seq); 683 } 684 685 static int call_ip6mr_mfc_entry_notifiers(struct net *net, 686 enum fib_event_type event_type, 687 struct mfc6_cache *mfc, u32 tb_id) 688 { 689 return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type, 690 &mfc->_c, tb_id, &net->ipv6.ipmr_seq); 691 } 692 693 /* Delete a VIF entry */ 694 static int mif6_delete(struct mr_table *mrt, int vifi, int notify, 695 struct list_head *head) 696 { 697 struct vif_device *v; 698 struct net_device *dev; 699 struct inet6_dev *in6_dev; 700 701 if (vifi < 0 || vifi >= mrt->maxvif) 702 return -EADDRNOTAVAIL; 703 704 v = &mrt->vif_table[vifi]; 705 706 if (VIF_EXISTS(mrt, vifi)) 707 call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net), 708 FIB_EVENT_VIF_DEL, v, vifi, 709 mrt->id); 710 711 write_lock_bh(&mrt_lock); 712 dev = v->dev; 713 v->dev = NULL; 714 715 if (!dev) { 716 write_unlock_bh(&mrt_lock); 717 return -EADDRNOTAVAIL; 718 } 719 720 #ifdef CONFIG_IPV6_PIMSM_V2 721 if (vifi == mrt->mroute_reg_vif_num) 722 mrt->mroute_reg_vif_num = -1; 723 #endif 724 725 if (vifi + 1 == mrt->maxvif) { 726 int tmp; 727 for (tmp = vifi - 1; tmp >= 0; tmp--) { 728 if (VIF_EXISTS(mrt, tmp)) 729 break; 730 } 731 mrt->maxvif = tmp + 1; 732 } 733 734 write_unlock_bh(&mrt_lock); 735 736 dev_set_allmulti(dev, -1); 737 738 in6_dev = __in6_dev_get(dev); 739 if (in6_dev) { 740 in6_dev->cnf.mc_forwarding--; 741 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, 742 NETCONFA_MC_FORWARDING, 743 dev->ifindex, &in6_dev->cnf); 744 } 745 746 if ((v->flags & MIFF_REGISTER) && !notify) 747 unregister_netdevice_queue(dev, head); 748 749 dev_put(dev); 750 return 0; 751 } 752 753 static inline void ip6mr_cache_free_rcu(struct rcu_head *head) 754 { 755 struct mr_mfc *c = container_of(head, struct mr_mfc, rcu); 756 757 kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c); 758 } 759 760 static inline void ip6mr_cache_free(struct mfc6_cache *c) 761 { 762 call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu); 763 } 764 765 /* Destroy an unresolved cache entry, killing queued skbs 766 and reporting error to netlink readers. 767 */ 768 769 static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c) 770 { 771 struct net *net = read_pnet(&mrt->net); 772 struct sk_buff *skb; 773 774 atomic_dec(&mrt->cache_resolve_queue_len); 775 776 while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) { 777 if (ipv6_hdr(skb)->version == 0) { 778 struct nlmsghdr *nlh = skb_pull(skb, 779 sizeof(struct ipv6hdr)); 780 nlh->nlmsg_type = NLMSG_ERROR; 781 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr)); 782 skb_trim(skb, nlh->nlmsg_len); 783 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT; 784 rtnl_unicast(skb, net, NETLINK_CB(skb).portid); 785 } else 786 kfree_skb(skb); 787 } 788 789 ip6mr_cache_free(c); 790 } 791 792 793 /* Timer process for all the unresolved queue. */ 794 795 static void ipmr_do_expire_process(struct mr_table *mrt) 796 { 797 unsigned long now = jiffies; 798 unsigned long expires = 10 * HZ; 799 struct mr_mfc *c, *next; 800 801 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { 802 if (time_after(c->mfc_un.unres.expires, now)) { 803 /* not yet... */ 804 unsigned long interval = c->mfc_un.unres.expires - now; 805 if (interval < expires) 806 expires = interval; 807 continue; 808 } 809 810 list_del(&c->list); 811 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE); 812 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c); 813 } 814 815 if (!list_empty(&mrt->mfc_unres_queue)) 816 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires); 817 } 818 819 static void ipmr_expire_process(struct timer_list *t) 820 { 821 struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer); 822 823 if (!spin_trylock(&mfc_unres_lock)) { 824 mod_timer(&mrt->ipmr_expire_timer, jiffies + 1); 825 return; 826 } 827 828 if (!list_empty(&mrt->mfc_unres_queue)) 829 ipmr_do_expire_process(mrt); 830 831 spin_unlock(&mfc_unres_lock); 832 } 833 834 /* Fill oifs list. It is called under write locked mrt_lock. */ 835 836 static void ip6mr_update_thresholds(struct mr_table *mrt, 837 struct mr_mfc *cache, 838 unsigned char *ttls) 839 { 840 int vifi; 841 842 cache->mfc_un.res.minvif = MAXMIFS; 843 cache->mfc_un.res.maxvif = 0; 844 memset(cache->mfc_un.res.ttls, 255, MAXMIFS); 845 846 for (vifi = 0; vifi < mrt->maxvif; vifi++) { 847 if (VIF_EXISTS(mrt, vifi) && 848 ttls[vifi] && ttls[vifi] < 255) { 849 cache->mfc_un.res.ttls[vifi] = ttls[vifi]; 850 if (cache->mfc_un.res.minvif > vifi) 851 cache->mfc_un.res.minvif = vifi; 852 if (cache->mfc_un.res.maxvif <= vifi) 853 cache->mfc_un.res.maxvif = vifi + 1; 854 } 855 } 856 cache->mfc_un.res.lastuse = jiffies; 857 } 858 859 static int mif6_add(struct net *net, struct mr_table *mrt, 860 struct mif6ctl *vifc, int mrtsock) 861 { 862 int vifi = vifc->mif6c_mifi; 863 struct vif_device *v = &mrt->vif_table[vifi]; 864 struct net_device *dev; 865 struct inet6_dev *in6_dev; 866 int err; 867 868 /* Is vif busy ? */ 869 if (VIF_EXISTS(mrt, vifi)) 870 return -EADDRINUSE; 871 872 switch (vifc->mif6c_flags) { 873 #ifdef CONFIG_IPV6_PIMSM_V2 874 case MIFF_REGISTER: 875 /* 876 * Special Purpose VIF in PIM 877 * All the packets will be sent to the daemon 878 */ 879 if (mrt->mroute_reg_vif_num >= 0) 880 return -EADDRINUSE; 881 dev = ip6mr_reg_vif(net, mrt); 882 if (!dev) 883 return -ENOBUFS; 884 err = dev_set_allmulti(dev, 1); 885 if (err) { 886 unregister_netdevice(dev); 887 dev_put(dev); 888 return err; 889 } 890 break; 891 #endif 892 case 0: 893 dev = dev_get_by_index(net, vifc->mif6c_pifi); 894 if (!dev) 895 return -EADDRNOTAVAIL; 896 err = dev_set_allmulti(dev, 1); 897 if (err) { 898 dev_put(dev); 899 return err; 900 } 901 break; 902 default: 903 return -EINVAL; 904 } 905 906 in6_dev = __in6_dev_get(dev); 907 if (in6_dev) { 908 in6_dev->cnf.mc_forwarding++; 909 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, 910 NETCONFA_MC_FORWARDING, 911 dev->ifindex, &in6_dev->cnf); 912 } 913 914 /* Fill in the VIF structures */ 915 vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold, 916 vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0), 917 MIFF_REGISTER); 918 919 /* And finish update writing critical data */ 920 write_lock_bh(&mrt_lock); 921 v->dev = dev; 922 #ifdef CONFIG_IPV6_PIMSM_V2 923 if (v->flags & MIFF_REGISTER) 924 mrt->mroute_reg_vif_num = vifi; 925 #endif 926 if (vifi + 1 > mrt->maxvif) 927 mrt->maxvif = vifi + 1; 928 write_unlock_bh(&mrt_lock); 929 call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, 930 v, vifi, mrt->id); 931 return 0; 932 } 933 934 static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt, 935 const struct in6_addr *origin, 936 const struct in6_addr *mcastgrp) 937 { 938 struct mfc6_cache_cmp_arg arg = { 939 .mf6c_origin = *origin, 940 .mf6c_mcastgrp = *mcastgrp, 941 }; 942 943 return mr_mfc_find(mrt, &arg); 944 } 945 946 /* Look for a (*,G) entry */ 947 static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt, 948 struct in6_addr *mcastgrp, 949 mifi_t mifi) 950 { 951 struct mfc6_cache_cmp_arg arg = { 952 .mf6c_origin = in6addr_any, 953 .mf6c_mcastgrp = *mcastgrp, 954 }; 955 956 if (ipv6_addr_any(mcastgrp)) 957 return mr_mfc_find_any_parent(mrt, mifi); 958 return mr_mfc_find_any(mrt, mifi, &arg); 959 } 960 961 /* Look for a (S,G,iif) entry if parent != -1 */ 962 static struct mfc6_cache * 963 ip6mr_cache_find_parent(struct mr_table *mrt, 964 const struct in6_addr *origin, 965 const struct in6_addr *mcastgrp, 966 int parent) 967 { 968 struct mfc6_cache_cmp_arg arg = { 969 .mf6c_origin = *origin, 970 .mf6c_mcastgrp = *mcastgrp, 971 }; 972 973 return mr_mfc_find_parent(mrt, &arg, parent); 974 } 975 976 /* Allocate a multicast cache entry */ 977 static struct mfc6_cache *ip6mr_cache_alloc(void) 978 { 979 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); 980 if (!c) 981 return NULL; 982 c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1; 983 c->_c.mfc_un.res.minvif = MAXMIFS; 984 c->_c.free = ip6mr_cache_free_rcu; 985 refcount_set(&c->_c.mfc_un.res.refcount, 1); 986 return c; 987 } 988 989 static struct mfc6_cache *ip6mr_cache_alloc_unres(void) 990 { 991 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); 992 if (!c) 993 return NULL; 994 skb_queue_head_init(&c->_c.mfc_un.unres.unresolved); 995 c->_c.mfc_un.unres.expires = jiffies + 10 * HZ; 996 return c; 997 } 998 999 /* 1000 * A cache entry has gone into a resolved state from queued 1001 */ 1002 1003 static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt, 1004 struct mfc6_cache *uc, struct mfc6_cache *c) 1005 { 1006 struct sk_buff *skb; 1007 1008 /* 1009 * Play the pending entries through our router 1010 */ 1011 1012 while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) { 1013 if (ipv6_hdr(skb)->version == 0) { 1014 struct nlmsghdr *nlh = skb_pull(skb, 1015 sizeof(struct ipv6hdr)); 1016 1017 if (mr_fill_mroute(mrt, skb, &c->_c, 1018 nlmsg_data(nlh)) > 0) { 1019 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh; 1020 } else { 1021 nlh->nlmsg_type = NLMSG_ERROR; 1022 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr)); 1023 skb_trim(skb, nlh->nlmsg_len); 1024 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE; 1025 } 1026 rtnl_unicast(skb, net, NETLINK_CB(skb).portid); 1027 } else 1028 ip6_mr_forward(net, mrt, skb->dev, skb, c); 1029 } 1030 } 1031 1032 /* 1033 * Bounce a cache query up to pim6sd and netlink. 1034 * 1035 * Called under mrt_lock. 1036 */ 1037 1038 static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, 1039 mifi_t mifi, int assert) 1040 { 1041 struct sock *mroute6_sk; 1042 struct sk_buff *skb; 1043 struct mrt6msg *msg; 1044 int ret; 1045 1046 #ifdef CONFIG_IPV6_PIMSM_V2 1047 if (assert == MRT6MSG_WHOLEPKT) 1048 skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt) 1049 +sizeof(*msg)); 1050 else 1051 #endif 1052 skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC); 1053 1054 if (!skb) 1055 return -ENOBUFS; 1056 1057 /* I suppose that internal messages 1058 * do not require checksums */ 1059 1060 skb->ip_summed = CHECKSUM_UNNECESSARY; 1061 1062 #ifdef CONFIG_IPV6_PIMSM_V2 1063 if (assert == MRT6MSG_WHOLEPKT) { 1064 /* Ugly, but we have no choice with this interface. 1065 Duplicate old header, fix length etc. 1066 And all this only to mangle msg->im6_msgtype and 1067 to set msg->im6_mbz to "mbz" :-) 1068 */ 1069 skb_push(skb, -skb_network_offset(pkt)); 1070 1071 skb_push(skb, sizeof(*msg)); 1072 skb_reset_transport_header(skb); 1073 msg = (struct mrt6msg *)skb_transport_header(skb); 1074 msg->im6_mbz = 0; 1075 msg->im6_msgtype = MRT6MSG_WHOLEPKT; 1076 msg->im6_mif = mrt->mroute_reg_vif_num; 1077 msg->im6_pad = 0; 1078 msg->im6_src = ipv6_hdr(pkt)->saddr; 1079 msg->im6_dst = ipv6_hdr(pkt)->daddr; 1080 1081 skb->ip_summed = CHECKSUM_UNNECESSARY; 1082 } else 1083 #endif 1084 { 1085 /* 1086 * Copy the IP header 1087 */ 1088 1089 skb_put(skb, sizeof(struct ipv6hdr)); 1090 skb_reset_network_header(skb); 1091 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr)); 1092 1093 /* 1094 * Add our header 1095 */ 1096 skb_put(skb, sizeof(*msg)); 1097 skb_reset_transport_header(skb); 1098 msg = (struct mrt6msg *)skb_transport_header(skb); 1099 1100 msg->im6_mbz = 0; 1101 msg->im6_msgtype = assert; 1102 msg->im6_mif = mifi; 1103 msg->im6_pad = 0; 1104 msg->im6_src = ipv6_hdr(pkt)->saddr; 1105 msg->im6_dst = ipv6_hdr(pkt)->daddr; 1106 1107 skb_dst_set(skb, dst_clone(skb_dst(pkt))); 1108 skb->ip_summed = CHECKSUM_UNNECESSARY; 1109 } 1110 1111 rcu_read_lock(); 1112 mroute6_sk = rcu_dereference(mrt->mroute_sk); 1113 if (!mroute6_sk) { 1114 rcu_read_unlock(); 1115 kfree_skb(skb); 1116 return -EINVAL; 1117 } 1118 1119 mrt6msg_netlink_event(mrt, skb); 1120 1121 /* Deliver to user space multicast routing algorithms */ 1122 ret = sock_queue_rcv_skb(mroute6_sk, skb); 1123 rcu_read_unlock(); 1124 if (ret < 0) { 1125 net_warn_ratelimited("mroute6: pending queue full, dropping entries\n"); 1126 kfree_skb(skb); 1127 } 1128 1129 return ret; 1130 } 1131 1132 /* Queue a packet for resolution. It gets locked cache entry! */ 1133 static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi, 1134 struct sk_buff *skb, struct net_device *dev) 1135 { 1136 struct mfc6_cache *c; 1137 bool found = false; 1138 int err; 1139 1140 spin_lock_bh(&mfc_unres_lock); 1141 list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) { 1142 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) && 1143 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) { 1144 found = true; 1145 break; 1146 } 1147 } 1148 1149 if (!found) { 1150 /* 1151 * Create a new entry if allowable 1152 */ 1153 1154 c = ip6mr_cache_alloc_unres(); 1155 if (!c) { 1156 spin_unlock_bh(&mfc_unres_lock); 1157 1158 kfree_skb(skb); 1159 return -ENOBUFS; 1160 } 1161 1162 /* Fill in the new cache entry */ 1163 c->_c.mfc_parent = -1; 1164 c->mf6c_origin = ipv6_hdr(skb)->saddr; 1165 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr; 1166 1167 /* 1168 * Reflect first query at pim6sd 1169 */ 1170 err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE); 1171 if (err < 0) { 1172 /* If the report failed throw the cache entry 1173 out - Brad Parker 1174 */ 1175 spin_unlock_bh(&mfc_unres_lock); 1176 1177 ip6mr_cache_free(c); 1178 kfree_skb(skb); 1179 return err; 1180 } 1181 1182 atomic_inc(&mrt->cache_resolve_queue_len); 1183 list_add(&c->_c.list, &mrt->mfc_unres_queue); 1184 mr6_netlink_event(mrt, c, RTM_NEWROUTE); 1185 1186 ipmr_do_expire_process(mrt); 1187 } 1188 1189 /* See if we can append the packet */ 1190 if (c->_c.mfc_un.unres.unresolved.qlen > 3) { 1191 kfree_skb(skb); 1192 err = -ENOBUFS; 1193 } else { 1194 if (dev) { 1195 skb->dev = dev; 1196 skb->skb_iif = dev->ifindex; 1197 } 1198 skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb); 1199 err = 0; 1200 } 1201 1202 spin_unlock_bh(&mfc_unres_lock); 1203 return err; 1204 } 1205 1206 /* 1207 * MFC6 cache manipulation by user space 1208 */ 1209 1210 static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc, 1211 int parent) 1212 { 1213 struct mfc6_cache *c; 1214 1215 /* The entries are added/deleted only under RTNL */ 1216 rcu_read_lock(); 1217 c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr, 1218 &mfc->mf6cc_mcastgrp.sin6_addr, parent); 1219 rcu_read_unlock(); 1220 if (!c) 1221 return -ENOENT; 1222 rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params); 1223 list_del_rcu(&c->_c.list); 1224 1225 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net), 1226 FIB_EVENT_ENTRY_DEL, c, mrt->id); 1227 mr6_netlink_event(mrt, c, RTM_DELROUTE); 1228 mr_cache_put(&c->_c); 1229 return 0; 1230 } 1231 1232 static int ip6mr_device_event(struct notifier_block *this, 1233 unsigned long event, void *ptr) 1234 { 1235 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1236 struct net *net = dev_net(dev); 1237 struct mr_table *mrt; 1238 struct vif_device *v; 1239 int ct; 1240 1241 if (event != NETDEV_UNREGISTER) 1242 return NOTIFY_DONE; 1243 1244 ip6mr_for_each_table(mrt, net) { 1245 v = &mrt->vif_table[0]; 1246 for (ct = 0; ct < mrt->maxvif; ct++, v++) { 1247 if (v->dev == dev) 1248 mif6_delete(mrt, ct, 1, NULL); 1249 } 1250 } 1251 1252 return NOTIFY_DONE; 1253 } 1254 1255 static unsigned int ip6mr_seq_read(struct net *net) 1256 { 1257 ASSERT_RTNL(); 1258 1259 return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net); 1260 } 1261 1262 static int ip6mr_dump(struct net *net, struct notifier_block *nb, 1263 struct netlink_ext_ack *extack) 1264 { 1265 return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump, 1266 ip6mr_mr_table_iter, &mrt_lock, extack); 1267 } 1268 1269 static struct notifier_block ip6_mr_notifier = { 1270 .notifier_call = ip6mr_device_event 1271 }; 1272 1273 static const struct fib_notifier_ops ip6mr_notifier_ops_template = { 1274 .family = RTNL_FAMILY_IP6MR, 1275 .fib_seq_read = ip6mr_seq_read, 1276 .fib_dump = ip6mr_dump, 1277 .owner = THIS_MODULE, 1278 }; 1279 1280 static int __net_init ip6mr_notifier_init(struct net *net) 1281 { 1282 struct fib_notifier_ops *ops; 1283 1284 net->ipv6.ipmr_seq = 0; 1285 1286 ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net); 1287 if (IS_ERR(ops)) 1288 return PTR_ERR(ops); 1289 1290 net->ipv6.ip6mr_notifier_ops = ops; 1291 1292 return 0; 1293 } 1294 1295 static void __net_exit ip6mr_notifier_exit(struct net *net) 1296 { 1297 fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops); 1298 net->ipv6.ip6mr_notifier_ops = NULL; 1299 } 1300 1301 /* Setup for IP multicast routing */ 1302 static int __net_init ip6mr_net_init(struct net *net) 1303 { 1304 int err; 1305 1306 err = ip6mr_notifier_init(net); 1307 if (err) 1308 return err; 1309 1310 err = ip6mr_rules_init(net); 1311 if (err < 0) 1312 goto ip6mr_rules_fail; 1313 1314 #ifdef CONFIG_PROC_FS 1315 err = -ENOMEM; 1316 if (!proc_create_net("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_seq_ops, 1317 sizeof(struct mr_vif_iter))) 1318 goto proc_vif_fail; 1319 if (!proc_create_net("ip6_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops, 1320 sizeof(struct mr_mfc_iter))) 1321 goto proc_cache_fail; 1322 #endif 1323 1324 return 0; 1325 1326 #ifdef CONFIG_PROC_FS 1327 proc_cache_fail: 1328 remove_proc_entry("ip6_mr_vif", net->proc_net); 1329 proc_vif_fail: 1330 ip6mr_rules_exit(net); 1331 #endif 1332 ip6mr_rules_fail: 1333 ip6mr_notifier_exit(net); 1334 return err; 1335 } 1336 1337 static void __net_exit ip6mr_net_exit(struct net *net) 1338 { 1339 #ifdef CONFIG_PROC_FS 1340 remove_proc_entry("ip6_mr_cache", net->proc_net); 1341 remove_proc_entry("ip6_mr_vif", net->proc_net); 1342 #endif 1343 ip6mr_rules_exit(net); 1344 ip6mr_notifier_exit(net); 1345 } 1346 1347 static struct pernet_operations ip6mr_net_ops = { 1348 .init = ip6mr_net_init, 1349 .exit = ip6mr_net_exit, 1350 }; 1351 1352 int __init ip6_mr_init(void) 1353 { 1354 int err; 1355 1356 mrt_cachep = kmem_cache_create("ip6_mrt_cache", 1357 sizeof(struct mfc6_cache), 1358 0, SLAB_HWCACHE_ALIGN, 1359 NULL); 1360 if (!mrt_cachep) 1361 return -ENOMEM; 1362 1363 err = register_pernet_subsys(&ip6mr_net_ops); 1364 if (err) 1365 goto reg_pernet_fail; 1366 1367 err = register_netdevice_notifier(&ip6_mr_notifier); 1368 if (err) 1369 goto reg_notif_fail; 1370 #ifdef CONFIG_IPV6_PIMSM_V2 1371 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) { 1372 pr_err("%s: can't add PIM protocol\n", __func__); 1373 err = -EAGAIN; 1374 goto add_proto_fail; 1375 } 1376 #endif 1377 err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE, 1378 NULL, ip6mr_rtm_dumproute, 0); 1379 if (err == 0) 1380 return 0; 1381 1382 #ifdef CONFIG_IPV6_PIMSM_V2 1383 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM); 1384 add_proto_fail: 1385 unregister_netdevice_notifier(&ip6_mr_notifier); 1386 #endif 1387 reg_notif_fail: 1388 unregister_pernet_subsys(&ip6mr_net_ops); 1389 reg_pernet_fail: 1390 kmem_cache_destroy(mrt_cachep); 1391 return err; 1392 } 1393 1394 void ip6_mr_cleanup(void) 1395 { 1396 rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE); 1397 #ifdef CONFIG_IPV6_PIMSM_V2 1398 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM); 1399 #endif 1400 unregister_netdevice_notifier(&ip6_mr_notifier); 1401 unregister_pernet_subsys(&ip6mr_net_ops); 1402 kmem_cache_destroy(mrt_cachep); 1403 } 1404 1405 static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt, 1406 struct mf6cctl *mfc, int mrtsock, int parent) 1407 { 1408 unsigned char ttls[MAXMIFS]; 1409 struct mfc6_cache *uc, *c; 1410 struct mr_mfc *_uc; 1411 bool found; 1412 int i, err; 1413 1414 if (mfc->mf6cc_parent >= MAXMIFS) 1415 return -ENFILE; 1416 1417 memset(ttls, 255, MAXMIFS); 1418 for (i = 0; i < MAXMIFS; i++) { 1419 if (IF_ISSET(i, &mfc->mf6cc_ifset)) 1420 ttls[i] = 1; 1421 } 1422 1423 /* The entries are added/deleted only under RTNL */ 1424 rcu_read_lock(); 1425 c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr, 1426 &mfc->mf6cc_mcastgrp.sin6_addr, parent); 1427 rcu_read_unlock(); 1428 if (c) { 1429 write_lock_bh(&mrt_lock); 1430 c->_c.mfc_parent = mfc->mf6cc_parent; 1431 ip6mr_update_thresholds(mrt, &c->_c, ttls); 1432 if (!mrtsock) 1433 c->_c.mfc_flags |= MFC_STATIC; 1434 write_unlock_bh(&mrt_lock); 1435 call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, 1436 c, mrt->id); 1437 mr6_netlink_event(mrt, c, RTM_NEWROUTE); 1438 return 0; 1439 } 1440 1441 if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) && 1442 !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr)) 1443 return -EINVAL; 1444 1445 c = ip6mr_cache_alloc(); 1446 if (!c) 1447 return -ENOMEM; 1448 1449 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr; 1450 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr; 1451 c->_c.mfc_parent = mfc->mf6cc_parent; 1452 ip6mr_update_thresholds(mrt, &c->_c, ttls); 1453 if (!mrtsock) 1454 c->_c.mfc_flags |= MFC_STATIC; 1455 1456 err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode, 1457 ip6mr_rht_params); 1458 if (err) { 1459 pr_err("ip6mr: rhtable insert error %d\n", err); 1460 ip6mr_cache_free(c); 1461 return err; 1462 } 1463 list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list); 1464 1465 /* Check to see if we resolved a queued list. If so we 1466 * need to send on the frames and tidy up. 1467 */ 1468 found = false; 1469 spin_lock_bh(&mfc_unres_lock); 1470 list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) { 1471 uc = (struct mfc6_cache *)_uc; 1472 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && 1473 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) { 1474 list_del(&_uc->list); 1475 atomic_dec(&mrt->cache_resolve_queue_len); 1476 found = true; 1477 break; 1478 } 1479 } 1480 if (list_empty(&mrt->mfc_unres_queue)) 1481 del_timer(&mrt->ipmr_expire_timer); 1482 spin_unlock_bh(&mfc_unres_lock); 1483 1484 if (found) { 1485 ip6mr_cache_resolve(net, mrt, uc, c); 1486 ip6mr_cache_free(uc); 1487 } 1488 call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, 1489 c, mrt->id); 1490 mr6_netlink_event(mrt, c, RTM_NEWROUTE); 1491 return 0; 1492 } 1493 1494 /* 1495 * Close the multicast socket, and clear the vif tables etc 1496 */ 1497 1498 static void mroute_clean_tables(struct mr_table *mrt, int flags) 1499 { 1500 struct mr_mfc *c, *tmp; 1501 LIST_HEAD(list); 1502 int i; 1503 1504 /* Shut down all active vif entries */ 1505 if (flags & (MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC)) { 1506 for (i = 0; i < mrt->maxvif; i++) { 1507 if (((mrt->vif_table[i].flags & VIFF_STATIC) && 1508 !(flags & MRT6_FLUSH_MIFS_STATIC)) || 1509 (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT6_FLUSH_MIFS))) 1510 continue; 1511 mif6_delete(mrt, i, 0, &list); 1512 } 1513 unregister_netdevice_many(&list); 1514 } 1515 1516 /* Wipe the cache */ 1517 if (flags & (MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC)) { 1518 list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) { 1519 if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) || 1520 (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC))) 1521 continue; 1522 rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params); 1523 list_del_rcu(&c->list); 1524 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net), 1525 FIB_EVENT_ENTRY_DEL, 1526 (struct mfc6_cache *)c, mrt->id); 1527 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE); 1528 mr_cache_put(c); 1529 } 1530 } 1531 1532 if (flags & MRT6_FLUSH_MFC) { 1533 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { 1534 spin_lock_bh(&mfc_unres_lock); 1535 list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) { 1536 list_del(&c->list); 1537 mr6_netlink_event(mrt, (struct mfc6_cache *)c, 1538 RTM_DELROUTE); 1539 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c); 1540 } 1541 spin_unlock_bh(&mfc_unres_lock); 1542 } 1543 } 1544 } 1545 1546 static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk) 1547 { 1548 int err = 0; 1549 struct net *net = sock_net(sk); 1550 1551 rtnl_lock(); 1552 write_lock_bh(&mrt_lock); 1553 if (rtnl_dereference(mrt->mroute_sk)) { 1554 err = -EADDRINUSE; 1555 } else { 1556 rcu_assign_pointer(mrt->mroute_sk, sk); 1557 sock_set_flag(sk, SOCK_RCU_FREE); 1558 net->ipv6.devconf_all->mc_forwarding++; 1559 } 1560 write_unlock_bh(&mrt_lock); 1561 1562 if (!err) 1563 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, 1564 NETCONFA_MC_FORWARDING, 1565 NETCONFA_IFINDEX_ALL, 1566 net->ipv6.devconf_all); 1567 rtnl_unlock(); 1568 1569 return err; 1570 } 1571 1572 int ip6mr_sk_done(struct sock *sk) 1573 { 1574 int err = -EACCES; 1575 struct net *net = sock_net(sk); 1576 struct mr_table *mrt; 1577 1578 if (sk->sk_type != SOCK_RAW || 1579 inet_sk(sk)->inet_num != IPPROTO_ICMPV6) 1580 return err; 1581 1582 rtnl_lock(); 1583 ip6mr_for_each_table(mrt, net) { 1584 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1585 write_lock_bh(&mrt_lock); 1586 RCU_INIT_POINTER(mrt->mroute_sk, NULL); 1587 /* Note that mroute_sk had SOCK_RCU_FREE set, 1588 * so the RCU grace period before sk freeing 1589 * is guaranteed by sk_destruct() 1590 */ 1591 net->ipv6.devconf_all->mc_forwarding--; 1592 write_unlock_bh(&mrt_lock); 1593 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, 1594 NETCONFA_MC_FORWARDING, 1595 NETCONFA_IFINDEX_ALL, 1596 net->ipv6.devconf_all); 1597 1598 mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC); 1599 err = 0; 1600 break; 1601 } 1602 } 1603 rtnl_unlock(); 1604 1605 return err; 1606 } 1607 1608 bool mroute6_is_socket(struct net *net, struct sk_buff *skb) 1609 { 1610 struct mr_table *mrt; 1611 struct flowi6 fl6 = { 1612 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX, 1613 .flowi6_oif = skb->dev->ifindex, 1614 .flowi6_mark = skb->mark, 1615 }; 1616 1617 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0) 1618 return NULL; 1619 1620 return rcu_access_pointer(mrt->mroute_sk); 1621 } 1622 EXPORT_SYMBOL(mroute6_is_socket); 1623 1624 /* 1625 * Socket options and virtual interface manipulation. The whole 1626 * virtual interface system is a complete heap, but unfortunately 1627 * that's how BSD mrouted happens to think. Maybe one day with a proper 1628 * MOSPF/PIM router set up we can clean this up. 1629 */ 1630 1631 int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen) 1632 { 1633 int ret, parent = 0; 1634 struct mif6ctl vif; 1635 struct mf6cctl mfc; 1636 mifi_t mifi; 1637 struct net *net = sock_net(sk); 1638 struct mr_table *mrt; 1639 1640 if (sk->sk_type != SOCK_RAW || 1641 inet_sk(sk)->inet_num != IPPROTO_ICMPV6) 1642 return -EOPNOTSUPP; 1643 1644 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); 1645 if (!mrt) 1646 return -ENOENT; 1647 1648 if (optname != MRT6_INIT) { 1649 if (sk != rcu_access_pointer(mrt->mroute_sk) && 1650 !ns_capable(net->user_ns, CAP_NET_ADMIN)) 1651 return -EACCES; 1652 } 1653 1654 switch (optname) { 1655 case MRT6_INIT: 1656 if (optlen < sizeof(int)) 1657 return -EINVAL; 1658 1659 return ip6mr_sk_init(mrt, sk); 1660 1661 case MRT6_DONE: 1662 return ip6mr_sk_done(sk); 1663 1664 case MRT6_ADD_MIF: 1665 if (optlen < sizeof(vif)) 1666 return -EINVAL; 1667 if (copy_from_user(&vif, optval, sizeof(vif))) 1668 return -EFAULT; 1669 if (vif.mif6c_mifi >= MAXMIFS) 1670 return -ENFILE; 1671 rtnl_lock(); 1672 ret = mif6_add(net, mrt, &vif, 1673 sk == rtnl_dereference(mrt->mroute_sk)); 1674 rtnl_unlock(); 1675 return ret; 1676 1677 case MRT6_DEL_MIF: 1678 if (optlen < sizeof(mifi_t)) 1679 return -EINVAL; 1680 if (copy_from_user(&mifi, optval, sizeof(mifi_t))) 1681 return -EFAULT; 1682 rtnl_lock(); 1683 ret = mif6_delete(mrt, mifi, 0, NULL); 1684 rtnl_unlock(); 1685 return ret; 1686 1687 /* 1688 * Manipulate the forwarding caches. These live 1689 * in a sort of kernel/user symbiosis. 1690 */ 1691 case MRT6_ADD_MFC: 1692 case MRT6_DEL_MFC: 1693 parent = -1; 1694 fallthrough; 1695 case MRT6_ADD_MFC_PROXY: 1696 case MRT6_DEL_MFC_PROXY: 1697 if (optlen < sizeof(mfc)) 1698 return -EINVAL; 1699 if (copy_from_user(&mfc, optval, sizeof(mfc))) 1700 return -EFAULT; 1701 if (parent == 0) 1702 parent = mfc.mf6cc_parent; 1703 rtnl_lock(); 1704 if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY) 1705 ret = ip6mr_mfc_delete(mrt, &mfc, parent); 1706 else 1707 ret = ip6mr_mfc_add(net, mrt, &mfc, 1708 sk == 1709 rtnl_dereference(mrt->mroute_sk), 1710 parent); 1711 rtnl_unlock(); 1712 return ret; 1713 1714 case MRT6_FLUSH: 1715 { 1716 int flags; 1717 1718 if (optlen != sizeof(flags)) 1719 return -EINVAL; 1720 if (get_user(flags, (int __user *)optval)) 1721 return -EFAULT; 1722 rtnl_lock(); 1723 mroute_clean_tables(mrt, flags); 1724 rtnl_unlock(); 1725 return 0; 1726 } 1727 1728 /* 1729 * Control PIM assert (to activate pim will activate assert) 1730 */ 1731 case MRT6_ASSERT: 1732 { 1733 int v; 1734 1735 if (optlen != sizeof(v)) 1736 return -EINVAL; 1737 if (get_user(v, (int __user *)optval)) 1738 return -EFAULT; 1739 mrt->mroute_do_assert = v; 1740 return 0; 1741 } 1742 1743 #ifdef CONFIG_IPV6_PIMSM_V2 1744 case MRT6_PIM: 1745 { 1746 int v; 1747 1748 if (optlen != sizeof(v)) 1749 return -EINVAL; 1750 if (get_user(v, (int __user *)optval)) 1751 return -EFAULT; 1752 v = !!v; 1753 rtnl_lock(); 1754 ret = 0; 1755 if (v != mrt->mroute_do_pim) { 1756 mrt->mroute_do_pim = v; 1757 mrt->mroute_do_assert = v; 1758 } 1759 rtnl_unlock(); 1760 return ret; 1761 } 1762 1763 #endif 1764 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES 1765 case MRT6_TABLE: 1766 { 1767 u32 v; 1768 1769 if (optlen != sizeof(u32)) 1770 return -EINVAL; 1771 if (get_user(v, (u32 __user *)optval)) 1772 return -EFAULT; 1773 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */ 1774 if (v != RT_TABLE_DEFAULT && v >= 100000000) 1775 return -EINVAL; 1776 if (sk == rcu_access_pointer(mrt->mroute_sk)) 1777 return -EBUSY; 1778 1779 rtnl_lock(); 1780 ret = 0; 1781 mrt = ip6mr_new_table(net, v); 1782 if (IS_ERR(mrt)) 1783 ret = PTR_ERR(mrt); 1784 else 1785 raw6_sk(sk)->ip6mr_table = v; 1786 rtnl_unlock(); 1787 return ret; 1788 } 1789 #endif 1790 /* 1791 * Spurious command, or MRT6_VERSION which you cannot 1792 * set. 1793 */ 1794 default: 1795 return -ENOPROTOOPT; 1796 } 1797 } 1798 1799 /* 1800 * Getsock opt support for the multicast routing system. 1801 */ 1802 1803 int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, 1804 int __user *optlen) 1805 { 1806 int olr; 1807 int val; 1808 struct net *net = sock_net(sk); 1809 struct mr_table *mrt; 1810 1811 if (sk->sk_type != SOCK_RAW || 1812 inet_sk(sk)->inet_num != IPPROTO_ICMPV6) 1813 return -EOPNOTSUPP; 1814 1815 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); 1816 if (!mrt) 1817 return -ENOENT; 1818 1819 switch (optname) { 1820 case MRT6_VERSION: 1821 val = 0x0305; 1822 break; 1823 #ifdef CONFIG_IPV6_PIMSM_V2 1824 case MRT6_PIM: 1825 val = mrt->mroute_do_pim; 1826 break; 1827 #endif 1828 case MRT6_ASSERT: 1829 val = mrt->mroute_do_assert; 1830 break; 1831 default: 1832 return -ENOPROTOOPT; 1833 } 1834 1835 if (get_user(olr, optlen)) 1836 return -EFAULT; 1837 1838 olr = min_t(int, olr, sizeof(int)); 1839 if (olr < 0) 1840 return -EINVAL; 1841 1842 if (put_user(olr, optlen)) 1843 return -EFAULT; 1844 if (copy_to_user(optval, &val, olr)) 1845 return -EFAULT; 1846 return 0; 1847 } 1848 1849 /* 1850 * The IP multicast ioctl support routines. 1851 */ 1852 1853 int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) 1854 { 1855 struct sioc_sg_req6 sr; 1856 struct sioc_mif_req6 vr; 1857 struct vif_device *vif; 1858 struct mfc6_cache *c; 1859 struct net *net = sock_net(sk); 1860 struct mr_table *mrt; 1861 1862 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); 1863 if (!mrt) 1864 return -ENOENT; 1865 1866 switch (cmd) { 1867 case SIOCGETMIFCNT_IN6: 1868 if (copy_from_user(&vr, arg, sizeof(vr))) 1869 return -EFAULT; 1870 if (vr.mifi >= mrt->maxvif) 1871 return -EINVAL; 1872 vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif); 1873 read_lock(&mrt_lock); 1874 vif = &mrt->vif_table[vr.mifi]; 1875 if (VIF_EXISTS(mrt, vr.mifi)) { 1876 vr.icount = vif->pkt_in; 1877 vr.ocount = vif->pkt_out; 1878 vr.ibytes = vif->bytes_in; 1879 vr.obytes = vif->bytes_out; 1880 read_unlock(&mrt_lock); 1881 1882 if (copy_to_user(arg, &vr, sizeof(vr))) 1883 return -EFAULT; 1884 return 0; 1885 } 1886 read_unlock(&mrt_lock); 1887 return -EADDRNOTAVAIL; 1888 case SIOCGETSGCNT_IN6: 1889 if (copy_from_user(&sr, arg, sizeof(sr))) 1890 return -EFAULT; 1891 1892 rcu_read_lock(); 1893 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr); 1894 if (c) { 1895 sr.pktcnt = c->_c.mfc_un.res.pkt; 1896 sr.bytecnt = c->_c.mfc_un.res.bytes; 1897 sr.wrong_if = c->_c.mfc_un.res.wrong_if; 1898 rcu_read_unlock(); 1899 1900 if (copy_to_user(arg, &sr, sizeof(sr))) 1901 return -EFAULT; 1902 return 0; 1903 } 1904 rcu_read_unlock(); 1905 return -EADDRNOTAVAIL; 1906 default: 1907 return -ENOIOCTLCMD; 1908 } 1909 } 1910 1911 #ifdef CONFIG_COMPAT 1912 struct compat_sioc_sg_req6 { 1913 struct sockaddr_in6 src; 1914 struct sockaddr_in6 grp; 1915 compat_ulong_t pktcnt; 1916 compat_ulong_t bytecnt; 1917 compat_ulong_t wrong_if; 1918 }; 1919 1920 struct compat_sioc_mif_req6 { 1921 mifi_t mifi; 1922 compat_ulong_t icount; 1923 compat_ulong_t ocount; 1924 compat_ulong_t ibytes; 1925 compat_ulong_t obytes; 1926 }; 1927 1928 int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) 1929 { 1930 struct compat_sioc_sg_req6 sr; 1931 struct compat_sioc_mif_req6 vr; 1932 struct vif_device *vif; 1933 struct mfc6_cache *c; 1934 struct net *net = sock_net(sk); 1935 struct mr_table *mrt; 1936 1937 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); 1938 if (!mrt) 1939 return -ENOENT; 1940 1941 switch (cmd) { 1942 case SIOCGETMIFCNT_IN6: 1943 if (copy_from_user(&vr, arg, sizeof(vr))) 1944 return -EFAULT; 1945 if (vr.mifi >= mrt->maxvif) 1946 return -EINVAL; 1947 vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif); 1948 read_lock(&mrt_lock); 1949 vif = &mrt->vif_table[vr.mifi]; 1950 if (VIF_EXISTS(mrt, vr.mifi)) { 1951 vr.icount = vif->pkt_in; 1952 vr.ocount = vif->pkt_out; 1953 vr.ibytes = vif->bytes_in; 1954 vr.obytes = vif->bytes_out; 1955 read_unlock(&mrt_lock); 1956 1957 if (copy_to_user(arg, &vr, sizeof(vr))) 1958 return -EFAULT; 1959 return 0; 1960 } 1961 read_unlock(&mrt_lock); 1962 return -EADDRNOTAVAIL; 1963 case SIOCGETSGCNT_IN6: 1964 if (copy_from_user(&sr, arg, sizeof(sr))) 1965 return -EFAULT; 1966 1967 rcu_read_lock(); 1968 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr); 1969 if (c) { 1970 sr.pktcnt = c->_c.mfc_un.res.pkt; 1971 sr.bytecnt = c->_c.mfc_un.res.bytes; 1972 sr.wrong_if = c->_c.mfc_un.res.wrong_if; 1973 rcu_read_unlock(); 1974 1975 if (copy_to_user(arg, &sr, sizeof(sr))) 1976 return -EFAULT; 1977 return 0; 1978 } 1979 rcu_read_unlock(); 1980 return -EADDRNOTAVAIL; 1981 default: 1982 return -ENOIOCTLCMD; 1983 } 1984 } 1985 #endif 1986 1987 static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 1988 { 1989 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 1990 IPSTATS_MIB_OUTFORWDATAGRAMS); 1991 IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)), 1992 IPSTATS_MIB_OUTOCTETS, skb->len); 1993 return dst_output(net, sk, skb); 1994 } 1995 1996 /* 1997 * Processing handlers for ip6mr_forward 1998 */ 1999 2000 static int ip6mr_forward2(struct net *net, struct mr_table *mrt, 2001 struct sk_buff *skb, int vifi) 2002 { 2003 struct ipv6hdr *ipv6h; 2004 struct vif_device *vif = &mrt->vif_table[vifi]; 2005 struct net_device *dev; 2006 struct dst_entry *dst; 2007 struct flowi6 fl6; 2008 2009 if (!vif->dev) 2010 goto out_free; 2011 2012 #ifdef CONFIG_IPV6_PIMSM_V2 2013 if (vif->flags & MIFF_REGISTER) { 2014 vif->pkt_out++; 2015 vif->bytes_out += skb->len; 2016 vif->dev->stats.tx_bytes += skb->len; 2017 vif->dev->stats.tx_packets++; 2018 ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT); 2019 goto out_free; 2020 } 2021 #endif 2022 2023 ipv6h = ipv6_hdr(skb); 2024 2025 fl6 = (struct flowi6) { 2026 .flowi6_oif = vif->link, 2027 .daddr = ipv6h->daddr, 2028 }; 2029 2030 dst = ip6_route_output(net, NULL, &fl6); 2031 if (dst->error) { 2032 dst_release(dst); 2033 goto out_free; 2034 } 2035 2036 skb_dst_drop(skb); 2037 skb_dst_set(skb, dst); 2038 2039 /* 2040 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally 2041 * not only before forwarding, but after forwarding on all output 2042 * interfaces. It is clear, if mrouter runs a multicasting 2043 * program, it should receive packets not depending to what interface 2044 * program is joined. 2045 * If we will not make it, the program will have to join on all 2046 * interfaces. On the other hand, multihoming host (or router, but 2047 * not mrouter) cannot join to more than one interface - it will 2048 * result in receiving multiple packets. 2049 */ 2050 dev = vif->dev; 2051 skb->dev = dev; 2052 vif->pkt_out++; 2053 vif->bytes_out += skb->len; 2054 2055 /* We are about to write */ 2056 /* XXX: extension headers? */ 2057 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev))) 2058 goto out_free; 2059 2060 ipv6h = ipv6_hdr(skb); 2061 ipv6h->hop_limit--; 2062 2063 IP6CB(skb)->flags |= IP6SKB_FORWARDED; 2064 2065 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, 2066 net, NULL, skb, skb->dev, dev, 2067 ip6mr_forward2_finish); 2068 2069 out_free: 2070 kfree_skb(skb); 2071 return 0; 2072 } 2073 2074 static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev) 2075 { 2076 int ct; 2077 2078 for (ct = mrt->maxvif - 1; ct >= 0; ct--) { 2079 if (mrt->vif_table[ct].dev == dev) 2080 break; 2081 } 2082 return ct; 2083 } 2084 2085 static void ip6_mr_forward(struct net *net, struct mr_table *mrt, 2086 struct net_device *dev, struct sk_buff *skb, 2087 struct mfc6_cache *c) 2088 { 2089 int psend = -1; 2090 int vif, ct; 2091 int true_vifi = ip6mr_find_vif(mrt, dev); 2092 2093 vif = c->_c.mfc_parent; 2094 c->_c.mfc_un.res.pkt++; 2095 c->_c.mfc_un.res.bytes += skb->len; 2096 c->_c.mfc_un.res.lastuse = jiffies; 2097 2098 if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) { 2099 struct mfc6_cache *cache_proxy; 2100 2101 /* For an (*,G) entry, we only check that the incoming 2102 * interface is part of the static tree. 2103 */ 2104 rcu_read_lock(); 2105 cache_proxy = mr_mfc_find_any_parent(mrt, vif); 2106 if (cache_proxy && 2107 cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) { 2108 rcu_read_unlock(); 2109 goto forward; 2110 } 2111 rcu_read_unlock(); 2112 } 2113 2114 /* 2115 * Wrong interface: drop packet and (maybe) send PIM assert. 2116 */ 2117 if (mrt->vif_table[vif].dev != dev) { 2118 c->_c.mfc_un.res.wrong_if++; 2119 2120 if (true_vifi >= 0 && mrt->mroute_do_assert && 2121 /* pimsm uses asserts, when switching from RPT to SPT, 2122 so that we cannot check that packet arrived on an oif. 2123 It is bad, but otherwise we would need to move pretty 2124 large chunk of pimd to kernel. Ough... --ANK 2125 */ 2126 (mrt->mroute_do_pim || 2127 c->_c.mfc_un.res.ttls[true_vifi] < 255) && 2128 time_after(jiffies, 2129 c->_c.mfc_un.res.last_assert + 2130 MFC_ASSERT_THRESH)) { 2131 c->_c.mfc_un.res.last_assert = jiffies; 2132 ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF); 2133 } 2134 goto dont_forward; 2135 } 2136 2137 forward: 2138 mrt->vif_table[vif].pkt_in++; 2139 mrt->vif_table[vif].bytes_in += skb->len; 2140 2141 /* 2142 * Forward the frame 2143 */ 2144 if (ipv6_addr_any(&c->mf6c_origin) && 2145 ipv6_addr_any(&c->mf6c_mcastgrp)) { 2146 if (true_vifi >= 0 && 2147 true_vifi != c->_c.mfc_parent && 2148 ipv6_hdr(skb)->hop_limit > 2149 c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) { 2150 /* It's an (*,*) entry and the packet is not coming from 2151 * the upstream: forward the packet to the upstream 2152 * only. 2153 */ 2154 psend = c->_c.mfc_parent; 2155 goto last_forward; 2156 } 2157 goto dont_forward; 2158 } 2159 for (ct = c->_c.mfc_un.res.maxvif - 1; 2160 ct >= c->_c.mfc_un.res.minvif; ct--) { 2161 /* For (*,G) entry, don't forward to the incoming interface */ 2162 if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) && 2163 ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) { 2164 if (psend != -1) { 2165 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 2166 if (skb2) 2167 ip6mr_forward2(net, mrt, skb2, psend); 2168 } 2169 psend = ct; 2170 } 2171 } 2172 last_forward: 2173 if (psend != -1) { 2174 ip6mr_forward2(net, mrt, skb, psend); 2175 return; 2176 } 2177 2178 dont_forward: 2179 kfree_skb(skb); 2180 } 2181 2182 2183 /* 2184 * Multicast packets for forwarding arrive here 2185 */ 2186 2187 int ip6_mr_input(struct sk_buff *skb) 2188 { 2189 struct mfc6_cache *cache; 2190 struct net *net = dev_net(skb->dev); 2191 struct mr_table *mrt; 2192 struct flowi6 fl6 = { 2193 .flowi6_iif = skb->dev->ifindex, 2194 .flowi6_mark = skb->mark, 2195 }; 2196 int err; 2197 struct net_device *dev; 2198 2199 /* skb->dev passed in is the master dev for vrfs. 2200 * Get the proper interface that does have a vif associated with it. 2201 */ 2202 dev = skb->dev; 2203 if (netif_is_l3_master(skb->dev)) { 2204 dev = dev_get_by_index_rcu(net, IPCB(skb)->iif); 2205 if (!dev) { 2206 kfree_skb(skb); 2207 return -ENODEV; 2208 } 2209 } 2210 2211 err = ip6mr_fib_lookup(net, &fl6, &mrt); 2212 if (err < 0) { 2213 kfree_skb(skb); 2214 return err; 2215 } 2216 2217 read_lock(&mrt_lock); 2218 cache = ip6mr_cache_find(mrt, 2219 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr); 2220 if (!cache) { 2221 int vif = ip6mr_find_vif(mrt, dev); 2222 2223 if (vif >= 0) 2224 cache = ip6mr_cache_find_any(mrt, 2225 &ipv6_hdr(skb)->daddr, 2226 vif); 2227 } 2228 2229 /* 2230 * No usable cache entry 2231 */ 2232 if (!cache) { 2233 int vif; 2234 2235 vif = ip6mr_find_vif(mrt, dev); 2236 if (vif >= 0) { 2237 int err = ip6mr_cache_unresolved(mrt, vif, skb, dev); 2238 read_unlock(&mrt_lock); 2239 2240 return err; 2241 } 2242 read_unlock(&mrt_lock); 2243 kfree_skb(skb); 2244 return -ENODEV; 2245 } 2246 2247 ip6_mr_forward(net, mrt, dev, skb, cache); 2248 2249 read_unlock(&mrt_lock); 2250 2251 return 0; 2252 } 2253 2254 int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, 2255 u32 portid) 2256 { 2257 int err; 2258 struct mr_table *mrt; 2259 struct mfc6_cache *cache; 2260 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); 2261 2262 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); 2263 if (!mrt) 2264 return -ENOENT; 2265 2266 read_lock(&mrt_lock); 2267 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); 2268 if (!cache && skb->dev) { 2269 int vif = ip6mr_find_vif(mrt, skb->dev); 2270 2271 if (vif >= 0) 2272 cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr, 2273 vif); 2274 } 2275 2276 if (!cache) { 2277 struct sk_buff *skb2; 2278 struct ipv6hdr *iph; 2279 struct net_device *dev; 2280 int vif; 2281 2282 dev = skb->dev; 2283 if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) { 2284 read_unlock(&mrt_lock); 2285 return -ENODEV; 2286 } 2287 2288 /* really correct? */ 2289 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC); 2290 if (!skb2) { 2291 read_unlock(&mrt_lock); 2292 return -ENOMEM; 2293 } 2294 2295 NETLINK_CB(skb2).portid = portid; 2296 skb_reset_transport_header(skb2); 2297 2298 skb_put(skb2, sizeof(struct ipv6hdr)); 2299 skb_reset_network_header(skb2); 2300 2301 iph = ipv6_hdr(skb2); 2302 iph->version = 0; 2303 iph->priority = 0; 2304 iph->flow_lbl[0] = 0; 2305 iph->flow_lbl[1] = 0; 2306 iph->flow_lbl[2] = 0; 2307 iph->payload_len = 0; 2308 iph->nexthdr = IPPROTO_NONE; 2309 iph->hop_limit = 0; 2310 iph->saddr = rt->rt6i_src.addr; 2311 iph->daddr = rt->rt6i_dst.addr; 2312 2313 err = ip6mr_cache_unresolved(mrt, vif, skb2, dev); 2314 read_unlock(&mrt_lock); 2315 2316 return err; 2317 } 2318 2319 err = mr_fill_mroute(mrt, skb, &cache->_c, rtm); 2320 read_unlock(&mrt_lock); 2321 return err; 2322 } 2323 2324 static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 2325 u32 portid, u32 seq, struct mfc6_cache *c, int cmd, 2326 int flags) 2327 { 2328 struct nlmsghdr *nlh; 2329 struct rtmsg *rtm; 2330 int err; 2331 2332 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); 2333 if (!nlh) 2334 return -EMSGSIZE; 2335 2336 rtm = nlmsg_data(nlh); 2337 rtm->rtm_family = RTNL_FAMILY_IP6MR; 2338 rtm->rtm_dst_len = 128; 2339 rtm->rtm_src_len = 128; 2340 rtm->rtm_tos = 0; 2341 rtm->rtm_table = mrt->id; 2342 if (nla_put_u32(skb, RTA_TABLE, mrt->id)) 2343 goto nla_put_failure; 2344 rtm->rtm_type = RTN_MULTICAST; 2345 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 2346 if (c->_c.mfc_flags & MFC_STATIC) 2347 rtm->rtm_protocol = RTPROT_STATIC; 2348 else 2349 rtm->rtm_protocol = RTPROT_MROUTED; 2350 rtm->rtm_flags = 0; 2351 2352 if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) || 2353 nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp)) 2354 goto nla_put_failure; 2355 err = mr_fill_mroute(mrt, skb, &c->_c, rtm); 2356 /* do not break the dump if cache is unresolved */ 2357 if (err < 0 && err != -ENOENT) 2358 goto nla_put_failure; 2359 2360 nlmsg_end(skb, nlh); 2361 return 0; 2362 2363 nla_put_failure: 2364 nlmsg_cancel(skb, nlh); 2365 return -EMSGSIZE; 2366 } 2367 2368 static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 2369 u32 portid, u32 seq, struct mr_mfc *c, 2370 int cmd, int flags) 2371 { 2372 return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c, 2373 cmd, flags); 2374 } 2375 2376 static int mr6_msgsize(bool unresolved, int maxvif) 2377 { 2378 size_t len = 2379 NLMSG_ALIGN(sizeof(struct rtmsg)) 2380 + nla_total_size(4) /* RTA_TABLE */ 2381 + nla_total_size(sizeof(struct in6_addr)) /* RTA_SRC */ 2382 + nla_total_size(sizeof(struct in6_addr)) /* RTA_DST */ 2383 ; 2384 2385 if (!unresolved) 2386 len = len 2387 + nla_total_size(4) /* RTA_IIF */ 2388 + nla_total_size(0) /* RTA_MULTIPATH */ 2389 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop)) 2390 /* RTA_MFC_STATS */ 2391 + nla_total_size_64bit(sizeof(struct rta_mfc_stats)) 2392 ; 2393 2394 return len; 2395 } 2396 2397 static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc, 2398 int cmd) 2399 { 2400 struct net *net = read_pnet(&mrt->net); 2401 struct sk_buff *skb; 2402 int err = -ENOBUFS; 2403 2404 skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif), 2405 GFP_ATOMIC); 2406 if (!skb) 2407 goto errout; 2408 2409 err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); 2410 if (err < 0) 2411 goto errout; 2412 2413 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC); 2414 return; 2415 2416 errout: 2417 kfree_skb(skb); 2418 if (err < 0) 2419 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err); 2420 } 2421 2422 static size_t mrt6msg_netlink_msgsize(size_t payloadlen) 2423 { 2424 size_t len = 2425 NLMSG_ALIGN(sizeof(struct rtgenmsg)) 2426 + nla_total_size(1) /* IP6MRA_CREPORT_MSGTYPE */ 2427 + nla_total_size(4) /* IP6MRA_CREPORT_MIF_ID */ 2428 /* IP6MRA_CREPORT_SRC_ADDR */ 2429 + nla_total_size(sizeof(struct in6_addr)) 2430 /* IP6MRA_CREPORT_DST_ADDR */ 2431 + nla_total_size(sizeof(struct in6_addr)) 2432 /* IP6MRA_CREPORT_PKT */ 2433 + nla_total_size(payloadlen) 2434 ; 2435 2436 return len; 2437 } 2438 2439 static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt) 2440 { 2441 struct net *net = read_pnet(&mrt->net); 2442 struct nlmsghdr *nlh; 2443 struct rtgenmsg *rtgenm; 2444 struct mrt6msg *msg; 2445 struct sk_buff *skb; 2446 struct nlattr *nla; 2447 int payloadlen; 2448 2449 payloadlen = pkt->len - sizeof(struct mrt6msg); 2450 msg = (struct mrt6msg *)skb_transport_header(pkt); 2451 2452 skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC); 2453 if (!skb) 2454 goto errout; 2455 2456 nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT, 2457 sizeof(struct rtgenmsg), 0); 2458 if (!nlh) 2459 goto errout; 2460 rtgenm = nlmsg_data(nlh); 2461 rtgenm->rtgen_family = RTNL_FAMILY_IP6MR; 2462 if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) || 2463 nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) || 2464 nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR, 2465 &msg->im6_src) || 2466 nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR, 2467 &msg->im6_dst)) 2468 goto nla_put_failure; 2469 2470 nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen); 2471 if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg), 2472 nla_data(nla), payloadlen)) 2473 goto nla_put_failure; 2474 2475 nlmsg_end(skb, nlh); 2476 2477 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC); 2478 return; 2479 2480 nla_put_failure: 2481 nlmsg_cancel(skb, nlh); 2482 errout: 2483 kfree_skb(skb); 2484 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS); 2485 } 2486 2487 static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) 2488 { 2489 const struct nlmsghdr *nlh = cb->nlh; 2490 struct fib_dump_filter filter = {}; 2491 int err; 2492 2493 if (cb->strict_check) { 2494 err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh, 2495 &filter, cb); 2496 if (err < 0) 2497 return err; 2498 } 2499 2500 if (filter.table_id) { 2501 struct mr_table *mrt; 2502 2503 mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id); 2504 if (!mrt) { 2505 if (filter.dump_all_families) 2506 return skb->len; 2507 2508 NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist"); 2509 return -ENOENT; 2510 } 2511 err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute, 2512 &mfc_unres_lock, &filter); 2513 return skb->len ? : err; 2514 } 2515 2516 return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter, 2517 _ip6mr_fill_mroute, &mfc_unres_lock, &filter); 2518 } 2519