1 /* 2 * IP multicast routing support for mrouted 3.6/3.8 3 * 4 * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk> 5 * Linux Consultancy and Custom Driver Development 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License 9 * as published by the Free Software Foundation; either version 10 * 2 of the License, or (at your option) any later version. 11 * 12 * Fixes: 13 * Michael Chastain : Incorrect size of copying. 14 * Alan Cox : Added the cache manager code 15 * Alan Cox : Fixed the clone/copy bug and device race. 16 * Mike McLagan : Routing by source 17 * Malcolm Beattie : Buffer handling fixes. 18 * Alexey Kuznetsov : Double buffer free and other fixes. 19 * SVR Anand : Fixed several multicast bugs and problems. 20 * Alexey Kuznetsov : Status, optimisations and more. 21 * Brad Parker : Better behaviour on mrouted upcall 22 * overflow. 23 * Carlos Picoto : PIMv1 Support 24 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header 25 * Relax this requirement to work with older peers. 26 * 27 */ 28 29 #include <asm/uaccess.h> 30 #include <linux/types.h> 31 #include <linux/capability.h> 32 #include <linux/errno.h> 33 #include <linux/timer.h> 34 #include <linux/mm.h> 35 #include <linux/kernel.h> 36 #include <linux/fcntl.h> 37 #include <linux/stat.h> 38 #include <linux/socket.h> 39 #include <linux/in.h> 40 #include <linux/inet.h> 41 #include <linux/netdevice.h> 42 #include <linux/inetdevice.h> 43 #include <linux/igmp.h> 44 #include <linux/proc_fs.h> 45 #include <linux/seq_file.h> 46 #include <linux/mroute.h> 47 #include <linux/init.h> 48 #include <linux/if_ether.h> 49 #include <linux/slab.h> 50 #include <net/net_namespace.h> 51 #include <net/ip.h> 52 #include <net/protocol.h> 53 #include <linux/skbuff.h> 54 #include <net/route.h> 55 #include <net/sock.h> 56 #include <net/icmp.h> 57 #include <net/udp.h> 58 #include <net/raw.h> 59 #include <linux/notifier.h> 60 #include <linux/if_arp.h> 61 #include <linux/netfilter_ipv4.h> 62 #include <linux/compat.h> 63 #include <linux/export.h> 64 #include <net/ipip.h> 65 #include <net/checksum.h> 66 #include <net/netlink.h> 67 #include <net/fib_rules.h> 68 69 #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) 70 #define CONFIG_IP_PIMSM 1 71 #endif 72 73 struct mr_table { 74 struct list_head list; 75 #ifdef CONFIG_NET_NS 76 struct net *net; 77 #endif 78 u32 id; 79 struct sock __rcu *mroute_sk; 80 struct timer_list ipmr_expire_timer; 81 struct list_head mfc_unres_queue; 82 struct list_head mfc_cache_array[MFC_LINES]; 83 struct vif_device vif_table[MAXVIFS]; 84 int maxvif; 85 atomic_t cache_resolve_queue_len; 86 int mroute_do_assert; 87 int mroute_do_pim; 88 #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) 89 int mroute_reg_vif_num; 90 #endif 91 }; 92 93 struct ipmr_rule { 94 struct fib_rule common; 95 }; 96 97 struct ipmr_result { 98 struct mr_table *mrt; 99 }; 100 101 /* Big lock, protecting vif table, mrt cache and mroute socket state. 102 * Note that the changes are semaphored via rtnl_lock. 103 */ 104 105 static DEFINE_RWLOCK(mrt_lock); 106 107 /* 108 * Multicast router control variables 109 */ 110 111 #define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) 112 113 /* Special spinlock for queue of unresolved entries */ 114 static DEFINE_SPINLOCK(mfc_unres_lock); 115 116 /* We return to original Alan's scheme. Hash table of resolved 117 * entries is changed only in process context and protected 118 * with weak lock mrt_lock. 
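 *	In practice this means the packet-forwarding path only ever takes
 *	rcu_read_lock() (for mfc_cache_array lookups) and read_lock(&mrt_lock)
 *	(for the vif table), while configuration changes coming from the
 *	daemon are serialized under rtnl and take write_lock_bh(&mrt_lock).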
Queue of unresolved entries is protected 119 * with strong spinlock mfc_unres_lock. 120 * 121 * In this case data path is free of exclusive locks at all. 122 */ 123 124 static struct kmem_cache *mrt_cachep __read_mostly; 125 126 static struct mr_table *ipmr_new_table(struct net *net, u32 id); 127 static void ipmr_free_table(struct mr_table *mrt); 128 129 static int ip_mr_forward(struct net *net, struct mr_table *mrt, 130 struct sk_buff *skb, struct mfc_cache *cache, 131 int local); 132 static int ipmr_cache_report(struct mr_table *mrt, 133 struct sk_buff *pkt, vifi_t vifi, int assert); 134 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 135 struct mfc_cache *c, struct rtmsg *rtm); 136 static void mroute_clean_tables(struct mr_table *mrt); 137 static void ipmr_expire_process(unsigned long arg); 138 139 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES 140 #define ipmr_for_each_table(mrt, net) \ 141 list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list) 142 143 static struct mr_table *ipmr_get_table(struct net *net, u32 id) 144 { 145 struct mr_table *mrt; 146 147 ipmr_for_each_table(mrt, net) { 148 if (mrt->id == id) 149 return mrt; 150 } 151 return NULL; 152 } 153 154 static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, 155 struct mr_table **mrt) 156 { 157 struct ipmr_result res; 158 struct fib_lookup_arg arg = { .result = &res, }; 159 int err; 160 161 err = fib_rules_lookup(net->ipv4.mr_rules_ops, 162 flowi4_to_flowi(flp4), 0, &arg); 163 if (err < 0) 164 return err; 165 *mrt = res.mrt; 166 return 0; 167 } 168 169 static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp, 170 int flags, struct fib_lookup_arg *arg) 171 { 172 struct ipmr_result *res = arg->result; 173 struct mr_table *mrt; 174 175 switch (rule->action) { 176 case FR_ACT_TO_TBL: 177 break; 178 case FR_ACT_UNREACHABLE: 179 return -ENETUNREACH; 180 case FR_ACT_PROHIBIT: 181 return -EACCES; 182 case FR_ACT_BLACKHOLE: 183 default: 184 return -EINVAL; 185 } 186 187 mrt = ipmr_get_table(rule->fr_net, rule->table); 188 if (mrt == NULL) 189 return -EAGAIN; 190 res->mrt = mrt; 191 return 0; 192 } 193 194 static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) 195 { 196 return 1; 197 } 198 199 static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = { 200 FRA_GENERIC_POLICY, 201 }; 202 203 static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, 204 struct fib_rule_hdr *frh, struct nlattr **tb) 205 { 206 return 0; 207 } 208 209 static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, 210 struct nlattr **tb) 211 { 212 return 1; 213 } 214 215 static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, 216 struct fib_rule_hdr *frh) 217 { 218 frh->dst_len = 0; 219 frh->src_len = 0; 220 frh->tos = 0; 221 return 0; 222 } 223 224 static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = { 225 .family = RTNL_FAMILY_IPMR, 226 .rule_size = sizeof(struct ipmr_rule), 227 .addr_size = sizeof(u32), 228 .action = ipmr_rule_action, 229 .match = ipmr_rule_match, 230 .configure = ipmr_rule_configure, 231 .compare = ipmr_rule_compare, 232 .default_pref = fib_default_rule_pref, 233 .fill = ipmr_rule_fill, 234 .nlgroup = RTNLGRP_IPV4_RULE, 235 .policy = ipmr_rule_policy, 236 .owner = THIS_MODULE, 237 }; 238 239 static int __net_init ipmr_rules_init(struct net *net) 240 { 241 struct fib_rules_ops *ops; 242 struct mr_table *mrt; 243 int err; 244 245 ops = fib_rules_register(&ipmr_rules_ops_template, net); 246 if (IS_ERR(ops)) 
247 return PTR_ERR(ops); 248 249 INIT_LIST_HEAD(&net->ipv4.mr_tables); 250 251 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); 252 if (mrt == NULL) { 253 err = -ENOMEM; 254 goto err1; 255 } 256 257 err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0); 258 if (err < 0) 259 goto err2; 260 261 net->ipv4.mr_rules_ops = ops; 262 return 0; 263 264 err2: 265 kfree(mrt); 266 err1: 267 fib_rules_unregister(ops); 268 return err; 269 } 270 271 static void __net_exit ipmr_rules_exit(struct net *net) 272 { 273 struct mr_table *mrt, *next; 274 275 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { 276 list_del(&mrt->list); 277 ipmr_free_table(mrt); 278 } 279 fib_rules_unregister(net->ipv4.mr_rules_ops); 280 } 281 #else 282 #define ipmr_for_each_table(mrt, net) \ 283 for (mrt = net->ipv4.mrt; mrt; mrt = NULL) 284 285 static struct mr_table *ipmr_get_table(struct net *net, u32 id) 286 { 287 return net->ipv4.mrt; 288 } 289 290 static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, 291 struct mr_table **mrt) 292 { 293 *mrt = net->ipv4.mrt; 294 return 0; 295 } 296 297 static int __net_init ipmr_rules_init(struct net *net) 298 { 299 net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); 300 return net->ipv4.mrt ? 0 : -ENOMEM; 301 } 302 303 static void __net_exit ipmr_rules_exit(struct net *net) 304 { 305 ipmr_free_table(net->ipv4.mrt); 306 } 307 #endif 308 309 static struct mr_table *ipmr_new_table(struct net *net, u32 id) 310 { 311 struct mr_table *mrt; 312 unsigned int i; 313 314 mrt = ipmr_get_table(net, id); 315 if (mrt != NULL) 316 return mrt; 317 318 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); 319 if (mrt == NULL) 320 return NULL; 321 write_pnet(&mrt->net, net); 322 mrt->id = id; 323 324 /* Forwarding cache */ 325 for (i = 0; i < MFC_LINES; i++) 326 INIT_LIST_HEAD(&mrt->mfc_cache_array[i]); 327 328 INIT_LIST_HEAD(&mrt->mfc_unres_queue); 329 330 setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, 331 (unsigned long)mrt); 332 333 #ifdef CONFIG_IP_PIMSM 334 mrt->mroute_reg_vif_num = -1; 335 #endif 336 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES 337 list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables); 338 #endif 339 return mrt; 340 } 341 342 static void ipmr_free_table(struct mr_table *mrt) 343 { 344 del_timer_sync(&mrt->ipmr_expire_timer); 345 mroute_clean_tables(mrt); 346 kfree(mrt); 347 } 348 349 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ 350 351 static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) 352 { 353 struct net *net = dev_net(dev); 354 355 dev_close(dev); 356 357 dev = __dev_get_by_name(net, "tunl0"); 358 if (dev) { 359 const struct net_device_ops *ops = dev->netdev_ops; 360 struct ifreq ifr; 361 struct ip_tunnel_parm p; 362 363 memset(&p, 0, sizeof(p)); 364 p.iph.daddr = v->vifc_rmt_addr.s_addr; 365 p.iph.saddr = v->vifc_lcl_addr.s_addr; 366 p.iph.version = 4; 367 p.iph.ihl = 5; 368 p.iph.protocol = IPPROTO_IPIP; 369 sprintf(p.name, "dvmrp%d", v->vifc_vifi); 370 ifr.ifr_ifru.ifru_data = (__force void __user *)&p; 371 372 if (ops->ndo_do_ioctl) { 373 mm_segment_t oldfs = get_fs(); 374 375 set_fs(KERNEL_DS); 376 ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL); 377 set_fs(oldfs); 378 } 379 } 380 } 381 382 static 383 struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) 384 { 385 struct net_device *dev; 386 387 dev = __dev_get_by_name(net, "tunl0"); 388 389 if (dev) { 390 const struct net_device_ops *ops = dev->netdev_ops; 391 int err; 392 struct ifreq ifr; 393 struct ip_tunnel_parm p; 394 struct in_device 
*in_dev; 395 396 memset(&p, 0, sizeof(p)); 397 p.iph.daddr = v->vifc_rmt_addr.s_addr; 398 p.iph.saddr = v->vifc_lcl_addr.s_addr; 399 p.iph.version = 4; 400 p.iph.ihl = 5; 401 p.iph.protocol = IPPROTO_IPIP; 402 sprintf(p.name, "dvmrp%d", v->vifc_vifi); 403 ifr.ifr_ifru.ifru_data = (__force void __user *)&p; 404 405 if (ops->ndo_do_ioctl) { 406 mm_segment_t oldfs = get_fs(); 407 408 set_fs(KERNEL_DS); 409 err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL); 410 set_fs(oldfs); 411 } else { 412 err = -EOPNOTSUPP; 413 } 414 dev = NULL; 415 416 if (err == 0 && 417 (dev = __dev_get_by_name(net, p.name)) != NULL) { 418 dev->flags |= IFF_MULTICAST; 419 420 in_dev = __in_dev_get_rtnl(dev); 421 if (in_dev == NULL) 422 goto failure; 423 424 ipv4_devconf_setall(in_dev); 425 IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0; 426 427 if (dev_open(dev)) 428 goto failure; 429 dev_hold(dev); 430 } 431 } 432 return dev; 433 434 failure: 435 /* allow the register to be completed before unregistering. */ 436 rtnl_unlock(); 437 rtnl_lock(); 438 439 unregister_netdevice(dev); 440 return NULL; 441 } 442 443 #ifdef CONFIG_IP_PIMSM 444 445 static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 446 { 447 struct net *net = dev_net(dev); 448 struct mr_table *mrt; 449 struct flowi4 fl4 = { 450 .flowi4_oif = dev->ifindex, 451 .flowi4_iif = skb->skb_iif, 452 .flowi4_mark = skb->mark, 453 }; 454 int err; 455 456 err = ipmr_fib_lookup(net, &fl4, &mrt); 457 if (err < 0) { 458 kfree_skb(skb); 459 return err; 460 } 461 462 read_lock(&mrt_lock); 463 dev->stats.tx_bytes += skb->len; 464 dev->stats.tx_packets++; 465 ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT); 466 read_unlock(&mrt_lock); 467 kfree_skb(skb); 468 return NETDEV_TX_OK; 469 } 470 471 static const struct net_device_ops reg_vif_netdev_ops = { 472 .ndo_start_xmit = reg_vif_xmit, 473 }; 474 475 static void reg_vif_setup(struct net_device *dev) 476 { 477 dev->type = ARPHRD_PIMREG; 478 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; 479 dev->flags = IFF_NOARP; 480 dev->netdev_ops = &reg_vif_netdev_ops; 481 dev->destructor = free_netdev; 482 dev->features |= NETIF_F_NETNS_LOCAL; 483 } 484 485 static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) 486 { 487 struct net_device *dev; 488 struct in_device *in_dev; 489 char name[IFNAMSIZ]; 490 491 if (mrt->id == RT_TABLE_DEFAULT) 492 sprintf(name, "pimreg"); 493 else 494 sprintf(name, "pimreg%u", mrt->id); 495 496 dev = alloc_netdev(0, name, reg_vif_setup); 497 498 if (dev == NULL) 499 return NULL; 500 501 dev_net_set(dev, net); 502 503 if (register_netdevice(dev)) { 504 free_netdev(dev); 505 return NULL; 506 } 507 dev->iflink = 0; 508 509 rcu_read_lock(); 510 in_dev = __in_dev_get_rcu(dev); 511 if (!in_dev) { 512 rcu_read_unlock(); 513 goto failure; 514 } 515 516 ipv4_devconf_setall(in_dev); 517 IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0; 518 rcu_read_unlock(); 519 520 if (dev_open(dev)) 521 goto failure; 522 523 dev_hold(dev); 524 525 return dev; 526 527 failure: 528 /* allow the register to be completed before unregistering.
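 * (This path is taken when the freshly registered pimreg device has no
 *  in_device or dev_open() fails; dropping and re-taking rtnl below gives
 *  the registration a chance to complete before unregister_netdevice().)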
*/ 529 rtnl_unlock(); 530 rtnl_lock(); 531 532 unregister_netdevice(dev); 533 return NULL; 534 } 535 #endif 536 537 /** 538 * vif_delete - Delete a VIF entry 539 * @notify: Set to 1, if the caller is a notifier_call 540 */ 541 542 static int vif_delete(struct mr_table *mrt, int vifi, int notify, 543 struct list_head *head) 544 { 545 struct vif_device *v; 546 struct net_device *dev; 547 struct in_device *in_dev; 548 549 if (vifi < 0 || vifi >= mrt->maxvif) 550 return -EADDRNOTAVAIL; 551 552 v = &mrt->vif_table[vifi]; 553 554 write_lock_bh(&mrt_lock); 555 dev = v->dev; 556 v->dev = NULL; 557 558 if (!dev) { 559 write_unlock_bh(&mrt_lock); 560 return -EADDRNOTAVAIL; 561 } 562 563 #ifdef CONFIG_IP_PIMSM 564 if (vifi == mrt->mroute_reg_vif_num) 565 mrt->mroute_reg_vif_num = -1; 566 #endif 567 568 if (vifi + 1 == mrt->maxvif) { 569 int tmp; 570 571 for (tmp = vifi - 1; tmp >= 0; tmp--) { 572 if (VIF_EXISTS(mrt, tmp)) 573 break; 574 } 575 mrt->maxvif = tmp+1; 576 } 577 578 write_unlock_bh(&mrt_lock); 579 580 dev_set_allmulti(dev, -1); 581 582 in_dev = __in_dev_get_rtnl(dev); 583 if (in_dev) { 584 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--; 585 ip_rt_multicast_event(in_dev); 586 } 587 588 if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify) 589 unregister_netdevice_queue(dev, head); 590 591 dev_put(dev); 592 return 0; 593 } 594 595 static void ipmr_cache_free_rcu(struct rcu_head *head) 596 { 597 struct mfc_cache *c = container_of(head, struct mfc_cache, rcu); 598 599 kmem_cache_free(mrt_cachep, c); 600 } 601 602 static inline void ipmr_cache_free(struct mfc_cache *c) 603 { 604 call_rcu(&c->rcu, ipmr_cache_free_rcu); 605 } 606 607 /* Destroy an unresolved cache entry, killing queued skbs 608 * and reporting error to netlink readers. 609 */ 610 611 static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c) 612 { 613 struct net *net = read_pnet(&mrt->net); 614 struct sk_buff *skb; 615 struct nlmsgerr *e; 616 617 atomic_dec(&mrt->cache_resolve_queue_len); 618 619 while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { 620 if (ip_hdr(skb)->version == 0) { 621 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); 622 nlh->nlmsg_type = NLMSG_ERROR; 623 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); 624 skb_trim(skb, nlh->nlmsg_len); 625 e = NLMSG_DATA(nlh); 626 e->error = -ETIMEDOUT; 627 memset(&e->msg, 0, sizeof(e->msg)); 628 629 rtnl_unicast(skb, net, NETLINK_CB(skb).portid); 630 } else { 631 kfree_skb(skb); 632 } 633 } 634 635 ipmr_cache_free(c); 636 } 637 638 639 /* Timer process for the unresolved queue. */ 640 641 static void ipmr_expire_process(unsigned long arg) 642 { 643 struct mr_table *mrt = (struct mr_table *)arg; 644 unsigned long now; 645 unsigned long expires; 646 struct mfc_cache *c, *next; 647 648 if (!spin_trylock(&mfc_unres_lock)) { 649 mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10); 650 return; 651 } 652 653 if (list_empty(&mrt->mfc_unres_queue)) 654 goto out; 655 656 now = jiffies; 657 expires = 10*HZ; 658 659 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { 660 if (time_after(c->mfc_un.unres.expires, now)) { 661 unsigned long interval = c->mfc_un.unres.expires - now; 662 if (interval < expires) 663 expires = interval; 664 continue; 665 } 666 667 list_del(&c->list); 668 ipmr_destroy_unres(mrt, c); 669 } 670 671 if (!list_empty(&mrt->mfc_unres_queue)) 672 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires); 673 674 out: 675 spin_unlock(&mfc_unres_lock); 676 } 677 678 /* Fill oifs list. 
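   For each existing VIF whose entry in ttls[] is non-zero and below 255 the
   threshold is copied into the cache entry, and minvif/maxvif are updated so
   that the forwarding loop only has to walk that range of VIFs.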
It is called under write locked mrt_lock. */ 679 680 static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache, 681 unsigned char *ttls) 682 { 683 int vifi; 684 685 cache->mfc_un.res.minvif = MAXVIFS; 686 cache->mfc_un.res.maxvif = 0; 687 memset(cache->mfc_un.res.ttls, 255, MAXVIFS); 688 689 for (vifi = 0; vifi < mrt->maxvif; vifi++) { 690 if (VIF_EXISTS(mrt, vifi) && 691 ttls[vifi] && ttls[vifi] < 255) { 692 cache->mfc_un.res.ttls[vifi] = ttls[vifi]; 693 if (cache->mfc_un.res.minvif > vifi) 694 cache->mfc_un.res.minvif = vifi; 695 if (cache->mfc_un.res.maxvif <= vifi) 696 cache->mfc_un.res.maxvif = vifi + 1; 697 } 698 } 699 } 700 701 static int vif_add(struct net *net, struct mr_table *mrt, 702 struct vifctl *vifc, int mrtsock) 703 { 704 int vifi = vifc->vifc_vifi; 705 struct vif_device *v = &mrt->vif_table[vifi]; 706 struct net_device *dev; 707 struct in_device *in_dev; 708 int err; 709 710 /* Is vif busy ? */ 711 if (VIF_EXISTS(mrt, vifi)) 712 return -EADDRINUSE; 713 714 switch (vifc->vifc_flags) { 715 #ifdef CONFIG_IP_PIMSM 716 case VIFF_REGISTER: 717 /* 718 * Special Purpose VIF in PIM 719 * All the packets will be sent to the daemon 720 */ 721 if (mrt->mroute_reg_vif_num >= 0) 722 return -EADDRINUSE; 723 dev = ipmr_reg_vif(net, mrt); 724 if (!dev) 725 return -ENOBUFS; 726 err = dev_set_allmulti(dev, 1); 727 if (err) { 728 unregister_netdevice(dev); 729 dev_put(dev); 730 return err; 731 } 732 break; 733 #endif 734 case VIFF_TUNNEL: 735 dev = ipmr_new_tunnel(net, vifc); 736 if (!dev) 737 return -ENOBUFS; 738 err = dev_set_allmulti(dev, 1); 739 if (err) { 740 ipmr_del_tunnel(dev, vifc); 741 dev_put(dev); 742 return err; 743 } 744 break; 745 746 case VIFF_USE_IFINDEX: 747 case 0: 748 if (vifc->vifc_flags == VIFF_USE_IFINDEX) { 749 dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex); 750 if (dev && __in_dev_get_rtnl(dev) == NULL) { 751 dev_put(dev); 752 return -EADDRNOTAVAIL; 753 } 754 } else { 755 dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr); 756 } 757 if (!dev) 758 return -EADDRNOTAVAIL; 759 err = dev_set_allmulti(dev, 1); 760 if (err) { 761 dev_put(dev); 762 return err; 763 } 764 break; 765 default: 766 return -EINVAL; 767 } 768 769 in_dev = __in_dev_get_rtnl(dev); 770 if (!in_dev) { 771 dev_put(dev); 772 return -EADDRNOTAVAIL; 773 } 774 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; 775 ip_rt_multicast_event(in_dev); 776 777 /* Fill in the VIF structures */ 778 779 v->rate_limit = vifc->vifc_rate_limit; 780 v->local = vifc->vifc_lcl_addr.s_addr; 781 v->remote = vifc->vifc_rmt_addr.s_addr; 782 v->flags = vifc->vifc_flags; 783 if (!mrtsock) 784 v->flags |= VIFF_STATIC; 785 v->threshold = vifc->vifc_threshold; 786 v->bytes_in = 0; 787 v->bytes_out = 0; 788 v->pkt_in = 0; 789 v->pkt_out = 0; 790 v->link = dev->ifindex; 791 if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER)) 792 v->link = dev->iflink; 793 794 /* And finish update writing critical data */ 795 write_lock_bh(&mrt_lock); 796 v->dev = dev; 797 #ifdef CONFIG_IP_PIMSM 798 if (v->flags & VIFF_REGISTER) 799 mrt->mroute_reg_vif_num = vifi; 800 #endif 801 if (vifi+1 > mrt->maxvif) 802 mrt->maxvif = vifi+1; 803 write_unlock_bh(&mrt_lock); 804 return 0; 805 } 806 807 /* called with rcu_read_lock() */ 808 static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, 809 __be32 origin, 810 __be32 mcastgrp) 811 { 812 int line = MFC_HASH(mcastgrp, origin); 813 struct mfc_cache *c; 814 815 list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) { 816 if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp) 817 
return c; 818 } 819 return NULL; 820 } 821 822 /* 823 * Allocate a multicast cache entry 824 */ 825 static struct mfc_cache *ipmr_cache_alloc(void) 826 { 827 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); 828 829 if (c) 830 c->mfc_un.res.minvif = MAXVIFS; 831 return c; 832 } 833 834 static struct mfc_cache *ipmr_cache_alloc_unres(void) 835 { 836 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); 837 838 if (c) { 839 skb_queue_head_init(&c->mfc_un.unres.unresolved); 840 c->mfc_un.unres.expires = jiffies + 10*HZ; 841 } 842 return c; 843 } 844 845 /* 846 * A cache entry has gone into a resolved state from queued 847 */ 848 849 static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt, 850 struct mfc_cache *uc, struct mfc_cache *c) 851 { 852 struct sk_buff *skb; 853 struct nlmsgerr *e; 854 855 /* Play the pending entries through our router */ 856 857 while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) { 858 if (ip_hdr(skb)->version == 0) { 859 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); 860 861 if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { 862 nlh->nlmsg_len = skb_tail_pointer(skb) - 863 (u8 *)nlh; 864 } else { 865 nlh->nlmsg_type = NLMSG_ERROR; 866 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); 867 skb_trim(skb, nlh->nlmsg_len); 868 e = NLMSG_DATA(nlh); 869 e->error = -EMSGSIZE; 870 memset(&e->msg, 0, sizeof(e->msg)); 871 } 872 873 rtnl_unicast(skb, net, NETLINK_CB(skb).portid); 874 } else { 875 ip_mr_forward(net, mrt, skb, c, 0); 876 } 877 } 878 } 879 880 /* 881 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted 882 * expects the following bizarre scheme. 883 * 884 * Called under mrt_lock. 885 */ 886 887 static int ipmr_cache_report(struct mr_table *mrt, 888 struct sk_buff *pkt, vifi_t vifi, int assert) 889 { 890 struct sk_buff *skb; 891 const int ihl = ip_hdrlen(pkt); 892 struct igmphdr *igmp; 893 struct igmpmsg *msg; 894 struct sock *mroute_sk; 895 int ret; 896 897 #ifdef CONFIG_IP_PIMSM 898 if (assert == IGMPMSG_WHOLEPKT) 899 skb = skb_realloc_headroom(pkt, sizeof(struct iphdr)); 900 else 901 #endif 902 skb = alloc_skb(128, GFP_ATOMIC); 903 904 if (!skb) 905 return -ENOBUFS; 906 907 #ifdef CONFIG_IP_PIMSM 908 if (assert == IGMPMSG_WHOLEPKT) { 909 /* Ugly, but we have no choice with this interface. 910 * Duplicate old header, fix ihl, length etc. 
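 * (For orientation: the structure user space reads back is struct igmpmsg
 *  from <linux/mroute.h>; its leading fields overlay the copied IP header,
 *  roughly:
 *
 *	struct igmpmsg {
 *		__u32		unused1, unused2;
 *		unsigned char	im_msgtype;	<- overlays iph->ttl
 *		unsigned char	im_mbz;		<- overlays iph->protocol
 *		unsigned char	im_vif;
 *		unsigned char	unused3;
 *		struct in_addr	im_src, im_dst;
 *	};
 *
 *  so the stores below land in those header bytes.)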
911 * And all this only to mangle msg->im_msgtype and 912 * to set msg->im_mbz to "mbz" :-) 913 */ 914 skb_push(skb, sizeof(struct iphdr)); 915 skb_reset_network_header(skb); 916 skb_reset_transport_header(skb); 917 msg = (struct igmpmsg *)skb_network_header(skb); 918 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); 919 msg->im_msgtype = IGMPMSG_WHOLEPKT; 920 msg->im_mbz = 0; 921 msg->im_vif = mrt->mroute_reg_vif_num; 922 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; 923 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + 924 sizeof(struct iphdr)); 925 } else 926 #endif 927 { 928 929 /* Copy the IP header */ 930 931 skb->network_header = skb->tail; 932 skb_put(skb, ihl); 933 skb_copy_to_linear_data(skb, pkt->data, ihl); 934 ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ 935 msg = (struct igmpmsg *)skb_network_header(skb); 936 msg->im_vif = vifi; 937 skb_dst_set(skb, dst_clone(skb_dst(pkt))); 938 939 /* Add our header */ 940 941 igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); 942 igmp->type = 943 msg->im_msgtype = assert; 944 igmp->code = 0; 945 ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */ 946 skb->transport_header = skb->network_header; 947 } 948 949 rcu_read_lock(); 950 mroute_sk = rcu_dereference(mrt->mroute_sk); 951 if (mroute_sk == NULL) { 952 rcu_read_unlock(); 953 kfree_skb(skb); 954 return -EINVAL; 955 } 956 957 /* Deliver to mrouted */ 958 959 ret = sock_queue_rcv_skb(mroute_sk, skb); 960 rcu_read_unlock(); 961 if (ret < 0) { 962 net_warn_ratelimited("mroute: pending queue full, dropping entries\n"); 963 kfree_skb(skb); 964 } 965 966 return ret; 967 } 968 969 /* 970 * Queue a packet for resolution. It gets locked cache entry! 971 */ 972 973 static int 974 ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb) 975 { 976 bool found = false; 977 int err; 978 struct mfc_cache *c; 979 const struct iphdr *iph = ip_hdr(skb); 980 981 spin_lock_bh(&mfc_unres_lock); 982 list_for_each_entry(c, &mrt->mfc_unres_queue, list) { 983 if (c->mfc_mcastgrp == iph->daddr && 984 c->mfc_origin == iph->saddr) { 985 found = true; 986 break; 987 } 988 } 989 990 if (!found) { 991 /* Create a new entry if allowable */ 992 993 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 || 994 (c = ipmr_cache_alloc_unres()) == NULL) { 995 spin_unlock_bh(&mfc_unres_lock); 996 997 kfree_skb(skb); 998 return -ENOBUFS; 999 } 1000 1001 /* Fill in the new cache entry */ 1002 1003 c->mfc_parent = -1; 1004 c->mfc_origin = iph->saddr; 1005 c->mfc_mcastgrp = iph->daddr; 1006 1007 /* Reflect first query at mrouted. 
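   The skb that caused the cache miss is handed to ipmr_cache_report(), which
   copies its IP header into an IGMPMSG_NOCACHE upcall on the mroute socket;
   the daemon is then expected to answer with MRT_ADD_MFC.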
*/ 1008 1009 err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE); 1010 if (err < 0) { 1011 /* If the report failed throw the cache entry 1012 out - Brad Parker 1013 */ 1014 spin_unlock_bh(&mfc_unres_lock); 1015 1016 ipmr_cache_free(c); 1017 kfree_skb(skb); 1018 return err; 1019 } 1020 1021 atomic_inc(&mrt->cache_resolve_queue_len); 1022 list_add(&c->list, &mrt->mfc_unres_queue); 1023 1024 if (atomic_read(&mrt->cache_resolve_queue_len) == 1) 1025 mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); 1026 } 1027 1028 /* See if we can append the packet */ 1029 1030 if (c->mfc_un.unres.unresolved.qlen > 3) { 1031 kfree_skb(skb); 1032 err = -ENOBUFS; 1033 } else { 1034 skb_queue_tail(&c->mfc_un.unres.unresolved, skb); 1035 err = 0; 1036 } 1037 1038 spin_unlock_bh(&mfc_unres_lock); 1039 return err; 1040 } 1041 1042 /* 1043 * MFC cache manipulation by user space mroute daemon 1044 */ 1045 1046 static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc) 1047 { 1048 int line; 1049 struct mfc_cache *c, *next; 1050 1051 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); 1052 1053 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) { 1054 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 1055 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { 1056 list_del_rcu(&c->list); 1057 1058 ipmr_cache_free(c); 1059 return 0; 1060 } 1061 } 1062 return -ENOENT; 1063 } 1064 1065 static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, 1066 struct mfcctl *mfc, int mrtsock) 1067 { 1068 bool found = false; 1069 int line; 1070 struct mfc_cache *uc, *c; 1071 1072 if (mfc->mfcc_parent >= MAXVIFS) 1073 return -ENFILE; 1074 1075 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); 1076 1077 list_for_each_entry(c, &mrt->mfc_cache_array[line], list) { 1078 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 1079 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { 1080 found = true; 1081 break; 1082 } 1083 } 1084 1085 if (found) { 1086 write_lock_bh(&mrt_lock); 1087 c->mfc_parent = mfc->mfcc_parent; 1088 ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); 1089 if (!mrtsock) 1090 c->mfc_flags |= MFC_STATIC; 1091 write_unlock_bh(&mrt_lock); 1092 return 0; 1093 } 1094 1095 if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) 1096 return -EINVAL; 1097 1098 c = ipmr_cache_alloc(); 1099 if (c == NULL) 1100 return -ENOMEM; 1101 1102 c->mfc_origin = mfc->mfcc_origin.s_addr; 1103 c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; 1104 c->mfc_parent = mfc->mfcc_parent; 1105 ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); 1106 if (!mrtsock) 1107 c->mfc_flags |= MFC_STATIC; 1108 1109 list_add_rcu(&c->list, &mrt->mfc_cache_array[line]); 1110 1111 /* 1112 * Check to see if we resolved a queued list. If so we 1113 * need to send on the frames and tidy up. 
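 *	(The matching unresolved entry, if any, is unhooked under
 *	mfc_unres_lock and its queued skbs are replayed through
 *	ipmr_cache_resolve() before the temporary entry is freed.)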
1114 */ 1115 found = false; 1116 spin_lock_bh(&mfc_unres_lock); 1117 list_for_each_entry(uc, &mrt->mfc_unres_queue, list) { 1118 if (uc->mfc_origin == c->mfc_origin && 1119 uc->mfc_mcastgrp == c->mfc_mcastgrp) { 1120 list_del(&uc->list); 1121 atomic_dec(&mrt->cache_resolve_queue_len); 1122 found = true; 1123 break; 1124 } 1125 } 1126 if (list_empty(&mrt->mfc_unres_queue)) 1127 del_timer(&mrt->ipmr_expire_timer); 1128 spin_unlock_bh(&mfc_unres_lock); 1129 1130 if (found) { 1131 ipmr_cache_resolve(net, mrt, uc, c); 1132 ipmr_cache_free(uc); 1133 } 1134 return 0; 1135 } 1136 1137 /* 1138 * Close the multicast socket, and clear the vif tables etc 1139 */ 1140 1141 static void mroute_clean_tables(struct mr_table *mrt) 1142 { 1143 int i; 1144 LIST_HEAD(list); 1145 struct mfc_cache *c, *next; 1146 1147 /* Shut down all active vif entries */ 1148 1149 for (i = 0; i < mrt->maxvif; i++) { 1150 if (!(mrt->vif_table[i].flags & VIFF_STATIC)) 1151 vif_delete(mrt, i, 0, &list); 1152 } 1153 unregister_netdevice_many(&list); 1154 1155 /* Wipe the cache */ 1156 1157 for (i = 0; i < MFC_LINES; i++) { 1158 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { 1159 if (c->mfc_flags & MFC_STATIC) 1160 continue; 1161 list_del_rcu(&c->list); 1162 ipmr_cache_free(c); 1163 } 1164 } 1165 1166 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { 1167 spin_lock_bh(&mfc_unres_lock); 1168 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { 1169 list_del(&c->list); 1170 ipmr_destroy_unres(mrt, c); 1171 } 1172 spin_unlock_bh(&mfc_unres_lock); 1173 } 1174 } 1175 1176 /* called from ip_ra_control(), before an RCU grace period, 1177 * we dont need to call synchronize_rcu() here 1178 */ 1179 static void mrtsock_destruct(struct sock *sk) 1180 { 1181 struct net *net = sock_net(sk); 1182 struct mr_table *mrt; 1183 1184 rtnl_lock(); 1185 ipmr_for_each_table(mrt, net) { 1186 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1187 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; 1188 RCU_INIT_POINTER(mrt->mroute_sk, NULL); 1189 mroute_clean_tables(mrt); 1190 } 1191 } 1192 rtnl_unlock(); 1193 } 1194 1195 /* 1196 * Socket options and virtual interface manipulation. The whole 1197 * virtual interface system is a complete heap, but unfortunately 1198 * that's how BSD mrouted happens to think. Maybe one day with a proper 1199 * MOSPF/PIM router set up we can clean this up. 1200 */ 1201 1202 int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen) 1203 { 1204 int ret; 1205 struct vifctl vif; 1206 struct mfcctl mfc; 1207 struct net *net = sock_net(sk); 1208 struct mr_table *mrt; 1209 1210 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); 1211 if (mrt == NULL) 1212 return -ENOENT; 1213 1214 if (optname != MRT_INIT) { 1215 if (sk != rcu_access_pointer(mrt->mroute_sk) && 1216 !capable(CAP_NET_ADMIN)) 1217 return -EACCES; 1218 } 1219 1220 switch (optname) { 1221 case MRT_INIT: 1222 if (sk->sk_type != SOCK_RAW || 1223 inet_sk(sk)->inet_num != IPPROTO_IGMP) 1224 return -EOPNOTSUPP; 1225 if (optlen != sizeof(int)) 1226 return -ENOPROTOOPT; 1227 1228 rtnl_lock(); 1229 if (rtnl_dereference(mrt->mroute_sk)) { 1230 rtnl_unlock(); 1231 return -EADDRINUSE; 1232 } 1233 1234 ret = ip_ra_control(sk, 1, mrtsock_destruct); 1235 if (ret == 0) { 1236 rcu_assign_pointer(mrt->mroute_sk, sk); 1237 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; 1238 } 1239 rtnl_unlock(); 1240 return ret; 1241 case MRT_DONE: 1242 if (sk != rcu_access_pointer(mrt->mroute_sk)) 1243 return -EACCES; 1244 return ip_ra_control(sk, 0, NULL); 1245 case MRT_ADD_VIF: 1246 case MRT_DEL_VIF: 1247 if (optlen != sizeof(vif)) 1248 return -EINVAL; 1249 if (copy_from_user(&vif, optval, sizeof(vif))) 1250 return -EFAULT; 1251 if (vif.vifc_vifi >= MAXVIFS) 1252 return -ENFILE; 1253 rtnl_lock(); 1254 if (optname == MRT_ADD_VIF) { 1255 ret = vif_add(net, mrt, &vif, 1256 sk == rtnl_dereference(mrt->mroute_sk)); 1257 } else { 1258 ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL); 1259 } 1260 rtnl_unlock(); 1261 return ret; 1262 1263 /* 1264 * Manipulate the forwarding caches. These live 1265 * in a sort of kernel/user symbiosis. 1266 */ 1267 case MRT_ADD_MFC: 1268 case MRT_DEL_MFC: 1269 if (optlen != sizeof(mfc)) 1270 return -EINVAL; 1271 if (copy_from_user(&mfc, optval, sizeof(mfc))) 1272 return -EFAULT; 1273 rtnl_lock(); 1274 if (optname == MRT_DEL_MFC) 1275 ret = ipmr_mfc_delete(mrt, &mfc); 1276 else 1277 ret = ipmr_mfc_add(net, mrt, &mfc, 1278 sk == rtnl_dereference(mrt->mroute_sk)); 1279 rtnl_unlock(); 1280 return ret; 1281 /* 1282 * Control PIM assert. 1283 */ 1284 case MRT_ASSERT: 1285 { 1286 int v; 1287 if (get_user(v, (int __user *)optval)) 1288 return -EFAULT; 1289 mrt->mroute_do_assert = (v) ? 1 : 0; 1290 return 0; 1291 } 1292 #ifdef CONFIG_IP_PIMSM 1293 case MRT_PIM: 1294 { 1295 int v; 1296 1297 if (get_user(v, (int __user *)optval)) 1298 return -EFAULT; 1299 v = (v) ? 1 : 0; 1300 1301 rtnl_lock(); 1302 ret = 0; 1303 if (v != mrt->mroute_do_pim) { 1304 mrt->mroute_do_pim = v; 1305 mrt->mroute_do_assert = v; 1306 } 1307 rtnl_unlock(); 1308 return ret; 1309 } 1310 #endif 1311 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES 1312 case MRT_TABLE: 1313 { 1314 u32 v; 1315 1316 if (optlen != sizeof(u32)) 1317 return -EINVAL; 1318 if (get_user(v, (u32 __user *)optval)) 1319 return -EFAULT; 1320 1321 /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */ 1322 if (v != RT_TABLE_DEFAULT && v >= 1000000000) 1323 return -EINVAL; 1324 1325 rtnl_lock(); 1326 ret = 0; 1327 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1328 ret = -EBUSY; 1329 } else { 1330 if (!ipmr_new_table(net, v)) 1331 ret = -ENOMEM; 1332 raw_sk(sk)->ipmr_table = v; 1333 } 1334 rtnl_unlock(); 1335 return ret; 1336 } 1337 #endif 1338 /* 1339 * Spurious command, or MRT_VERSION which you cannot 1340 * set. 1341 */ 1342 default: 1343 return -ENOPROTOOPT; 1344 } 1345 } 1346 1347 /* 1348 * Getsock opt support for the multicast routing system. 1349 */ 1350 1351 int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) 1352 { 1353 int olr; 1354 int val; 1355 struct net *net = sock_net(sk); 1356 struct mr_table *mrt; 1357 1358 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); 1359 if (mrt == NULL) 1360 return -ENOENT; 1361 1362 if (optname != MRT_VERSION && 1363 #ifdef CONFIG_IP_PIMSM 1364 optname != MRT_PIM && 1365 #endif 1366 optname != MRT_ASSERT) 1367 return -ENOPROTOOPT; 1368 1369 if (get_user(olr, optlen)) 1370 return -EFAULT; 1371 1372 olr = min_t(unsigned int, olr, sizeof(int)); 1373 if (olr < 0) 1374 return -EINVAL; 1375 1376 if (put_user(olr, optlen)) 1377 return -EFAULT; 1378 if (optname == MRT_VERSION) 1379 val = 0x0305; 1380 #ifdef CONFIG_IP_PIMSM 1381 else if (optname == MRT_PIM) 1382 val = mrt->mroute_do_pim; 1383 #endif 1384 else 1385 val = mrt->mroute_do_assert; 1386 if (copy_to_user(optval, &val, olr)) 1387 return -EFAULT; 1388 return 0; 1389 } 1390 1391 /* 1392 * The IP multicast ioctl support routines. 1393 */ 1394 1395 int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) 1396 { 1397 struct sioc_sg_req sr; 1398 struct sioc_vif_req vr; 1399 struct vif_device *vif; 1400 struct mfc_cache *c; 1401 struct net *net = sock_net(sk); 1402 struct mr_table *mrt; 1403 1404 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); 1405 if (mrt == NULL) 1406 return -ENOENT; 1407 1408 switch (cmd) { 1409 case SIOCGETVIFCNT: 1410 if (copy_from_user(&vr, arg, sizeof(vr))) 1411 return -EFAULT; 1412 if (vr.vifi >= mrt->maxvif) 1413 return -EINVAL; 1414 read_lock(&mrt_lock); 1415 vif = &mrt->vif_table[vr.vifi]; 1416 if (VIF_EXISTS(mrt, vr.vifi)) { 1417 vr.icount = vif->pkt_in; 1418 vr.ocount = vif->pkt_out; 1419 vr.ibytes = vif->bytes_in; 1420 vr.obytes = vif->bytes_out; 1421 read_unlock(&mrt_lock); 1422 1423 if (copy_to_user(arg, &vr, sizeof(vr))) 1424 return -EFAULT; 1425 return 0; 1426 } 1427 read_unlock(&mrt_lock); 1428 return -EADDRNOTAVAIL; 1429 case SIOCGETSGCNT: 1430 if (copy_from_user(&sr, arg, sizeof(sr))) 1431 return -EFAULT; 1432 1433 rcu_read_lock(); 1434 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); 1435 if (c) { 1436 sr.pktcnt = c->mfc_un.res.pkt; 1437 sr.bytecnt = c->mfc_un.res.bytes; 1438 sr.wrong_if = c->mfc_un.res.wrong_if; 1439 rcu_read_unlock(); 1440 1441 if (copy_to_user(arg, &sr, sizeof(sr))) 1442 return -EFAULT; 1443 return 0; 1444 } 1445 rcu_read_unlock(); 1446 return -EADDRNOTAVAIL; 1447 default: 1448 return -ENOIOCTLCMD; 1449 } 1450 } 1451 1452 #ifdef CONFIG_COMPAT 1453 struct compat_sioc_sg_req { 1454 struct in_addr src; 1455 struct in_addr grp; 1456 compat_ulong_t pktcnt; 1457 compat_ulong_t bytecnt; 1458 compat_ulong_t wrong_if; 1459 }; 1460 1461 struct compat_sioc_vif_req { 1462 vifi_t vifi; /* Which iface */ 1463 compat_ulong_t icount; 1464 compat_ulong_t ocount; 1465 compat_ulong_t ibytes; 1466 compat_ulong_t obytes; 1467 }; 1468 1469 int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) 1470 { 1471 struct compat_sioc_sg_req sr; 1472 struct compat_sioc_vif_req vr; 1473 struct vif_device *vif; 1474 struct mfc_cache *c; 1475 struct net *net = sock_net(sk); 1476 struct mr_table *mrt; 1477 1478 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); 1479 if (mrt == NULL) 1480 return -ENOENT; 1481 1482 switch (cmd) { 1483 case SIOCGETVIFCNT: 1484 if (copy_from_user(&vr, arg, sizeof(vr))) 1485 return -EFAULT; 1486 if (vr.vifi >= mrt->maxvif) 1487 return -EINVAL; 1488 read_lock(&mrt_lock); 1489 vif = &mrt->vif_table[vr.vifi]; 1490 if (VIF_EXISTS(mrt, vr.vifi)) { 1491 vr.icount = vif->pkt_in; 1492 vr.ocount = vif->pkt_out; 1493 vr.ibytes = vif->bytes_in; 1494 vr.obytes = vif->bytes_out; 1495 read_unlock(&mrt_lock); 1496 1497 if (copy_to_user(arg, &vr, sizeof(vr))) 1498 return -EFAULT; 1499 return 0; 1500 } 1501 read_unlock(&mrt_lock); 1502 return -EADDRNOTAVAIL; 1503 case SIOCGETSGCNT: 1504 if (copy_from_user(&sr, arg, sizeof(sr))) 1505 return -EFAULT; 1506 1507 rcu_read_lock(); 1508 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); 1509 if (c) { 1510 sr.pktcnt = c->mfc_un.res.pkt; 1511 sr.bytecnt = c->mfc_un.res.bytes; 1512 sr.wrong_if = c->mfc_un.res.wrong_if; 1513 rcu_read_unlock(); 1514 1515 if (copy_to_user(arg, &sr, sizeof(sr))) 1516 return -EFAULT; 1517 return 0; 1518 } 1519 rcu_read_unlock(); 1520 return -EADDRNOTAVAIL; 1521 default: 1522 return -ENOIOCTLCMD; 1523 } 1524 } 1525 #endif 1526 1527 1528 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) 1529 { 1530 struct net_device *dev = ptr; 1531 struct net *net = dev_net(dev); 1532 struct mr_table *mrt; 1533 struct vif_device *v; 1534 int ct; 1535 1536 if (event != NETDEV_UNREGISTER) 1537 return NOTIFY_DONE; 1538 1539 ipmr_for_each_table(mrt, net) { 1540 v = &mrt->vif_table[0]; 1541 for (ct = 0; ct < mrt->maxvif; ct++, v++) { 1542 if (v->dev == dev) 1543 vif_delete(mrt, ct, 1, NULL); 1544 } 1545 } 1546 return NOTIFY_DONE; 1547 } 1548 1549 1550 static struct notifier_block ip_mr_notifier = { 1551 .notifier_call = ipmr_device_event, 1552 }; 1553 1554 /* 1555 * Encapsulate a packet by attaching a valid IPIP header to it. 1556 * This avoids tunnel drivers and other mess and gives us the speed so 1557 * important for multicast video. 
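 *	The result is just an outer IPv4 header with protocol IPPROTO_IPIP
 *	prepended in front of the original datagram; TOS and TTL are copied
 *	from the inner header and the outer checksum is filled in with
 *	ip_send_check().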
1558 */ 1559 1560 static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) 1561 { 1562 struct iphdr *iph; 1563 const struct iphdr *old_iph = ip_hdr(skb); 1564 1565 skb_push(skb, sizeof(struct iphdr)); 1566 skb->transport_header = skb->network_header; 1567 skb_reset_network_header(skb); 1568 iph = ip_hdr(skb); 1569 1570 iph->version = 4; 1571 iph->tos = old_iph->tos; 1572 iph->ttl = old_iph->ttl; 1573 iph->frag_off = 0; 1574 iph->daddr = daddr; 1575 iph->saddr = saddr; 1576 iph->protocol = IPPROTO_IPIP; 1577 iph->ihl = 5; 1578 iph->tot_len = htons(skb->len); 1579 ip_select_ident(iph, skb_dst(skb), NULL); 1580 ip_send_check(iph); 1581 1582 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 1583 nf_reset(skb); 1584 } 1585 1586 static inline int ipmr_forward_finish(struct sk_buff *skb) 1587 { 1588 struct ip_options *opt = &(IPCB(skb)->opt); 1589 1590 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); 1591 IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len); 1592 1593 if (unlikely(opt->optlen)) 1594 ip_forward_options(skb); 1595 1596 return dst_output(skb); 1597 } 1598 1599 /* 1600 * Processing handlers for ipmr_forward 1601 */ 1602 1603 static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, 1604 struct sk_buff *skb, struct mfc_cache *c, int vifi) 1605 { 1606 const struct iphdr *iph = ip_hdr(skb); 1607 struct vif_device *vif = &mrt->vif_table[vifi]; 1608 struct net_device *dev; 1609 struct rtable *rt; 1610 struct flowi4 fl4; 1611 int encap = 0; 1612 1613 if (vif->dev == NULL) 1614 goto out_free; 1615 1616 #ifdef CONFIG_IP_PIMSM 1617 if (vif->flags & VIFF_REGISTER) { 1618 vif->pkt_out++; 1619 vif->bytes_out += skb->len; 1620 vif->dev->stats.tx_bytes += skb->len; 1621 vif->dev->stats.tx_packets++; 1622 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT); 1623 goto out_free; 1624 } 1625 #endif 1626 1627 if (vif->flags & VIFF_TUNNEL) { 1628 rt = ip_route_output_ports(net, &fl4, NULL, 1629 vif->remote, vif->local, 1630 0, 0, 1631 IPPROTO_IPIP, 1632 RT_TOS(iph->tos), vif->link); 1633 if (IS_ERR(rt)) 1634 goto out_free; 1635 encap = sizeof(struct iphdr); 1636 } else { 1637 rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0, 1638 0, 0, 1639 IPPROTO_IPIP, 1640 RT_TOS(iph->tos), vif->link); 1641 if (IS_ERR(rt)) 1642 goto out_free; 1643 } 1644 1645 dev = rt->dst.dev; 1646 1647 if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) { 1648 /* Do not fragment multicasts. Alas, IPv4 does not 1649 * allow to send ICMP, so that packets will disappear 1650 * to blackhole. 1651 */ 1652 1653 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS); 1654 ip_rt_put(rt); 1655 goto out_free; 1656 } 1657 1658 encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len; 1659 1660 if (skb_cow(skb, encap)) { 1661 ip_rt_put(rt); 1662 goto out_free; 1663 } 1664 1665 vif->pkt_out++; 1666 vif->bytes_out += skb->len; 1667 1668 skb_dst_drop(skb); 1669 skb_dst_set(skb, &rt->dst); 1670 ip_decrease_ttl(ip_hdr(skb)); 1671 1672 /* FIXME: forward and output firewalls used to be called here. 1673 * What do we do with netfilter? -- RR 1674 */ 1675 if (vif->flags & VIFF_TUNNEL) { 1676 ip_encap(skb, vif->local, vif->remote); 1677 /* FIXME: extra output firewall step used to be here. 
--RR */ 1678 vif->dev->stats.tx_packets++; 1679 vif->dev->stats.tx_bytes += skb->len; 1680 } 1681 1682 IPCB(skb)->flags |= IPSKB_FORWARDED; 1683 1684 /* 1685 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally 1686 * not only before forwarding, but after forwarding on all output 1687 * interfaces. It is clear, if mrouter runs a multicasting 1688 * program, it should receive packets not depending to what interface 1689 * program is joined. 1690 * If we will not make it, the program will have to join on all 1691 * interfaces. On the other hand, multihoming host (or router, but 1692 * not mrouter) cannot join to more than one interface - it will 1693 * result in receiving multiple packets. 1694 */ 1695 NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev, 1696 ipmr_forward_finish); 1697 return; 1698 1699 out_free: 1700 kfree_skb(skb); 1701 } 1702 1703 static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) 1704 { 1705 int ct; 1706 1707 for (ct = mrt->maxvif-1; ct >= 0; ct--) { 1708 if (mrt->vif_table[ct].dev == dev) 1709 break; 1710 } 1711 return ct; 1712 } 1713 1714 /* "local" means that we should preserve one skb (for local delivery) */ 1715 1716 static int ip_mr_forward(struct net *net, struct mr_table *mrt, 1717 struct sk_buff *skb, struct mfc_cache *cache, 1718 int local) 1719 { 1720 int psend = -1; 1721 int vif, ct; 1722 1723 vif = cache->mfc_parent; 1724 cache->mfc_un.res.pkt++; 1725 cache->mfc_un.res.bytes += skb->len; 1726 1727 /* 1728 * Wrong interface: drop packet and (maybe) send PIM assert. 1729 */ 1730 if (mrt->vif_table[vif].dev != skb->dev) { 1731 int true_vifi; 1732 1733 if (rt_is_output_route(skb_rtable(skb))) { 1734 /* It is our own packet, looped back. 1735 * Very complicated situation... 1736 * 1737 * The best workaround until routing daemons will be 1738 * fixed is not to redistribute packet, if it was 1739 * send through wrong interface. It means, that 1740 * multicast applications WILL NOT work for 1741 * (S,G), which have default multicast route pointing 1742 * to wrong oif. In any case, it is not a good 1743 * idea to use multicasting applications on router. 1744 */ 1745 goto dont_forward; 1746 } 1747 1748 cache->mfc_un.res.wrong_if++; 1749 true_vifi = ipmr_find_vif(mrt, skb->dev); 1750 1751 if (true_vifi >= 0 && mrt->mroute_do_assert && 1752 /* pimsm uses asserts, when switching from RPT to SPT, 1753 * so that we cannot check that packet arrived on an oif. 1754 * It is bad, but otherwise we would need to move pretty 1755 * large chunk of pimd to kernel. Ough... 
--ANK 1756 */ 1757 (mrt->mroute_do_pim || 1758 cache->mfc_un.res.ttls[true_vifi] < 255) && 1759 time_after(jiffies, 1760 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { 1761 cache->mfc_un.res.last_assert = jiffies; 1762 ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF); 1763 } 1764 goto dont_forward; 1765 } 1766 1767 mrt->vif_table[vif].pkt_in++; 1768 mrt->vif_table[vif].bytes_in += skb->len; 1769 1770 /* 1771 * Forward the frame 1772 */ 1773 for (ct = cache->mfc_un.res.maxvif - 1; 1774 ct >= cache->mfc_un.res.minvif; ct--) { 1775 if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) { 1776 if (psend != -1) { 1777 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 1778 1779 if (skb2) 1780 ipmr_queue_xmit(net, mrt, skb2, cache, 1781 psend); 1782 } 1783 psend = ct; 1784 } 1785 } 1786 if (psend != -1) { 1787 if (local) { 1788 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 1789 1790 if (skb2) 1791 ipmr_queue_xmit(net, mrt, skb2, cache, psend); 1792 } else { 1793 ipmr_queue_xmit(net, mrt, skb, cache, psend); 1794 return 0; 1795 } 1796 } 1797 1798 dont_forward: 1799 if (!local) 1800 kfree_skb(skb); 1801 return 0; 1802 } 1803 1804 static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb) 1805 { 1806 struct rtable *rt = skb_rtable(skb); 1807 struct iphdr *iph = ip_hdr(skb); 1808 struct flowi4 fl4 = { 1809 .daddr = iph->daddr, 1810 .saddr = iph->saddr, 1811 .flowi4_tos = RT_TOS(iph->tos), 1812 .flowi4_oif = (rt_is_output_route(rt) ? 1813 skb->dev->ifindex : 0), 1814 .flowi4_iif = (rt_is_output_route(rt) ? 1815 LOOPBACK_IFINDEX : 1816 skb->dev->ifindex), 1817 .flowi4_mark = skb->mark, 1818 }; 1819 struct mr_table *mrt; 1820 int err; 1821 1822 err = ipmr_fib_lookup(net, &fl4, &mrt); 1823 if (err) 1824 return ERR_PTR(err); 1825 return mrt; 1826 } 1827 1828 /* 1829 * Multicast packets for forwarding arrive here 1830 * Called with rcu_read_lock(); 1831 */ 1832 1833 int ip_mr_input(struct sk_buff *skb) 1834 { 1835 struct mfc_cache *cache; 1836 struct net *net = dev_net(skb->dev); 1837 int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; 1838 struct mr_table *mrt; 1839 1840 /* Packet is looped back after forward, it should not be 1841 * forwarded second time, but still can be delivered locally. 1842 */ 1843 if (IPCB(skb)->flags & IPSKB_FORWARDED) 1844 goto dont_forward; 1845 1846 mrt = ipmr_rt_fib_lookup(net, skb); 1847 if (IS_ERR(mrt)) { 1848 kfree_skb(skb); 1849 return PTR_ERR(mrt); 1850 } 1851 if (!local) { 1852 if (IPCB(skb)->opt.router_alert) { 1853 if (ip_call_ra_chain(skb)) 1854 return 0; 1855 } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) { 1856 /* IGMPv1 (and broken IGMPv2 implementations sort of 1857 * Cisco IOS <= 11.2(8)) do not put router alert 1858 * option to IGMP packets destined to routable 1859 * groups. It is very bad, because it means 1860 * that we can forward NO IGMP messages. 
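 *	As a workaround, any IGMP packet that reaches this point is passed
 *	straight to the mroute socket via raw_rcv() below instead of going
 *	through the normal forwarding path.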
1861 */ 1862 struct sock *mroute_sk; 1863 1864 mroute_sk = rcu_dereference(mrt->mroute_sk); 1865 if (mroute_sk) { 1866 nf_reset(skb); 1867 raw_rcv(mroute_sk, skb); 1868 return 0; 1869 } 1870 } 1871 } 1872 1873 /* already under rcu_read_lock() */ 1874 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); 1875 1876 /* 1877 * No usable cache entry 1878 */ 1879 if (cache == NULL) { 1880 int vif; 1881 1882 if (local) { 1883 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 1884 ip_local_deliver(skb); 1885 if (skb2 == NULL) 1886 return -ENOBUFS; 1887 skb = skb2; 1888 } 1889 1890 read_lock(&mrt_lock); 1891 vif = ipmr_find_vif(mrt, skb->dev); 1892 if (vif >= 0) { 1893 int err2 = ipmr_cache_unresolved(mrt, vif, skb); 1894 read_unlock(&mrt_lock); 1895 1896 return err2; 1897 } 1898 read_unlock(&mrt_lock); 1899 kfree_skb(skb); 1900 return -ENODEV; 1901 } 1902 1903 read_lock(&mrt_lock); 1904 ip_mr_forward(net, mrt, skb, cache, local); 1905 read_unlock(&mrt_lock); 1906 1907 if (local) 1908 return ip_local_deliver(skb); 1909 1910 return 0; 1911 1912 dont_forward: 1913 if (local) 1914 return ip_local_deliver(skb); 1915 kfree_skb(skb); 1916 return 0; 1917 } 1918 1919 #ifdef CONFIG_IP_PIMSM 1920 /* called with rcu_read_lock() */ 1921 static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, 1922 unsigned int pimlen) 1923 { 1924 struct net_device *reg_dev = NULL; 1925 struct iphdr *encap; 1926 1927 encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); 1928 /* 1929 * Check that: 1930 * a. packet is really sent to a multicast group 1931 * b. packet is not a NULL-REGISTER 1932 * c. packet is not truncated 1933 */ 1934 if (!ipv4_is_multicast(encap->daddr) || 1935 encap->tot_len == 0 || 1936 ntohs(encap->tot_len) + pimlen > skb->len) 1937 return 1; 1938 1939 read_lock(&mrt_lock); 1940 if (mrt->mroute_reg_vif_num >= 0) 1941 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev; 1942 read_unlock(&mrt_lock); 1943 1944 if (reg_dev == NULL) 1945 return 1; 1946 1947 skb->mac_header = skb->network_header; 1948 skb_pull(skb, (u8 *)encap - skb->data); 1949 skb_reset_network_header(skb); 1950 skb->protocol = htons(ETH_P_IP); 1951 skb->ip_summed = CHECKSUM_NONE; 1952 skb->pkt_type = PACKET_HOST; 1953 1954 skb_tunnel_rx(skb, reg_dev); 1955 1956 netif_rx(skb); 1957 1958 return NET_RX_SUCCESS; 1959 } 1960 #endif 1961 1962 #ifdef CONFIG_IP_PIMSM_V1 1963 /* 1964 * Handle IGMP messages of PIMv1 1965 */ 1966 1967 int pim_rcv_v1(struct sk_buff *skb) 1968 { 1969 struct igmphdr *pim; 1970 struct net *net = dev_net(skb->dev); 1971 struct mr_table *mrt; 1972 1973 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) 1974 goto drop; 1975 1976 pim = igmp_hdr(skb); 1977 1978 mrt = ipmr_rt_fib_lookup(net, skb); 1979 if (IS_ERR(mrt)) 1980 goto drop; 1981 if (!mrt->mroute_do_pim || 1982 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) 1983 goto drop; 1984 1985 if (__pim_rcv(mrt, skb, sizeof(*pim))) { 1986 drop: 1987 kfree_skb(skb); 1988 } 1989 return 0; 1990 } 1991 #endif 1992 1993 #ifdef CONFIG_IP_PIMSM_V2 1994 static int pim_rcv(struct sk_buff *skb) 1995 { 1996 struct pimreghdr *pim; 1997 struct net *net = dev_net(skb->dev); 1998 struct mr_table *mrt; 1999 2000 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) 2001 goto drop; 2002 2003 pim = (struct pimreghdr *)skb_transport_header(skb); 2004 if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) || 2005 (pim->flags & PIM_NULL_REGISTER) || 2006 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && 2007 
csum_fold(skb_checksum(skb, 0, skb->len, 0)))) 2008 goto drop; 2009 2010 mrt = ipmr_rt_fib_lookup(net, skb); 2011 if (IS_ERR(mrt)) 2012 goto drop; 2013 if (__pim_rcv(mrt, skb, sizeof(*pim))) { 2014 drop: 2015 kfree_skb(skb); 2016 } 2017 return 0; 2018 } 2019 #endif 2020 2021 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 2022 struct mfc_cache *c, struct rtmsg *rtm) 2023 { 2024 int ct; 2025 struct rtnexthop *nhp; 2026 struct nlattr *mp_attr; 2027 2028 /* If cache is unresolved, don't try to parse IIF and OIF */ 2029 if (c->mfc_parent >= MAXVIFS) 2030 return -ENOENT; 2031 2032 if (VIF_EXISTS(mrt, c->mfc_parent) && 2033 nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0) 2034 return -EMSGSIZE; 2035 2036 if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH))) 2037 return -EMSGSIZE; 2038 2039 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { 2040 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { 2041 if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) { 2042 nla_nest_cancel(skb, mp_attr); 2043 return -EMSGSIZE; 2044 } 2045 2046 nhp->rtnh_flags = 0; 2047 nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; 2048 nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex; 2049 nhp->rtnh_len = sizeof(*nhp); 2050 } 2051 } 2052 2053 nla_nest_end(skb, mp_attr); 2054 2055 rtm->rtm_type = RTN_MULTICAST; 2056 return 1; 2057 } 2058 2059 int ipmr_get_route(struct net *net, struct sk_buff *skb, 2060 __be32 saddr, __be32 daddr, 2061 struct rtmsg *rtm, int nowait) 2062 { 2063 struct mfc_cache *cache; 2064 struct mr_table *mrt; 2065 int err; 2066 2067 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); 2068 if (mrt == NULL) 2069 return -ENOENT; 2070 2071 rcu_read_lock(); 2072 cache = ipmr_cache_find(mrt, saddr, daddr); 2073 2074 if (cache == NULL) { 2075 struct sk_buff *skb2; 2076 struct iphdr *iph; 2077 struct net_device *dev; 2078 int vif = -1; 2079 2080 if (nowait) { 2081 rcu_read_unlock(); 2082 return -EAGAIN; 2083 } 2084 2085 dev = skb->dev; 2086 read_lock(&mrt_lock); 2087 if (dev) 2088 vif = ipmr_find_vif(mrt, dev); 2089 if (vif < 0) { 2090 read_unlock(&mrt_lock); 2091 rcu_read_unlock(); 2092 return -ENODEV; 2093 } 2094 skb2 = skb_clone(skb, GFP_ATOMIC); 2095 if (!skb2) { 2096 read_unlock(&mrt_lock); 2097 rcu_read_unlock(); 2098 return -ENOMEM; 2099 } 2100 2101 skb_push(skb2, sizeof(struct iphdr)); 2102 skb_reset_network_header(skb2); 2103 iph = ip_hdr(skb2); 2104 iph->ihl = sizeof(struct iphdr) >> 2; 2105 iph->saddr = saddr; 2106 iph->daddr = daddr; 2107 iph->version = 0; 2108 err = ipmr_cache_unresolved(mrt, vif, skb2); 2109 read_unlock(&mrt_lock); 2110 rcu_read_unlock(); 2111 return err; 2112 } 2113 2114 read_lock(&mrt_lock); 2115 if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY)) 2116 cache->mfc_flags |= MFC_NOTIFY; 2117 err = __ipmr_fill_mroute(mrt, skb, cache, rtm); 2118 read_unlock(&mrt_lock); 2119 rcu_read_unlock(); 2120 return err; 2121 } 2122 2123 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 2124 u32 portid, u32 seq, struct mfc_cache *c) 2125 { 2126 struct nlmsghdr *nlh; 2127 struct rtmsg *rtm; 2128 2129 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); 2130 if (nlh == NULL) 2131 return -EMSGSIZE; 2132 2133 rtm = nlmsg_data(nlh); 2134 rtm->rtm_family = RTNL_FAMILY_IPMR; 2135 rtm->rtm_dst_len = 32; 2136 rtm->rtm_src_len = 32; 2137 rtm->rtm_tos = 0; 2138 rtm->rtm_table = mrt->id; 2139 if (nla_put_u32(skb, RTA_TABLE, mrt->id)) 2140 goto nla_put_failure; 2141 rtm->rtm_type = RTN_MULTICAST; 2142 
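	/* Scope and protocol are fixed values for multicast cache entries;
	 * the (S,G) key goes out as RTA_SRC/RTA_DST below and the output
	 * VIF set as an RTA_MULTIPATH nest (see __ipmr_fill_mroute() above),
	 * where rtnh_hops carries each VIF's TTL threshold.
	 */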
rtm->rtm_scope = RT_SCOPE_UNIVERSE; 2143 rtm->rtm_protocol = RTPROT_UNSPEC; 2144 rtm->rtm_flags = 0; 2145 2146 if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) || 2147 nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp)) 2148 goto nla_put_failure; 2149 if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0) 2150 goto nla_put_failure; 2151 2152 return nlmsg_end(skb, nlh); 2153 2154 nla_put_failure: 2155 nlmsg_cancel(skb, nlh); 2156 return -EMSGSIZE; 2157 } 2158 2159 static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) 2160 { 2161 struct net *net = sock_net(skb->sk); 2162 struct mr_table *mrt; 2163 struct mfc_cache *mfc; 2164 unsigned int t = 0, s_t; 2165 unsigned int h = 0, s_h; 2166 unsigned int e = 0, s_e; 2167 2168 s_t = cb->args[0]; 2169 s_h = cb->args[1]; 2170 s_e = cb->args[2]; 2171 2172 rcu_read_lock(); 2173 ipmr_for_each_table(mrt, net) { 2174 if (t < s_t) 2175 goto next_table; 2176 if (t > s_t) 2177 s_h = 0; 2178 for (h = s_h; h < MFC_LINES; h++) { 2179 list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) { 2180 if (e < s_e) 2181 goto next_entry; 2182 if (ipmr_fill_mroute(mrt, skb, 2183 NETLINK_CB(cb->skb).portid, 2184 cb->nlh->nlmsg_seq, 2185 mfc) < 0) 2186 goto done; 2187 next_entry: 2188 e++; 2189 } 2190 e = s_e = 0; 2191 } 2192 s_h = 0; 2193 next_table: 2194 t++; 2195 } 2196 done: 2197 rcu_read_unlock(); 2198 2199 cb->args[2] = e; 2200 cb->args[1] = h; 2201 cb->args[0] = t; 2202 2203 return skb->len; 2204 } 2205 2206 #ifdef CONFIG_PROC_FS 2207 /* 2208 * The /proc interfaces to multicast routing : 2209 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif 2210 */ 2211 struct ipmr_vif_iter { 2212 struct seq_net_private p; 2213 struct mr_table *mrt; 2214 int ct; 2215 }; 2216 2217 static struct vif_device *ipmr_vif_seq_idx(struct net *net, 2218 struct ipmr_vif_iter *iter, 2219 loff_t pos) 2220 { 2221 struct mr_table *mrt = iter->mrt; 2222 2223 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { 2224 if (!VIF_EXISTS(mrt, iter->ct)) 2225 continue; 2226 if (pos-- == 0) 2227 return &mrt->vif_table[iter->ct]; 2228 } 2229 return NULL; 2230 } 2231 2232 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) 2233 __acquires(mrt_lock) 2234 { 2235 struct ipmr_vif_iter *iter = seq->private; 2236 struct net *net = seq_file_net(seq); 2237 struct mr_table *mrt; 2238 2239 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); 2240 if (mrt == NULL) 2241 return ERR_PTR(-ENOENT); 2242 2243 iter->mrt = mrt; 2244 2245 read_lock(&mrt_lock); 2246 return *pos ? 
#ifdef CONFIG_PROC_FS
/*
 * The /proc interfaces to multicast routing :
 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}

static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next = ipmr_vif_seq_next,
	.stop = ipmr_vif_seq_stop,
	.show = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner = THIS_MODULE,
	.open = ipmr_vif_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
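/*
 * Illustration only, not part of the kernel build: ipmr_vif_seq_show()
 * above prints one line per configured vif. A userspace reader could
 * consume /proc/net/ip_mr_vif roughly as below (a minimal sketch; the
 * field order follows the seq_printf() format string, with Local and
 * Remote as raw hex IPv4 addresses):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256], name[32];
 *		long bytes_in, pkts_in, bytes_out, pkts_out;
 *		unsigned int idx, flags, local, remote;
 *		FILE *f = fopen("/proc/net/ip_mr_vif", "r");
 *
 *		fgets(line, sizeof(line), f);	// skip the header line
 *		while (fgets(line, sizeof(line), f)) {
 *			if (sscanf(line, "%u %31s %ld %ld %ld %ld %X %X %X",
 *				   &idx, name, &bytes_in, &pkts_in,
 *				   &bytes_out, &pkts_out,
 *				   &flags, &local, &remote) == 9)
 *				printf("vif %u (%s): %ld pkts in, %ld out\n",
 *				       idx, name, pkts_in, pkts_out);
 *		}
 *		fclose(f);
 *		return 0;
 *	}
 */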
struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
	int ct;
};


static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry_rcu(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}


static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	it->ct = 0;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	/* Drop whichever lock ipmr_mfc_seq_idx()/ipmr_mfc_seq_next()
	 * left held for the list we stopped in.
	 */
	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		rcu_read_unlock();
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next = ipmr_mfc_seq_next,
	.stop = ipmr_mfc_seq_stop,
	.show = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner = THIS_MODULE,
	.open = ipmr_mfc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler = pim_rcv,
	.netns_ok = 1,
};
#endif
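/*
 * Illustration only, not part of the kernel build: each resolved line of
 * /proc/net/ip_mr_cache written by ipmr_mfc_seq_show() above is
 * "Group Origin Iif Pkts Bytes Wrong" followed by "oif:ttl" pairs, with
 * Group and Origin printed as raw hex 32-bit values. A rough parse of
 * the fixed columns could look like this (a sketch that ignores the
 * per-oif list):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int grp, origin;
 *		int iif;
 *		unsigned long pkts, bytes, wrong;
 *		char line[512];
 *		FILE *f = fopen("/proc/net/ip_mr_cache", "r");
 *
 *		fgets(line, sizeof(line), f);	// skip the header line
 *		while (fgets(line, sizeof(line), f)) {
 *			if (sscanf(line, "%X %X %d %lu %lu %lu",
 *				   &grp, &origin, &iif,
 *				   &pkts, &bytes, &wrong) == 6)
 *				printf("group %08X origin %08X iif %d: %lu pkts\n",
 *				       grp, origin, iif, pkts);
 *		}
 *		fclose(f);
 *		return 0;
 *	}
 */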
/*
 *	Setup for IP multicast routing
 */

static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(net, "ip_mr_vif");
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip_mr_cache");
	proc_net_remove(net, "ip_mr_vif");
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};

int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      NULL, ipmr_rtm_dumproute, NULL);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
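/*
 * Illustration only, not part of the kernel build: after ip_mr_init()
 * has run, a routing daemon such as mrouted claims the multicast router
 * role with the MRT_* socket options on a raw IGMP socket (this needs
 * CAP_NET_ADMIN). A minimal sketch, assuming 192.0.2.1 is the local
 * address of the interface to register as vif 0:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *	#include <linux/mroute.h>
 *
 *	int main(void)
 *	{
 *		int one = 1;
 *		struct vifctl vc;
 *		int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *
 *		// become the multicast router for the default table
 *		if (setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one)) < 0)
 *			perror("MRT_INIT");
 *
 *		// register a virtual interface with TTL threshold 1
 *		memset(&vc, 0, sizeof(vc));
 *		vc.vifc_vifi = 0;
 *		vc.vifc_threshold = 1;
 *		vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *		if (setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc)) < 0)
 *			perror("MRT_ADD_VIF");
 *
 *		// ...install (S,G) entries with MRT_ADD_MFC and undo
 *		// everything with MRT_DONE before exiting
 *		return 0;
 *	}
 */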