/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
/* Bumped on every MDB hash-table rehash; lockless readers can use it to
 * detect that the table changed under them.
 */
unsigned int br_mdb_rehash_seq;

/* Two br_ip addresses are equal only if protocol, VLAN id and the
 * protocol-specific group address all match.
 */
static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;
	if (a->vid != b->vid)
		return 0;
	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
	}
	return 0;
}

/* Hash an IPv4 group address into an MDB bucket index; mdb->max is a
 * power of two, so the mask keeps the result in range.
 */
static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
				__u16 vid)
{
	return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of __br_ip4_hash(). */
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip,
				__u16 vid)
{
	return jhash_2words(ipv6_addr_hash(ip), vid,
			    mdb->secret) & (mdb->max - 1);
}
#endif

/* Dispatch to the per-protocol hash helper based on ip->proto. */
static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
	}
	return 0;
}

/* Walk one hash chain (of the currently active table version) looking
 * for an exact address match.  RCU-safe traversal.
 */
static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

/* Look up a group entry; tolerates a NULL table (snooping not yet set up). */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
					   struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}

/* Convenience wrapper: build a br_ip for an IPv4 group and look it up. */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Convenience wrapper: build a br_ip for an IPv6 group and look it up. */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
	__u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif

/* Forwarding-path lookup: map a data skb's destination address to its
 * MDB entry.  Returns NULL (flood instead) when snooping is disabled,
 * when the skb was itself classified as IGMP control traffic, or for
 * non-IP protocols.  Called under RCU.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}

/* RCU callback: free the superseded hash table once no readers remain. */
static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
	    container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}

/* Re-link every entry from the old table into the new one (entries carry
 * two hlist nodes, indexed by table version, so they can live in both
 * tables during the transition).  If elasticity is non-zero, verify no
 * new chain exceeds it; -EINVAL means the rehash did not help.
 */
static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}

/* RCU callback: free a port-group entry. */
void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
	    container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

/* RCU callback: free an MDB group entry. */
static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
	    container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}

/* Timer handler: group membership on the bridge device itself expired.
 * The entry is unhashed and freed only once no ports reference it;
 * timer_pending() guards against a racing re-arm.
 */
static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

/* Unlink a port-group from its MDB entry's port list and schedule it for
 * RCU freeing.  If that leaves the entry with no ports and no local
 * membership, fire the entry's timer immediately to reap it.
 * Caller holds br->multicast_lock.
 */
static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	/* pg was not on the list it should belong to. */
	WARN_ON(1);
}

/* Timer handler: a port's membership in a group timed out.  Permanent
 * (static) entries are never expired.
 */
static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}

/* Allocate a new MDB hash table of @max buckets and publish it via RCU.
 * If an old table exists its entries are copied across (see br_mdb_copy)
 * and the old table is freed after a grace period.  A fresh hash secret
 * is drawn when there is no old table or when we are rehashing to break
 * up over-long chains (elasticity != 0).  GFP_ATOMIC: may run in
 * softirq/packet context.
 */
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	/* Flip the list-node version so entries can sit in both tables
	 * while readers drain.
	 */
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}

/* Build an IGMP membership query skb: Ethernet + IPv4 (with Router
 * Alert option, hence the extra 4 bytes) + IGMP header.  @group == 0
 * produces a general query to 224.0.0.1; otherwise a group-specific
 * query.  Returns NULL on allocation failure.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group)
{
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 sizeof(*ih) + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	/* 01:00:5e:00:00:01 - the all-hosts multicast MAC. */
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;	/* 24 bytes: 20-byte header + 4-byte Router Alert */
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br->multicast_query_use_ifaddr ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	/* Router Alert option directly after the fixed header. */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	ih = igmp_hdr(skb);
	ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
	/* Max response time: shorter for group-specific (leave) queries. */
	ih->code = (group ? br->multicast_last_member_interval :
			    br->multicast_query_response_interval) /
		   (HZ / IGMP_TIMER_SCALE);
	ih->group = group;
	ih->csum = 0;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
	skb_put(skb, sizeof(*ih));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query skb: Ethernet + IPv6 + 8-byte Hop-by-Hop option
 * block (Router Alert) + MLD message.  An unspecified @group produces a
 * general query to ff02::1.  Returns NULL on allocation failure or when
 * no usable link-local source address exists.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *group)
{
	struct sk_buff *skb;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct ethhdr *eth;
	u8 *hopopt;
	unsigned long interval;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + sizeof(*mldq));
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);	/* version 6 */
	ip6h->payload_len = htons(8 + sizeof(*mldq));
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		return NULL;
	}
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	mldq = (struct mld_msg *) icmp6_hdr(skb);

	interval = ipv6_addr_any(group) ?
		   br->multicast_query_response_interval :
		   br->multicast_last_member_interval;

	mldq->mld_type = ICMPV6_MGM_QUERY;
	mldq->mld_code = 0;
	mldq->mld_cksum = 0;
	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
	mldq->mld_reserved = 0;
	mldq->mld_mca = *group;

	/* checksum */
	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  sizeof(*mldq), IPPROTO_ICMPV6,
					  csum_partial(mldq,
						       sizeof(*mldq), 0));
	skb_put(skb, sizeof(*mldq));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

/* Dispatch query allocation by address family. */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr)
{
	switch (addr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_alloc_query(br, addr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
#endif
	}
	return NULL;
}

/* Find @group in bucket @hash, growing/rehashing the table as needed.
 * Returns the entry if present, NULL if absent (caller may insert), or
 * ERR_PTR: -EAGAIN after a successful rehash (caller must recompute the
 * hash and retry), -E2BIG/-EEXIST when snooping had to be disabled or a
 * rehash is already in flight.  Caller holds br->multicast_lock.
 */
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	/* Chain longer than allowed: rehash with a new secret. */
	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	/* Table full: double it, unless that would exceed hash_max, in
	 * which case give up and turn snooping off entirely.
	 */
	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		/* A previous rehash has not finished its RCU grace
		 * period yet - cannot stack another one.
		 */
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}

/* Look up @group, creating the MDB entry (and the table itself on first
 * use) if necessary.  Returns the entry or ERR_PTR on failure.  Caller
 * holds br->multicast_lock.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
	struct net_bridge_port *port, struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, port, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		/* NULL: not found, fall through to allocate. */
		break;

	case -EAGAIN:
		/* Table was rehashed; recompute hash against new table. */
rehash:
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		/* Found entry or hard error - return it as-is. */
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

/* Allocate a port-group entry and link it into the port's mglist and,
 * via @next, into the MDB entry's port chain.  Returns NULL on OOM.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
	struct net_bridge_port *port,
	struct br_ip *group,
	struct net_bridge_port_group __rcu *next,
	unsigned char state)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->state = state;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);
	return p;
}

/* Record a membership report: refresh (or create) the group entry and
 * the reporting port's membership, re-arming the expiry timers.
 * port == NULL means the bridge device itself joined.
 */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		/* Membership on the bridge device itself. */
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	/* Port list is kept ordered by descending pointer value. */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}

/* IPv4 wrapper: ignore reports for 224.0.0.x link-local groups, which
 * are always flooded.
 */
static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 wrapper: ignore reports for the link-local all-nodes group. */
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}
#endif

/* Timer handler: a port's "multicast router present" state timed out;
 * drop it from the router list unless configured as a permanent router
 * (multicast_router != 1 means not in auto-detect mode).
 */
static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router != 1 ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);

out:
	spin_unlock(&br->multicast_lock);
}

/* The bridge-local router timer needs no action on expiry; its
 * pending/expired state alone is what br_multicast_mark_router() tracks.
 */
static void br_multicast_local_router_expired(unsigned long data)
{
}

/* The other (foreign) querier went silent: take over querying ourselves. */
static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif

/* Record our own query's source address as the current querier address. */
static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

/* Emit one query: out through @port via the netfilter LOCAL_OUT hook,
 * or up into our own stack (netif_rx) when querying on the bridge
 * device itself.
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;

	skb = br_multicast_alloc_query(br, ip);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
			NULL, skb->dev,
			br_dev_queue_push_xmit);
	} else {
		br_multicast_select_own_querier(br, ip, skb);
		netif_rx(skb);
	}
}

/* Send a general query for the family that @own_query belongs to, unless
 * another querier on the segment is still live (its other_query timer is
 * pending), then re-arm our own query timer.  Uses the startup interval
 * while still within the startup burst.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	unsigned long time;
	struct br_ip br_group;
	struct bridge_mcast_other_query *other_query = NULL;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier)
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, &br_group);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

/* Common half of the per-port query timer handlers. */
static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif

/* Initialise per-port multicast state when a port is added to a bridge. */
void br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = 1;	/* auto-detect routers by default */

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, (unsigned long)port);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, (unsigned long)port);
#endif
}

/* Tear-down counterpart of br_multicast_add_port(). */
void br_multicast_del_port(struct net_bridge_port *port)
{
	del_timer_sync(&port->multicast_router_timer);
}

/* Restart the startup-query burst for one own_query instance. */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

/* Port came up (or left blocking): kick off queries and, if statically
 * configured as a router port (multicast_router == 2), add it to the
 * router list.
 */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (br->multicast_disabled || !netif_running(br->dev))
		goto out;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == 2 && hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);

out:
	spin_unlock(&br->multicast_lock);
}

/* Port went down: drop all its group memberships, remove it from the
 * router list and stop its timers.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist))
		hlist_del_init_rcu(&port->rlist);
	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}

/* Parse an IGMPv3 membership report, validating each group record's
 * bounds with pskb_may_pull before touching it, and fold every record
 * into a plain join for its group.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		/* Skip the per-record source list (4 bytes each). */
		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		err = br_ip4_multicast_add_group(br, port, group, vid);
		if (err)
			break;
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLDv2 counterpart of br_ip4_multicast_igmp3_report(): each group
 * record is bounds-checked (grec_nsrcs read via skb_header_pointer in
 * case it is not yet linear) and folded into a plain join.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	int i;
	int len;
	int num;
	int err = 0;

	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *nsrcs, _nsrcs;

		nsrcs = skb_header_pointer(skb,
					   len + offsetof(struct mld2_grec,
							  grec_nsrcs),
					   sizeof(_nsrcs), &_nsrcs);
		if (!nsrcs)
			return -EINVAL;

		if (!pskb_may_pull(skb,
				   len + sizeof(*grec) +
				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += sizeof(*grec) +
		       sizeof(struct in6_addr) * ntohs(*nsrcs);

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
						 vid);
		if (err)
			break;
	}

	return err;
}
#endif

/* IGMP querier election: the lowest source address wins.  Adopt @saddr
 * as the segment's querier when no querier is active, when we have no
 * recorded querier address, or when @saddr does not lose the election.
 */
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election - same lowest-address rule as IPv4. */
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
	}

	return false;
}

/* Re-arm the "another querier is present" timer; remember the earliest
 * moment the foreign querier may be considered gone (delay_time) only
 * when the timer was not already running.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
}

/* A multicast router was seen behind @port (or on the bridge itself when
 * @port is NULL): refresh the corresponding router-presence timer.  Only
 * auto-detect mode (multicast_router == 1) is timer-driven.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == 1)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	if (port->multicast_router != 1)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

/* A general query arrived from @saddr: if it wins (or ties) the querier
 * election, note the foreign querier and treat its sender as a router.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}

/* Handle a received IGMP query.  General queries feed querier election;
 * group-specific queries shorten the expiry timers of the queried
 * group's memberships so non-responders age out quickly.
 */
static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (skb->len == sizeof(*ih)) {
		/* IGMPv1/v2 sized query. */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1: no max-response-time, fixed 10s;
			 * v1 queries are always general.
			 */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (skb->len >= sizeof(*ih3)) {
		/* IGMPv3 query; source-specific queries are ignored. */
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	/* Shorten timers, but never extend one that would fire sooner. */
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD counterpart of br_ip4_multicast_query(); distinguishes MLDv1
 * (mld_msg-sized) from MLDv2 queries by skb length.
 */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (skb->len == sizeof(*mld)) {
		if (!pskb_may_pull(skb, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		/* Source-specific queries are ignored (no group set). */
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ip6h->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	/* Shorten timers, but never extend one that would fire sooner. */
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

/* Handle a leave message for @group: if we are the querier, send a
 * group-specific query and shrink the membership timers to the
 * last-member window so the entry ages out unless someone re-reports.
 * Ignored while a foreign querier is active (it will query instead).
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED) ||
	    timer_pending(&other_query->timer))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (br->multicast_querier) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (p->port != port)
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
1421 time_after(p->timer.expires, time) : 1422 try_to_del_timer_sync(&p->timer) >= 0)) { 1423 mod_timer(&p->timer, time); 1424 } 1425 1426 break; 1427 } 1428 } 1429 1430 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) { 1431 struct net_bridge_port_group __rcu **pp; 1432 1433 for (pp = &mp->ports; 1434 (p = mlock_dereference(*pp, br)) != NULL; 1435 pp = &p->next) { 1436 if (p->port != port) 1437 continue; 1438 1439 rcu_assign_pointer(*pp, p->next); 1440 hlist_del_init(&p->mglist); 1441 del_timer(&p->timer); 1442 call_rcu_bh(&p->rcu, br_multicast_free_pg); 1443 br_mdb_notify(br->dev, port, group, RTM_DELMDB); 1444 1445 if (!mp->ports && !mp->mglist && 1446 netif_running(br->dev)) 1447 mod_timer(&mp->timer, jiffies); 1448 } 1449 goto out; 1450 } 1451 1452 now = jiffies; 1453 time = now + br->multicast_last_member_count * 1454 br->multicast_last_member_interval; 1455 1456 if (!port) { 1457 if (mp->mglist && 1458 (timer_pending(&mp->timer) ? 1459 time_after(mp->timer.expires, time) : 1460 try_to_del_timer_sync(&mp->timer) >= 0)) { 1461 mod_timer(&mp->timer, time); 1462 } 1463 1464 goto out; 1465 } 1466 1467 for (p = mlock_dereference(mp->ports, br); 1468 p != NULL; 1469 p = mlock_dereference(p->next, br)) { 1470 if (p->port != port) 1471 continue; 1472 1473 if (!hlist_unhashed(&p->mglist) && 1474 (timer_pending(&p->timer) ? 1475 time_after(p->timer.expires, time) : 1476 try_to_del_timer_sync(&p->timer) >= 0)) { 1477 mod_timer(&p->timer, time); 1478 } 1479 1480 break; 1481 } 1482 out: 1483 spin_unlock(&br->multicast_lock); 1484 } 1485 1486 static void br_ip4_multicast_leave_group(struct net_bridge *br, 1487 struct net_bridge_port *port, 1488 __be32 group, 1489 __u16 vid) 1490 { 1491 struct br_ip br_group; 1492 struct bridge_mcast_own_query *own_query; 1493 1494 if (ipv4_is_local_multicast(group)) 1495 return; 1496 1497 own_query = port ? 
&port->ip4_own_query : &br->ip4_own_query; 1498 1499 br_group.u.ip4 = group; 1500 br_group.proto = htons(ETH_P_IP); 1501 br_group.vid = vid; 1502 1503 br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query, 1504 own_query); 1505 } 1506 1507 #if IS_ENABLED(CONFIG_IPV6) 1508 static void br_ip6_multicast_leave_group(struct net_bridge *br, 1509 struct net_bridge_port *port, 1510 const struct in6_addr *group, 1511 __u16 vid) 1512 { 1513 struct br_ip br_group; 1514 struct bridge_mcast_own_query *own_query; 1515 1516 if (ipv6_addr_is_ll_all_nodes(group)) 1517 return; 1518 1519 own_query = port ? &port->ip6_own_query : &br->ip6_own_query; 1520 1521 br_group.u.ip6 = *group; 1522 br_group.proto = htons(ETH_P_IPV6); 1523 br_group.vid = vid; 1524 1525 br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query, 1526 own_query); 1527 } 1528 #endif 1529 1530 static int br_multicast_ipv4_rcv(struct net_bridge *br, 1531 struct net_bridge_port *port, 1532 struct sk_buff *skb, 1533 u16 vid) 1534 { 1535 struct sk_buff *skb_trimmed = NULL; 1536 struct igmphdr *ih; 1537 int err; 1538 1539 err = ip_mc_check_igmp(skb, &skb_trimmed); 1540 1541 if (err == -ENOMSG) { 1542 if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) 1543 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1544 return 0; 1545 } else if (err < 0) { 1546 return err; 1547 } 1548 1549 BR_INPUT_SKB_CB(skb)->igmp = 1; 1550 ih = igmp_hdr(skb); 1551 1552 switch (ih->type) { 1553 case IGMP_HOST_MEMBERSHIP_REPORT: 1554 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1555 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1556 err = br_ip4_multicast_add_group(br, port, ih->group, vid); 1557 break; 1558 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1559 err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid); 1560 break; 1561 case IGMP_HOST_MEMBERSHIP_QUERY: 1562 err = br_ip4_multicast_query(br, port, skb_trimmed, vid); 1563 break; 1564 case IGMP_HOST_LEAVE_MESSAGE: 1565 br_ip4_multicast_leave_group(br, port, ih->group, vid); 1566 break; 1567 } 
1568 1569 if (skb_trimmed) 1570 kfree_skb(skb_trimmed); 1571 1572 return err; 1573 } 1574 1575 #if IS_ENABLED(CONFIG_IPV6) 1576 static int br_multicast_ipv6_rcv(struct net_bridge *br, 1577 struct net_bridge_port *port, 1578 struct sk_buff *skb, 1579 u16 vid) 1580 { 1581 struct sk_buff *skb_trimmed = NULL; 1582 struct mld_msg *mld; 1583 int err; 1584 1585 err = ipv6_mc_check_mld(skb, &skb_trimmed); 1586 1587 if (err == -ENOMSG) { 1588 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr)) 1589 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1590 return 0; 1591 } else if (err < 0) { 1592 return err; 1593 } 1594 1595 BR_INPUT_SKB_CB(skb)->igmp = 1; 1596 mld = (struct mld_msg *)skb_transport_header(skb); 1597 1598 switch (mld->mld_type) { 1599 case ICMPV6_MGM_REPORT: 1600 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1601 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid); 1602 break; 1603 case ICMPV6_MLD2_REPORT: 1604 err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid); 1605 break; 1606 case ICMPV6_MGM_QUERY: 1607 err = br_ip6_multicast_query(br, port, skb_trimmed, vid); 1608 break; 1609 case ICMPV6_MGM_REDUCTION: 1610 br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid); 1611 break; 1612 } 1613 1614 if (skb_trimmed) 1615 kfree_skb(skb_trimmed); 1616 1617 return err; 1618 } 1619 #endif 1620 1621 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, 1622 struct sk_buff *skb, u16 vid) 1623 { 1624 BR_INPUT_SKB_CB(skb)->igmp = 0; 1625 BR_INPUT_SKB_CB(skb)->mrouters_only = 0; 1626 1627 if (br->multicast_disabled) 1628 return 0; 1629 1630 switch (skb->protocol) { 1631 case htons(ETH_P_IP): 1632 return br_multicast_ipv4_rcv(br, port, skb, vid); 1633 #if IS_ENABLED(CONFIG_IPV6) 1634 case htons(ETH_P_IPV6): 1635 return br_multicast_ipv6_rcv(br, port, skb, vid); 1636 #endif 1637 } 1638 1639 return 0; 1640 } 1641 1642 static void br_multicast_query_expired(struct net_bridge *br, 1643 struct bridge_mcast_own_query *query, 1644 struct 
bridge_mcast_querier *querier) 1645 { 1646 spin_lock(&br->multicast_lock); 1647 if (query->startup_sent < br->multicast_startup_query_count) 1648 query->startup_sent++; 1649 1650 RCU_INIT_POINTER(querier->port, NULL); 1651 br_multicast_send_query(br, NULL, query); 1652 spin_unlock(&br->multicast_lock); 1653 } 1654 1655 static void br_ip4_multicast_query_expired(unsigned long data) 1656 { 1657 struct net_bridge *br = (void *)data; 1658 1659 br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier); 1660 } 1661 1662 #if IS_ENABLED(CONFIG_IPV6) 1663 static void br_ip6_multicast_query_expired(unsigned long data) 1664 { 1665 struct net_bridge *br = (void *)data; 1666 1667 br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier); 1668 } 1669 #endif 1670 1671 void br_multicast_init(struct net_bridge *br) 1672 { 1673 br->hash_elasticity = 4; 1674 br->hash_max = 512; 1675 1676 br->multicast_router = 1; 1677 br->multicast_querier = 0; 1678 br->multicast_query_use_ifaddr = 0; 1679 br->multicast_last_member_count = 2; 1680 br->multicast_startup_query_count = 2; 1681 1682 br->multicast_last_member_interval = HZ; 1683 br->multicast_query_response_interval = 10 * HZ; 1684 br->multicast_startup_query_interval = 125 * HZ / 4; 1685 br->multicast_query_interval = 125 * HZ; 1686 br->multicast_querier_interval = 255 * HZ; 1687 br->multicast_membership_interval = 260 * HZ; 1688 1689 br->ip4_other_query.delay_time = 0; 1690 br->ip4_querier.port = NULL; 1691 #if IS_ENABLED(CONFIG_IPV6) 1692 br->ip6_other_query.delay_time = 0; 1693 br->ip6_querier.port = NULL; 1694 #endif 1695 1696 spin_lock_init(&br->multicast_lock); 1697 setup_timer(&br->multicast_router_timer, 1698 br_multicast_local_router_expired, 0); 1699 setup_timer(&br->ip4_other_query.timer, 1700 br_ip4_multicast_querier_expired, (unsigned long)br); 1701 setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired, 1702 (unsigned long)br); 1703 #if IS_ENABLED(CONFIG_IPV6) 1704 
setup_timer(&br->ip6_other_query.timer, 1705 br_ip6_multicast_querier_expired, (unsigned long)br); 1706 setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired, 1707 (unsigned long)br); 1708 #endif 1709 } 1710 1711 static void __br_multicast_open(struct net_bridge *br, 1712 struct bridge_mcast_own_query *query) 1713 { 1714 query->startup_sent = 0; 1715 1716 if (br->multicast_disabled) 1717 return; 1718 1719 mod_timer(&query->timer, jiffies); 1720 } 1721 1722 void br_multicast_open(struct net_bridge *br) 1723 { 1724 __br_multicast_open(br, &br->ip4_own_query); 1725 #if IS_ENABLED(CONFIG_IPV6) 1726 __br_multicast_open(br, &br->ip6_own_query); 1727 #endif 1728 } 1729 1730 void br_multicast_stop(struct net_bridge *br) 1731 { 1732 struct net_bridge_mdb_htable *mdb; 1733 struct net_bridge_mdb_entry *mp; 1734 struct hlist_node *n; 1735 u32 ver; 1736 int i; 1737 1738 del_timer_sync(&br->multicast_router_timer); 1739 del_timer_sync(&br->ip4_other_query.timer); 1740 del_timer_sync(&br->ip4_own_query.timer); 1741 #if IS_ENABLED(CONFIG_IPV6) 1742 del_timer_sync(&br->ip6_other_query.timer); 1743 del_timer_sync(&br->ip6_own_query.timer); 1744 #endif 1745 1746 spin_lock_bh(&br->multicast_lock); 1747 mdb = mlock_dereference(br->mdb, br); 1748 if (!mdb) 1749 goto out; 1750 1751 br->mdb = NULL; 1752 1753 ver = mdb->ver; 1754 for (i = 0; i < mdb->max; i++) { 1755 hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], 1756 hlist[ver]) { 1757 del_timer(&mp->timer); 1758 call_rcu_bh(&mp->rcu, br_multicast_free_group); 1759 } 1760 } 1761 1762 if (mdb->old) { 1763 spin_unlock_bh(&br->multicast_lock); 1764 rcu_barrier_bh(); 1765 spin_lock_bh(&br->multicast_lock); 1766 WARN_ON(mdb->old); 1767 } 1768 1769 mdb->old = mdb; 1770 call_rcu_bh(&mdb->rcu, br_mdb_free); 1771 1772 out: 1773 spin_unlock_bh(&br->multicast_lock); 1774 } 1775 1776 int br_multicast_set_router(struct net_bridge *br, unsigned long val) 1777 { 1778 int err = -EINVAL; 1779 1780 spin_lock_bh(&br->multicast_lock); 1781 
1782 switch (val) { 1783 case 0: 1784 case 2: 1785 del_timer(&br->multicast_router_timer); 1786 /* fall through */ 1787 case 1: 1788 br->multicast_router = val; 1789 err = 0; 1790 break; 1791 } 1792 1793 spin_unlock_bh(&br->multicast_lock); 1794 1795 return err; 1796 } 1797 1798 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val) 1799 { 1800 struct net_bridge *br = p->br; 1801 int err = -EINVAL; 1802 1803 spin_lock(&br->multicast_lock); 1804 1805 switch (val) { 1806 case 0: 1807 case 1: 1808 case 2: 1809 p->multicast_router = val; 1810 err = 0; 1811 1812 if (val < 2 && !hlist_unhashed(&p->rlist)) 1813 hlist_del_init_rcu(&p->rlist); 1814 1815 if (val == 1) 1816 break; 1817 1818 del_timer(&p->multicast_router_timer); 1819 1820 if (val == 0) 1821 break; 1822 1823 br_multicast_add_router(br, p); 1824 break; 1825 } 1826 1827 spin_unlock(&br->multicast_lock); 1828 1829 return err; 1830 } 1831 1832 static void br_multicast_start_querier(struct net_bridge *br, 1833 struct bridge_mcast_own_query *query) 1834 { 1835 struct net_bridge_port *port; 1836 1837 __br_multicast_open(br, query); 1838 1839 list_for_each_entry(port, &br->port_list, list) { 1840 if (port->state == BR_STATE_DISABLED || 1841 port->state == BR_STATE_BLOCKING) 1842 continue; 1843 1844 if (query == &br->ip4_own_query) 1845 br_multicast_enable(&port->ip4_own_query); 1846 #if IS_ENABLED(CONFIG_IPV6) 1847 else 1848 br_multicast_enable(&port->ip6_own_query); 1849 #endif 1850 } 1851 } 1852 1853 int br_multicast_toggle(struct net_bridge *br, unsigned long val) 1854 { 1855 int err = 0; 1856 struct net_bridge_mdb_htable *mdb; 1857 1858 spin_lock_bh(&br->multicast_lock); 1859 if (br->multicast_disabled == !val) 1860 goto unlock; 1861 1862 br->multicast_disabled = !val; 1863 if (br->multicast_disabled) 1864 goto unlock; 1865 1866 if (!netif_running(br->dev)) 1867 goto unlock; 1868 1869 mdb = mlock_dereference(br->mdb, br); 1870 if (mdb) { 1871 if (mdb->old) { 1872 err = -EEXIST; 1873 
rollback: 1874 br->multicast_disabled = !!val; 1875 goto unlock; 1876 } 1877 1878 err = br_mdb_rehash(&br->mdb, mdb->max, 1879 br->hash_elasticity); 1880 if (err) 1881 goto rollback; 1882 } 1883 1884 br_multicast_start_querier(br, &br->ip4_own_query); 1885 #if IS_ENABLED(CONFIG_IPV6) 1886 br_multicast_start_querier(br, &br->ip6_own_query); 1887 #endif 1888 1889 unlock: 1890 spin_unlock_bh(&br->multicast_lock); 1891 1892 return err; 1893 } 1894 1895 int br_multicast_set_querier(struct net_bridge *br, unsigned long val) 1896 { 1897 unsigned long max_delay; 1898 1899 val = !!val; 1900 1901 spin_lock_bh(&br->multicast_lock); 1902 if (br->multicast_querier == val) 1903 goto unlock; 1904 1905 br->multicast_querier = val; 1906 if (!val) 1907 goto unlock; 1908 1909 max_delay = br->multicast_query_response_interval; 1910 1911 if (!timer_pending(&br->ip4_other_query.timer)) 1912 br->ip4_other_query.delay_time = jiffies + max_delay; 1913 1914 br_multicast_start_querier(br, &br->ip4_own_query); 1915 1916 #if IS_ENABLED(CONFIG_IPV6) 1917 if (!timer_pending(&br->ip6_other_query.timer)) 1918 br->ip6_other_query.delay_time = jiffies + max_delay; 1919 1920 br_multicast_start_querier(br, &br->ip6_own_query); 1921 #endif 1922 1923 unlock: 1924 spin_unlock_bh(&br->multicast_lock); 1925 1926 return 0; 1927 } 1928 1929 int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) 1930 { 1931 int err = -EINVAL; 1932 u32 old; 1933 struct net_bridge_mdb_htable *mdb; 1934 1935 spin_lock_bh(&br->multicast_lock); 1936 if (!is_power_of_2(val)) 1937 goto unlock; 1938 1939 mdb = mlock_dereference(br->mdb, br); 1940 if (mdb && val < mdb->size) 1941 goto unlock; 1942 1943 err = 0; 1944 1945 old = br->hash_max; 1946 br->hash_max = val; 1947 1948 if (mdb) { 1949 if (mdb->old) { 1950 err = -EEXIST; 1951 rollback: 1952 br->hash_max = old; 1953 goto unlock; 1954 } 1955 1956 err = br_mdb_rehash(&br->mdb, br->hash_max, 1957 br->hash_elasticity); 1958 if (err) 1959 goto rollback; 1960 } 1961 
1962 unlock: 1963 spin_unlock_bh(&br->multicast_lock); 1964 1965 return err; 1966 } 1967 1968 /** 1969 * br_multicast_list_adjacent - Returns snooped multicast addresses 1970 * @dev: The bridge port adjacent to which to retrieve addresses 1971 * @br_ip_list: The list to store found, snooped multicast IP addresses in 1972 * 1973 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast 1974 * snooping feature on all bridge ports of dev's bridge device, excluding 1975 * the addresses from dev itself. 1976 * 1977 * Returns the number of items added to br_ip_list. 1978 * 1979 * Notes: 1980 * - br_ip_list needs to be initialized by caller 1981 * - br_ip_list might contain duplicates in the end 1982 * (needs to be taken care of by caller) 1983 * - br_ip_list needs to be freed by caller 1984 */ 1985 int br_multicast_list_adjacent(struct net_device *dev, 1986 struct list_head *br_ip_list) 1987 { 1988 struct net_bridge *br; 1989 struct net_bridge_port *port; 1990 struct net_bridge_port_group *group; 1991 struct br_ip_list *entry; 1992 int count = 0; 1993 1994 rcu_read_lock(); 1995 if (!br_ip_list || !br_port_exists(dev)) 1996 goto unlock; 1997 1998 port = br_port_get_rcu(dev); 1999 if (!port || !port->br) 2000 goto unlock; 2001 2002 br = port->br; 2003 2004 list_for_each_entry_rcu(port, &br->port_list, list) { 2005 if (!port->dev || port->dev == dev) 2006 continue; 2007 2008 hlist_for_each_entry_rcu(group, &port->mglist, mglist) { 2009 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 2010 if (!entry) 2011 goto unlock; 2012 2013 entry->addr = group->addr; 2014 list_add(&entry->list, br_ip_list); 2015 count++; 2016 } 2017 } 2018 2019 unlock: 2020 rcu_read_unlock(); 2021 return count; 2022 } 2023 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent); 2024 2025 /** 2026 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge 2027 * @dev: The bridge port providing the bridge on which to check for a querier 2028 * @proto: The protocol family to check for: 
IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 2029 * 2030 * Checks whether the given interface has a bridge on top and if so returns 2031 * true if a valid querier exists anywhere on the bridged link layer. 2032 * Otherwise returns false. 2033 */ 2034 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) 2035 { 2036 struct net_bridge *br; 2037 struct net_bridge_port *port; 2038 struct ethhdr eth; 2039 bool ret = false; 2040 2041 rcu_read_lock(); 2042 if (!br_port_exists(dev)) 2043 goto unlock; 2044 2045 port = br_port_get_rcu(dev); 2046 if (!port || !port->br) 2047 goto unlock; 2048 2049 br = port->br; 2050 2051 memset(ð, 0, sizeof(eth)); 2052 eth.h_proto = htons(proto); 2053 2054 ret = br_multicast_querier_exists(br, ð); 2055 2056 unlock: 2057 rcu_read_unlock(); 2058 return ret; 2059 } 2060 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere); 2061 2062 /** 2063 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port 2064 * @dev: The bridge port adjacent to which to check for a querier 2065 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 2066 * 2067 * Checks whether the given interface has a bridge on top and if so returns 2068 * true if a selected querier is behind one of the other ports of this 2069 * bridge. Otherwise returns false. 
2070 */ 2071 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto) 2072 { 2073 struct net_bridge *br; 2074 struct net_bridge_port *port; 2075 bool ret = false; 2076 2077 rcu_read_lock(); 2078 if (!br_port_exists(dev)) 2079 goto unlock; 2080 2081 port = br_port_get_rcu(dev); 2082 if (!port || !port->br) 2083 goto unlock; 2084 2085 br = port->br; 2086 2087 switch (proto) { 2088 case ETH_P_IP: 2089 if (!timer_pending(&br->ip4_other_query.timer) || 2090 rcu_dereference(br->ip4_querier.port) == port) 2091 goto unlock; 2092 break; 2093 #if IS_ENABLED(CONFIG_IPV6) 2094 case ETH_P_IPV6: 2095 if (!timer_pending(&br->ip6_other_query.timer) || 2096 rcu_dereference(br->ip6_querier.port) == port) 2097 goto unlock; 2098 break; 2099 #endif 2100 default: 2101 goto unlock; 2102 } 2103 2104 ret = true; 2105 unlock: 2106 rcu_read_unlock(); 2107 return ret; 2108 } 2109 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent); 2110