/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#endif

#include "br_private.h"

static void br_multicast_start_querier(struct net_bridge *br);
/* Bumped each time the mdb hash table is rehashed (see br_mdb_rehash()). */
unsigned int br_mdb_rehash_seq;

/* Return non-zero when two bridge group addresses match: same protocol,
 * same VLAN id, and same IPv4/IPv6 group address.
 */
static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;
	if (a->vid != b->vid)
		return 0;
	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
	}
	return 0;
}

/* Hash an IPv4 group + VLAN id into a bucket index.  The "& (mdb->max - 1)"
 * mask assumes mdb->max is a power of two.
 */
static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
				__u16 vid)
{
	return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of __br_ip4_hash(). */
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip,
				__u16 vid)
{
	return jhash_2words(ipv6_addr_hash(ip), vid,
			    mdb->secret) & (mdb->max - 1);
}
#endif

/* Protocol-dispatching hash for a struct br_ip (continues in next chunk). */
static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch
 (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
	}
	return 0;
}

/* Walk one hash chain (RCU-safe) looking for an exact group match. */
static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

/* Look up a group entry; tolerates a not-yet-allocated table (NULL mdb). */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
					   struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}

/* Convenience wrapper: build a br_ip for an IPv4 group and look it up. */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_mdb_ip4_get(). */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
	__u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif

/* Data-path lookup: map a forwarded skb's destination to an mdb entry.
 * Returns NULL when snooping is disabled or the skb is itself IGMP/MLD
 * control traffic (BR_INPUT_SKB_CB(skb)->igmp set by the rcv path).
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}

/* RCU callback: free the hash table that was replaced by a rehash. */
static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}

/* Re-link every entry of @old into @new (entries carry one hlist node per
 * table version, so no allocation is needed).  When @elasticity is non-zero,
 * also verify that no new chain exceeds it; the tail of this function
 * (-EINVAL on violation, 0 otherwise) continues in the next chunk.
 */
static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ?
 -EINVAL : 0;
}

/* RCU callback freeing a per-port group entry. */
void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
		container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

/* RCU callback freeing an mdb group entry. */
static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
		container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}

/* Group membership timer: drop the bridge's own membership and, if no port
 * members remain either, unlink and free the mdb entry.
 */
static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	/* timer_pending() means the timer was re-armed after this fired */
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

/* Unlink @pg from its group's port list and schedule its free.  Caller holds
 * br->multicast_lock (mlock_dereference asserts this).  When the last port
 * leaves and the bridge itself is not a member, an armed group timer is
 * expired immediately.
 */
static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist && mp->timer_armed &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	/* pg was not on its own group's list — indicates list corruption */
	WARN_ON(1);
}

/* Per-port group membership timer: remove the port from the group unless the
 * entry is permanent (continues in next chunk).
 */
static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br =
 pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}

/* Allocate a table of @max buckets (GFP_ATOMIC — called under the multicast
 * lock), migrate entries from the current table, and publish it via RCU.
 * A fresh hash secret is drawn when growing from scratch or when rehashing
 * to break over-long chains (elasticity != 0).
 */
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	/* entries carry two hlist nodes; alternate which one the live table uses */
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}

/* Build an IGMP membership query skb: Ethernet + IPv4 (with 4-byte Router
 * Alert option, hence ihl = 6 and the extra 4 bytes) + IGMP header.
 * @group == 0 produces a general query.  Returns NULL on allocation failure.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group)
{
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 sizeof(*ih) + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	memcpy(eth->h_source, br->dev->dev_addr, 6);
	/* 01:00:5e:00:00:01 — all-hosts multicast MAC */
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));
	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;		/* 24 bytes: 20-byte header + 4-byte RA option */
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br->multicast_query_use_ifaddr ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	/* Router Alert option directly after the fixed header */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	ih = igmp_hdr(skb);
	ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
	/* max response time: group-specific vs general query interval */
	ih->code = (group ? br->multicast_last_member_interval :
			    br->multicast_query_response_interval) /
		   (HZ / IGMP_TIMER_SCALE);
	ih->group = group;
	ih->csum = 0;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
	skb_put(skb, sizeof(*ih));

	/* leave skb->data at the network header; caller pushes eth if needed */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query skb: Ethernet + IPv6 + Hop-by-Hop Router Alert (8
 * bytes) + MLD message.  An all-zero @group produces a general query.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *group)
{
	struct sk_buff *skb;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct ethhdr *eth;
	u8 *hopopt;
	unsigned long interval;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + sizeof(*mldq));
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	memcpy(eth->h_source, br->dev->dev_addr, 6);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);	/* version 6 */
	ip6h->payload_len = htons(8 + sizeof(*mldq));
	ip6h->nexthdr =
 IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	/* ff02::1 — all-nodes link-local multicast */
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		/* no usable link-local source address — cannot send */
		kfree_skb(skb);
		return NULL;
	}
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	mldq = (struct mld_msg *) icmp6_hdr(skb);

	interval = ipv6_addr_any(group) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;

	mldq->mld_type = ICMPV6_MGM_QUERY;
	mldq->mld_code = 0;
	mldq->mld_cksum = 0;
	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
	mldq->mld_reserved = 0;
	mldq->mld_mca = *group;

	/* checksum */
	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  sizeof(*mldq), IPPROTO_ICMPV6,
					  csum_partial(mldq,
						       sizeof(*mldq), 0));
	skb_put(skb, sizeof(*mldq));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

/* Dispatch query allocation on the address family of @addr. */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr)
{
	switch (addr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_alloc_query(br, addr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
#endif
	}
	return NULL;
}

/* Look up @group in bucket @hash; on miss, decide whether the table must
 * grow or be rehashed first.  Returns the entry, NULL ("go ahead and
 * allocate"), or an ERR_PTR (-EAGAIN after a successful rehash; other codes
 * disable snooping or report failure).  Body continues in the next chunk.
 */
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	/* chain too long: request a rehash with a fresh secret */
	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	/* table full: double it, unless that would exceed the configured cap */
	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		/* a previous rehash has not been RCU-freed yet */
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ?
 port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		/* rehash succeeded — caller must recompute the hash */
		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}

/* Find or create the mdb entry for @group.  Allocates the hash table on
 * first use, retries the hash after a rehash (-EAGAIN), and returns an
 * ERR_PTR on failure.  Caller holds br->multicast_lock.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
	struct net_bridge_port *port, struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, port, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		/* NULL: group not present, fall through and allocate */
		break;

	case -EAGAIN:
rehash:
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		/* found entry or hard error — return it as-is */
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

/* Allocate and link a per-port group entry; @next becomes its successor in
 * the group's port list.  Returns NULL on allocation failure.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char state)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->state = state;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);
	return p;
}

/* Record a membership report: add @port (or the bridge itself when @port is
 * NULL) to @group's member list (continues in next chunk).
 */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct
 net_bridge_port_group __rcu **pp;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		/* host join: the bridge device itself is a member */
		mp->mglist = true;
		goto out;
	}

	/* port list is kept ordered by descending pointer value */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			goto out;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);

out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}

/* IPv4 wrapper: ignore link-local (224.0.0.x) groups, which are always
 * flooded rather than snooped.
 */
static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 wrapper: only transient multicast addresses are snooped. */
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (!ipv6_is_transient_multicast(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}
#endif

/* Per-port router timer: remove the port from the router list when its
 * presence times out (unless configured as a permanent router, value != 1).
 */
static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router != 1 ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);

out:
	spin_unlock(&br->multicast_lock);
}

/* Bridge-local router timer callback: intentionally a no-op; expiry of
 * br->multicast_router_timer alone marks the local router as stale.
 */
static void br_multicast_local_router_expired(unsigned long data)
{
}

/* Other-querier-present timer expired: take over querier duty. */
static void br_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br);

out:
	spin_unlock(&br->multicast_lock);
}

/* Emit one query skb: out a specific port via the bridge netfilter hook, or
 * up the local stack (netif_rx) when @port is NULL.
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;

	skb = br_multicast_alloc_query(br, ip);
	if (!skb)
		return;

	if (port) {
		__skb_push(skb, sizeof(struct ethhdr));
		skb->dev = port->dev;
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
			dev_queue_xmit);
	} else
		netif_rx(skb);
}

/* Send IPv4 (and, if enabled, IPv6) general queries and re-arm the query
 * timer.  @sent counts startup queries already sent, selecting between the
 * startup and steady-state intervals.  Suppressed while another querier is
 * active (querier timer pending) or querier mode is off.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port, u32 sent)
{
	unsigned long time;
	struct br_ip br_group;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier ||
	    timer_pending(&br->multicast_querier_timer))
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	br_group.proto = htons(ETH_P_IP);
	__br_multicast_send_query(br, port, &br_group);

#if IS_ENABLED(CONFIG_IPV6)
	br_group.proto = htons(ETH_P_IPV6);
	__br_multicast_send_query(br, port, &br_group);
#endif

	time = jiffies;
	time += sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(port ?
 &port->multicast_query_timer :
			 &br->multicast_query_timer, time);
}

/* Per-port query timer: send the next (startup) query for an active port. */
static void br_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (port->multicast_startup_queries_sent <
	    br->multicast_startup_query_count)
		port->multicast_startup_queries_sent++;

	br_multicast_send_query(port->br, port,
				port->multicast_startup_queries_sent);

out:
	spin_unlock(&br->multicast_lock);
}

/* Initialise multicast state when a port is added to the bridge. */
void br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = 1;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->multicast_query_timer,
		    br_multicast_port_query_expired, (unsigned long)port);
}

/* Tear down per-port multicast timers when the port is removed. */
void br_multicast_del_port(struct net_bridge_port *port)
{
	del_timer_sync(&port->multicast_router_timer);
}

/* Restart the startup-query sequence for @port, firing immediately. */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	port->multicast_startup_queries_sent = 0;

	if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
	    del_timer(&port->multicast_query_timer))
		mod_timer(&port->multicast_query_timer, jiffies);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (br->multicast_disabled || !netif_running(br->dev))
		goto out;

	__br_multicast_enable_port(port);

out:
	spin_unlock(&br->multicast_lock);
}

/* Drop all of @port's group memberships and router status, stop its timers
 * (continues in next chunk).
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n,
 &port->mglist, mglist)
		br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist))
		hlist_del_init_rcu(&port->rlist);
	del_timer(&port->multicast_router_timer);
	del_timer(&port->multicast_query_timer);
	spin_unlock(&br->multicast_lock);
}

/* Parse an IGMPv3 membership report and add a group membership for each
 * valid group record.  Each record is length-checked with pskb_may_pull()
 * before being dereferenced.  Returns 0 or -EINVAL on a truncated packet.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb)
{
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;
	u16 vid = 0;

	if (!pskb_may_pull(skb, sizeof(*ih)))
		return -EINVAL;

	br_vlan_get_tag(skb, &vid);
	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		/* skip the source list (4 bytes per IPv4 source) */
		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now.
 */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		err = br_ip4_multicast_add_group(br, port, group, vid);
		if (err)
			break;
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 listener report and add a group membership per valid group
 * record.  grec_nsrcs is fetched via skb_header_pointer() before the record
 * is pulled, since the record length depends on it.  Returns 0 or -EINVAL
 * on a truncated packet.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb)
{
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	int i;
	int len;
	int num;
	int err = 0;
	u16 vid = 0;

	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	br_vlan_get_tag(skb, &vid);
	icmp6h = icmp6_hdr(skb);
	/* number of group records lives in the second data16 word */
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *nsrcs, _nsrcs;

		nsrcs = skb_header_pointer(skb,
					   len + offsetof(struct mld2_grec,
							  grec_nsrcs),
					   sizeof(_nsrcs), &_nsrcs);
		if (!nsrcs)
			return -EINVAL;

		if (!pskb_may_pull(skb,
				   len + sizeof(*grec) +
				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += sizeof(*grec) +
		       sizeof(struct in6_addr) * ntohs(*nsrcs);

		/* We treat these as MLDv1 reports for now.
*/ 991 switch (grec->grec_type) { 992 case MLD2_MODE_IS_INCLUDE: 993 case MLD2_MODE_IS_EXCLUDE: 994 case MLD2_CHANGE_TO_INCLUDE: 995 case MLD2_CHANGE_TO_EXCLUDE: 996 case MLD2_ALLOW_NEW_SOURCES: 997 case MLD2_BLOCK_OLD_SOURCES: 998 break; 999 1000 default: 1001 continue; 1002 } 1003 1004 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca, 1005 vid); 1006 if (!err) 1007 break; 1008 } 1009 1010 return err; 1011 } 1012 #endif 1013 1014 /* 1015 * Add port to router_list 1016 * list is maintained ordered by pointer value 1017 * and locked by br->multicast_lock and RCU 1018 */ 1019 static void br_multicast_add_router(struct net_bridge *br, 1020 struct net_bridge_port *port) 1021 { 1022 struct net_bridge_port *p; 1023 struct hlist_node *slot = NULL; 1024 1025 hlist_for_each_entry(p, &br->router_list, rlist) { 1026 if ((unsigned long) port >= (unsigned long) p) 1027 break; 1028 slot = &p->rlist; 1029 } 1030 1031 if (slot) 1032 hlist_add_after_rcu(slot, &port->rlist); 1033 else 1034 hlist_add_head_rcu(&port->rlist, &br->router_list); 1035 } 1036 1037 static void br_multicast_mark_router(struct net_bridge *br, 1038 struct net_bridge_port *port) 1039 { 1040 unsigned long now = jiffies; 1041 1042 if (!port) { 1043 if (br->multicast_router == 1) 1044 mod_timer(&br->multicast_router_timer, 1045 now + br->multicast_querier_interval); 1046 return; 1047 } 1048 1049 if (port->multicast_router != 1) 1050 return; 1051 1052 if (!hlist_unhashed(&port->rlist)) 1053 goto timer; 1054 1055 br_multicast_add_router(br, port); 1056 1057 timer: 1058 mod_timer(&port->multicast_router_timer, 1059 now + br->multicast_querier_interval); 1060 } 1061 1062 static void br_multicast_query_received(struct net_bridge *br, 1063 struct net_bridge_port *port, 1064 int saddr) 1065 { 1066 if (saddr) 1067 mod_timer(&br->multicast_querier_timer, 1068 jiffies + br->multicast_querier_interval); 1069 else if (timer_pending(&br->multicast_querier_timer)) 1070 return; 1071 1072 br_multicast_mark_router(br, 
 port);
}

/* Process a received IGMP query: note the querier/router, compute the max
 * response delay (IGMPv2 vs IGMPv3 encoding), and shorten membership timers
 * for the queried group so non-responders age out.
 */
static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;
	int err = 0;
	u16 vid = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	br_multicast_query_received(br, port, !!iph->saddr);

	group = ih->group;

	if (skb->len == sizeof(*ih)) {
		/* IGMPv1/v2-sized query */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1 (code 0): fixed delay, treat as general query */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else {
		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
			err = -EINVAL;
			goto out;
		}

		ih3 = igmpv3_query_hdr(skb);
		/* group-and-source-specific queries are not handled */
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	}

	if (!group)
		goto out;

	br_vlan_get_tag(skb, &vid);
	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp);
	mod_timer(&mp->timer, now + br->multicast_membership_interval);
	mp->timer_armed = true;

	max_delay *= br->multicast_last_member_count;

	/* only shorten timers — never push an earlier expiry further out */
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
	    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_ip4_multicast_query(): handle MLDv1 and MLDv2
 * queries, distinguished by packet length.
 */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long max_delay;
	unsigned long now = jiffies;
	const struct in6_addr *group = NULL;
	int err = 0;
	u16 vid = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));

	if (skb->len == sizeof(*mld)) {
		/* MLDv1-sized query */
		if (!pskb_may_pull(skb, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else if (skb->len >= sizeof(*mld2q)) {
		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		/* ignore group-and-source-specific queries */
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		max_delay = mld2q->mld2q_mrc ?
 MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
	}

	if (!group)
		goto out;

	br_vlan_get_tag(skb, &vid);
	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp);
	mod_timer(&mp->timer, now + br->multicast_membership_interval);
	mp->timer_armed = true;

	max_delay *= br->multicast_last_member_count;
	/* only shorten timers — never extend an earlier expiry */
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

/* Handle a leave message for @group from @port (NULL = the bridge itself):
 * when we are the querier, send a last-member query and shorten the member's
 * timer; with fast-leave enabled, remove the port immediately.  Ignored
 * while another querier is active (querier timer pending).
 */
static void br_multicast_leave_group(struct net_bridge *br,
				     struct net_bridge_port *port,
				     struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED) ||
	    timer_pending(&br->multicast_querier_timer))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (br->multicast_querier &&
	    !timer_pending(&br->multicast_querier_timer)) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;
		mod_timer(port ?
			  &port->multicast_query_timer :
			  &br->multicast_query_timer, time);

		/* Lower this port's membership timer to the last-member
		 * deadline, unless it already expires sooner.
		 */
		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (p->port != port)
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		/* Fast leave: remove the port from the group right away
		 * instead of waiting for the last-member query cycle.
		 */
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->port != port)
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			/* Lockless readers may still hold the entry;
			 * free it after an RCU-bh grace period.
			 */
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB);

			/* Last member gone: make the group entry expire
			 * immediately.
			 */
			if (!mp->ports && !mp->mglist && mp->timer_armed &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
	       br->multicast_last_member_interval;

	/* Leave for the bridge device itself (port == NULL): shorten the
	 * group timer, but never extend one that expires sooner.
	 */
	if (!port) {
		if (mp->mglist && mp->timer_armed &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}
	}

out:
	spin_unlock(&br->multicast_lock);
}

/* Translate an IGMP leave for @group/@vid into a generic bridge leave.
 * Local-network multicast groups are never snooped, so they are
 * ignored here.
 */
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart: only transient multicast groups are tracked, so
 * anything else is ignored.
 */
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid)
{
	struct br_ip br_group;

	if (!ipv6_is_transient_multicast(group))
		return;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group);
}
#endif

/* Inspect an IPv4 frame for IGMP and feed it into the snooping state
 * machine.  Returns 0 when the frame should continue down the normal
 * bridge path, a negative error for malformed packets or OOM.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb)
{
	struct sk_buff *skb2 = skb;
	const struct iphdr *iph;
	struct igmphdr *ih;
	unsigned int len;
	unsigned int offset;
	int err;
	u16 vid = 0;

	/* We treat OOM as packet loss for now.
	 */
	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -EINVAL;

	iph = ip_hdr(skb);

	/* Basic IPv4 sanity checks: minimum header length, version. */
	if (iph->ihl < 5 || iph->version != 4)
		return -EINVAL;

	if (!pskb_may_pull(skb, ip_hdrlen(skb)))
		return -EINVAL;

	/* pskb_may_pull() may have reallocated the header area. */
	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		return -EINVAL;

	if (iph->protocol != IPPROTO_IGMP) {
		/* Not IGMP: restrict to mrouter ports unless it is
		 * local-network multicast.
		 */
		if (!ipv4_is_local_multicast(iph->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	}

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < ip_hdrlen(skb))
		return -EINVAL;

	if (skb->len > len) {
		/* Trim trailing padding on a private clone so the
		 * original skb continues down the bridge path intact.
		 */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			return -ENOMEM;

		err = pskb_trim_rcsum(skb2, len);
		if (err)
			goto err_out;
	}

	len -= ip_hdrlen(skb2);
	offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
	__skb_pull(skb2, offset);
	skb_reset_transport_header(skb2);

	err = -EINVAL;
	if (!pskb_may_pull(skb2, sizeof(*ih)))
		goto out;

	/* Verify the IGMP checksum, reusing a hardware-provided sum
	 * when available.
	 */
	switch (skb2->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb2->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb2->csum = 0;
		if (skb_checksum_complete(skb2))
			goto out;
	}

	err = 0;

	br_vlan_get_tag(skb2, &vid);
	BR_INPUT_SKB_CB(skb)->igmp = 1;
	ih = igmp_hdr(skb2);

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* Reports are forwarded to mrouter ports only. */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb2);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb2);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid);
		break;
	}

out:
	/* Undo the pull so skb2 == skb (the untrimmed case) is left as
	 * the caller handed it in.
	 */
	__skb_push(skb2, offset);
err_out:
	if (skb2 != skb)
		kfree_skb(skb2);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Inspect an IPv6 frame for MLD (ICMPv6 behind a hop-by-hop header)
 * and feed it into the snooping state machine.  Returns 0 when the
 * frame should continue down the normal bridge path.
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb)
{
	struct sk_buff *skb2;
	const struct ipv6hdr *ip6h;
	u8 icmp6_type;
	u8 nexthdr;
	__be16 frag_off;
	unsigned int len;
	int offset;
	int err;
	u16 vid = 0;

	if (!pskb_may_pull(skb, sizeof(*ip6h)))
		return -EINVAL;

	ip6h = ipv6_hdr(skb);

	/*
	 * We're interested in MLD messages only.
	 *  - Version is 6
	 *  - MLD has always Router Alert hop-by-hop option
	 *  - But we do not support jumbograms.
	 */
	if (ip6h->version != 6 ||
	    ip6h->nexthdr != IPPROTO_HOPOPTS ||
	    ip6h->payload_len == 0)
		return 0;

	len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
	if (skb->len < len)
		return -EINVAL;

	nexthdr = ip6h->nexthdr;
	offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);

	if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
		return 0;

	/* Okay, we found ICMPv6 header */
	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return -ENOMEM;

	err = -EINVAL;
	if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
		goto out;

	len -= offset - skb_network_offset(skb2);

	__skb_pull(skb2, offset);
	skb_reset_transport_header(skb2);
	skb_postpull_rcsum(skb2, skb_network_header(skb2),
			   skb_network_header_len(skb2));

	icmp6_type = icmp6_hdr(skb2)->icmp6_type;

	switch (icmp6_type) {
	case ICMPV6_MGM_QUERY:
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_MLD2_REPORT:
		break;
	default:
		err = 0;
		goto out;
	}

	/* Okay, we found MLD message. Check further.
	 */
	if (skb2->len > len) {
		err = pskb_trim_rcsum(skb2, len);
		if (err)
			goto out;
		err = -EINVAL;
	}

	ip6h = ipv6_hdr(skb2);

	/* Verify the ICMPv6 checksum over the pseudo-header. */
	switch (skb2->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
				     IPPROTO_ICMPV6, skb2->csum))
			break;
		/*FALLTHROUGH*/
	case CHECKSUM_NONE:
		skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
							  &ip6h->daddr,
							  skb2->len,
							  IPPROTO_ICMPV6, 0));
		if (__skb_checksum_complete(skb2))
			goto out;
	}

	err = 0;

	br_vlan_get_tag(skb, &vid);
	BR_INPUT_SKB_CB(skb)->igmp = 1;

	switch (icmp6_type) {
	case ICMPV6_MGM_REPORT:
	    {
		struct mld_msg *mld;
		if (!pskb_may_pull(skb2, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *)skb_transport_header(skb2);
		/* Reports are forwarded to mrouter ports only. */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
		break;
	    }
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb2);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb2);
		break;
	case ICMPV6_MGM_REDUCTION:
	    {
		struct mld_msg *mld;
		if (!pskb_may_pull(skb2, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *)skb_transport_header(skb2);
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
	    }
	}

out:
	kfree_skb(skb2);
	return err;
}
#endif

/* Entry point from the bridge input path: dispatch IGMP/MLD snooping
 * by L3 protocol.  Returns 0 when the frame should be forwarded
 * normally, negative error on malformed snooping traffic.
 */
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb)
{
	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return br_multicast_ipv4_rcv(br, port, skb);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_multicast_ipv6_rcv(br, port, skb);
#endif
	}

	return 0;
}

/* Timer callback: send a general query while acting as querier,
 * counting the initial startup queries separately.
 */
static void br_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	spin_lock(&br->multicast_lock);
	if (br->multicast_startup_queries_sent <
	    br->multicast_startup_query_count)
		br->multicast_startup_queries_sent++;

	br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);

	spin_unlock(&br->multicast_lock);
}

/* Initialise bridge-wide multicast snooping state: hash-table tuning,
 * protocol counters, timer intervals (in jiffies) and the three
 * bridge-level timers.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = 1;
	br->multicast_querier = 0;
	br->multicast_query_use_ifaddr = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->multicast_querier_timer,
		    br_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
		    (unsigned long)br);
}

/* Bring snooping up when the bridge device is opened: restart the
 * startup-query series unless snooping is disabled.
 */
void br_multicast_open(struct net_bridge *br)
{
	br->multicast_startup_queries_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&br->multicast_query_timer, jiffies);
}

/* Tear down all snooping state when the bridge device goes down. */
void br_multicast_stop(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->multicast_querier_timer);
	del_timer_sync(&br->multicast_query_timer);

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	/* Walk the current-version chains and schedule every entry for
	 * RCU freeing.
	 */
	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			mp->timer_armed = false;
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	/* A rehash may still be pending; wait for its RCU callbacks
	 * (outside the lock) before freeing the table itself.
	 */
	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}

/* Set the bridge-wide multicast router mode.  Modes 0 and 2 stop the
 * router-learning timer; mode 1 leaves it running.  Returns -EINVAL
 * for unknown modes, -ENOENT if the bridge is down.
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -ENOENT;

	spin_lock_bh(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	switch (val) {
	case 0:
	case 2:
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case 1:
		br->multicast_router = val;
		err = 0;
		break;

	default:
		err = -EINVAL;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/* Per-port variant of the router mode setting. */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	int err = -ENOENT;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
		goto unlock;

	switch (val) {
	case 0:
	case 1:
	case 2:
		p->multicast_router = val;
		err = 0;

		/* Modes 0/1 drop any existing router-list membership;
		 * mode 1 then relies on the learning timer, mode 0
		 * stops it, mode 2 pins the port on the router list.
		 */
		if (val < 2 && !hlist_unhashed(&p->rlist))
			hlist_del_init_rcu(&p->rlist);

		if (val == 1)
			break;

		del_timer(&p->multicast_router_timer);

		if (val == 0)
			break;

		br_multicast_add_router(br, p);
		break;

	default:
		err = -EINVAL;
		break;
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}

/* Restart querying on every port that is not disabled/blocking. */
static void br_multicast_start_querier(struct net_bridge *br)
{
	struct net_bridge_port *port;

	br_multicast_open(br);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		__br_multicast_enable_port(port);
	}
}

/* Enable (val != 0) or disable multicast snooping.  Enabling rehashes
 * the MDB (rolling back on failure) and restarts the querier logic.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	int err = 0;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		/* An unfinished previous rehash forbids another one. */
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_start_querier(br);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/* Enable or disable sending our own queries. */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	if (val)
		br_multicast_start_querier(br);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

/* Change the MDB hash size limit; rehashes the table if one exists. */
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -ENOENT;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	err =
-EINVAL; 1860 if (!is_power_of_2(val)) 1861 goto unlock; 1862 1863 mdb = mlock_dereference(br->mdb, br); 1864 if (mdb && val < mdb->size) 1865 goto unlock; 1866 1867 err = 0; 1868 1869 old = br->hash_max; 1870 br->hash_max = val; 1871 1872 if (mdb) { 1873 if (mdb->old) { 1874 err = -EEXIST; 1875 rollback: 1876 br->hash_max = old; 1877 goto unlock; 1878 } 1879 1880 err = br_mdb_rehash(&br->mdb, br->hash_max, 1881 br->hash_elasticity); 1882 if (err) 1883 goto rollback; 1884 } 1885 1886 unlock: 1887 spin_unlock(&br->multicast_lock); 1888 1889 return err; 1890 } 1891