1 /* 2 * Bridge multicast support. 3 * 4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License as published by the Free 8 * Software Foundation; either version 2 of the License, or (at your option) 9 * any later version. 10 * 11 */ 12 13 #include <linux/err.h> 14 #include <linux/if_ether.h> 15 #include <linux/igmp.h> 16 #include <linux/jhash.h> 17 #include <linux/kernel.h> 18 #include <linux/log2.h> 19 #include <linux/netdevice.h> 20 #include <linux/netfilter_bridge.h> 21 #include <linux/random.h> 22 #include <linux/rculist.h> 23 #include <linux/skbuff.h> 24 #include <linux/slab.h> 25 #include <linux/timer.h> 26 #include <net/ip.h> 27 #if IS_ENABLED(CONFIG_IPV6) 28 #include <net/ipv6.h> 29 #include <net/mld.h> 30 #include <net/addrconf.h> 31 #include <net/ip6_checksum.h> 32 #endif 33 34 #include "br_private.h" 35 36 #define mlock_dereference(X, br) \ 37 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) 38 39 #if IS_ENABLED(CONFIG_IPV6) 40 static inline int ipv6_is_transient_multicast(const struct in6_addr *addr) 41 { 42 if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr)) 43 return 1; 44 return 0; 45 } 46 #endif 47 48 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 49 { 50 if (a->proto != b->proto) 51 return 0; 52 switch (a->proto) { 53 case htons(ETH_P_IP): 54 return a->u.ip4 == b->u.ip4; 55 #if IS_ENABLED(CONFIG_IPV6) 56 case htons(ETH_P_IPV6): 57 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6); 58 #endif 59 } 60 return 0; 61 } 62 63 static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) 64 { 65 return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1); 66 } 67 68 #if IS_ENABLED(CONFIG_IPV6) 69 static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, 70 const struct in6_addr *ip) 71 { 72 return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1); 73 } 74 #endif 75 76 static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, 77 struct br_ip *ip) 78 { 79 switch (ip->proto) { 80 case htons(ETH_P_IP): 81 return __br_ip4_hash(mdb, ip->u.ip4); 82 #if IS_ENABLED(CONFIG_IPV6) 83 case htons(ETH_P_IPV6): 84 return __br_ip6_hash(mdb, &ip->u.ip6); 85 #endif 86 } 87 return 0; 88 } 89 90 static struct net_bridge_mdb_entry *__br_mdb_ip_get( 91 struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) 92 { 93 struct net_bridge_mdb_entry *mp; 94 struct hlist_node *p; 95 96 hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { 97 if (br_ip_equal(&mp->addr, dst)) 98 return mp; 99 } 100 101 return NULL; 102 } 103 104 static struct net_bridge_mdb_entry *br_mdb_ip_get( 105 struct net_bridge_mdb_htable *mdb, struct br_ip *dst) 106 { 107 if (!mdb) 108 return NULL; 109 110 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst)); 111 } 112 113 static struct net_bridge_mdb_entry *br_mdb_ip4_get( 114 struct net_bridge_mdb_htable *mdb, __be32 dst) 115 { 116 struct br_ip br_dst; 117 118 br_dst.u.ip4 = dst; 119 br_dst.proto = htons(ETH_P_IP); 120 121 return br_mdb_ip_get(mdb, &br_dst); 122 } 123 124 #if IS_ENABLED(CONFIG_IPV6) 125 static struct net_bridge_mdb_entry *br_mdb_ip6_get( 126 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst) 127 { 128 struct br_ip br_dst; 129 130 br_dst.u.ip6 = *dst; 131 br_dst.proto = htons(ETH_P_IPV6); 132 133 return br_mdb_ip_get(mdb, &br_dst); 134 } 135 #endif 136 137 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 138 struct sk_buff *skb) 139 { 140 struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); 141 struct br_ip ip; 142 143 if (br->multicast_disabled) 144 return NULL; 145 146 if (BR_INPUT_SKB_CB(skb)->igmp) 147 return NULL; 148 149 ip.proto = skb->protocol; 150 151 switch (skb->protocol) { 152 case htons(ETH_P_IP): 153 ip.u.ip4 = ip_hdr(skb)->daddr; 154 break; 155 #if IS_ENABLED(CONFIG_IPV6) 156 case htons(ETH_P_IPV6): 157 ip.u.ip6 = ipv6_hdr(skb)->daddr; 158 break; 159 #endif 160 default: 161 return NULL; 162 } 163 164 return br_mdb_ip_get(mdb, &ip); 165 } 166 167 static void br_mdb_free(struct rcu_head *head) 168 { 169 struct net_bridge_mdb_htable *mdb = 170 container_of(head, struct net_bridge_mdb_htable, rcu); 171 struct net_bridge_mdb_htable *old = mdb->old; 172 173 mdb->old = NULL; 174 kfree(old->mhash); 175 kfree(old); 176 } 177 178 static int br_mdb_copy(struct net_bridge_mdb_htable *new, 179 struct net_bridge_mdb_htable *old, 180 int elasticity) 181 { 182 struct net_bridge_mdb_entry *mp; 183 struct hlist_node *p; 184 int maxlen; 185 int len; 186 int i; 187 188 for (i = 0; i < old->max; i++) 189 hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver]) 190 hlist_add_head(&mp->hlist[new->ver], 191 &new->mhash[br_ip_hash(new, &mp->addr)]); 192 193 if (!elasticity) 194 return 0; 195 196 maxlen = 0; 197 for (i = 0; i < new->max; i++) { 198 len = 0; 199 hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver]) 200 len++; 201 if (len > maxlen) 202 maxlen = len; 203 } 204 205 return maxlen > elasticity ? -EINVAL : 0; 206 } 207 208 static void br_multicast_free_pg(struct rcu_head *head) 209 { 210 struct net_bridge_port_group *p = 211 container_of(head, struct net_bridge_port_group, rcu); 212 213 kfree(p); 214 } 215 216 static void br_multicast_free_group(struct rcu_head *head) 217 { 218 struct net_bridge_mdb_entry *mp = 219 container_of(head, struct net_bridge_mdb_entry, rcu); 220 221 kfree(mp); 222 } 223 224 static void br_multicast_group_expired(unsigned long data) 225 { 226 struct net_bridge_mdb_entry *mp = (void *)data; 227 struct net_bridge *br = mp->br; 228 struct net_bridge_mdb_htable *mdb; 229 230 spin_lock(&br->multicast_lock); 231 if (!netif_running(br->dev) || timer_pending(&mp->timer)) 232 goto out; 233 234 mp->mglist = false; 235 236 if (mp->ports) 237 goto out; 238 239 mdb = mlock_dereference(br->mdb, br); 240 241 hlist_del_rcu(&mp->hlist[mdb->ver]); 242 mdb->size--; 243 244 del_timer(&mp->query_timer); 245 call_rcu_bh(&mp->rcu, br_multicast_free_group); 246 247 out: 248 spin_unlock(&br->multicast_lock); 249 } 250 251 static void br_multicast_del_pg(struct net_bridge *br, 252 struct net_bridge_port_group *pg) 253 { 254 struct net_bridge_mdb_htable *mdb; 255 struct net_bridge_mdb_entry *mp; 256 struct net_bridge_port_group *p; 257 struct net_bridge_port_group __rcu **pp; 258 259 mdb = mlock_dereference(br->mdb, br); 260 261 mp = br_mdb_ip_get(mdb, &pg->addr); 262 if (WARN_ON(!mp)) 263 return; 264 265 for (pp = &mp->ports; 266 (p = mlock_dereference(*pp, br)) != NULL; 267 pp = &p->next) { 268 if (p != pg) 269 continue; 270 271 rcu_assign_pointer(*pp, p->next); 272 hlist_del_init(&p->mglist); 273 del_timer(&p->timer); 274 del_timer(&p->query_timer); 275 call_rcu_bh(&p->rcu, br_multicast_free_pg); 276 277 if (!mp->ports && !mp->mglist && 278 netif_running(br->dev)) 279 mod_timer(&mp->timer, jiffies); 280 281 return; 282 } 283 284 WARN_ON(1); 285 } 286 287 static void br_multicast_port_group_expired(unsigned long data) 288 { 289 struct net_bridge_port_group *pg = (void *)data; 290 struct net_bridge *br = pg->port->br; 291 292 spin_lock(&br->multicast_lock); 293 if (!netif_running(br->dev) || timer_pending(&pg->timer) || 294 hlist_unhashed(&pg->mglist)) 295 goto out; 296 297 br_multicast_del_pg(br, pg); 298 299 out: 300 spin_unlock(&br->multicast_lock); 301 } 302 303 static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max, 304 int elasticity) 305 { 306 struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1); 307 struct net_bridge_mdb_htable *mdb; 308 int err; 309 310 mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC); 311 if (!mdb) 312 return -ENOMEM; 313 314 mdb->max = max; 315 mdb->old = old; 316 317 mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC); 318 if (!mdb->mhash) { 319 kfree(mdb); 320 return -ENOMEM; 321 } 322 323 mdb->size = old ? old->size : 0; 324 mdb->ver = old ? old->ver ^ 1 : 0; 325 326 if (!old || elasticity) 327 get_random_bytes(&mdb->secret, sizeof(mdb->secret)); 328 else 329 mdb->secret = old->secret; 330 331 if (!old) 332 goto out; 333 334 err = br_mdb_copy(mdb, old, elasticity); 335 if (err) { 336 kfree(mdb->mhash); 337 kfree(mdb); 338 return err; 339 } 340 341 call_rcu_bh(&mdb->rcu, br_mdb_free); 342 343 out: 344 rcu_assign_pointer(*mdbp, mdb); 345 346 return 0; 347 } 348 349 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, 350 __be32 group) 351 { 352 struct sk_buff *skb; 353 struct igmphdr *ih; 354 struct ethhdr *eth; 355 struct iphdr *iph; 356 357 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) + 358 sizeof(*ih) + 4); 359 if (!skb) 360 goto out; 361 362 skb->protocol = htons(ETH_P_IP); 363 364 skb_reset_mac_header(skb); 365 eth = eth_hdr(skb); 366 367 memcpy(eth->h_source, br->dev->dev_addr, 6); 368 eth->h_dest[0] = 1; 369 eth->h_dest[1] = 0; 370 eth->h_dest[2] = 0x5e; 371 eth->h_dest[3] = 0; 372 eth->h_dest[4] = 0; 373 eth->h_dest[5] = 1; 374 eth->h_proto = htons(ETH_P_IP); 375 skb_put(skb, sizeof(*eth)); 376 377 skb_set_network_header(skb, skb->len); 378 iph = ip_hdr(skb); 379 380 iph->version = 4; 381 iph->ihl = 6; 382 iph->tos = 0xc0; 383 iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4); 384 iph->id = 0; 385 iph->frag_off = htons(IP_DF); 386 iph->ttl = 1; 387 iph->protocol = IPPROTO_IGMP; 388 iph->saddr = 0; 389 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); 390 ((u8 *)&iph[1])[0] = IPOPT_RA; 391 ((u8 *)&iph[1])[1] = 4; 392 ((u8 *)&iph[1])[2] = 0; 393 ((u8 *)&iph[1])[3] = 0; 394 ip_send_check(iph); 395 skb_put(skb, 24); 396 397 skb_set_transport_header(skb, skb->len); 398 ih = igmp_hdr(skb); 399 ih->type = IGMP_HOST_MEMBERSHIP_QUERY; 400 ih->code = (group ? br->multicast_last_member_interval : 401 br->multicast_query_response_interval) / 402 (HZ / IGMP_TIMER_SCALE); 403 ih->group = group; 404 ih->csum = 0; 405 ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr)); 406 skb_put(skb, sizeof(*ih)); 407 408 __skb_pull(skb, sizeof(*eth)); 409 410 out: 411 return skb; 412 } 413 414 #if IS_ENABLED(CONFIG_IPV6) 415 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, 416 const struct in6_addr *group) 417 { 418 struct sk_buff *skb; 419 struct ipv6hdr *ip6h; 420 struct mld_msg *mldq; 421 struct ethhdr *eth; 422 u8 *hopopt; 423 unsigned long interval; 424 425 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) + 426 8 + sizeof(*mldq)); 427 if (!skb) 428 goto out; 429 430 skb->protocol = htons(ETH_P_IPV6); 431 432 /* Ethernet header */ 433 skb_reset_mac_header(skb); 434 eth = eth_hdr(skb); 435 436 memcpy(eth->h_source, br->dev->dev_addr, 6); 437 eth->h_proto = htons(ETH_P_IPV6); 438 skb_put(skb, sizeof(*eth)); 439 440 /* IPv6 header + HbH option */ 441 skb_set_network_header(skb, skb->len); 442 ip6h = ipv6_hdr(skb); 443 444 *(__force __be32 *)ip6h = htonl(0x60000000); 445 ip6h->payload_len = htons(8 + sizeof(*mldq)); 446 ip6h->nexthdr = IPPROTO_HOPOPTS; 447 ip6h->hop_limit = 1; 448 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); 449 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, 450 &ip6h->saddr)) { 451 kfree_skb(skb); 452 return NULL; 453 } 454 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); 455 456 hopopt = (u8 *)(ip6h + 1); 457 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ 458 hopopt[1] = 0; /* length of HbH */ 459 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */ 460 hopopt[3] = 2; /* Length of RA Option */ 461 hopopt[4] = 0; /* Type = 0x0000 (MLD) */ 462 hopopt[5] = 0; 463 hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */ 464 hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */ 465 466 skb_put(skb, sizeof(*ip6h) + 8); 467 468 /* ICMPv6 */ 469 skb_set_transport_header(skb, skb->len); 470 mldq = (struct mld_msg *) icmp6_hdr(skb); 471 472 interval = ipv6_addr_any(group) ? br->multicast_last_member_interval : 473 br->multicast_query_response_interval; 474 475 mldq->mld_type = ICMPV6_MGM_QUERY; 476 mldq->mld_code = 0; 477 mldq->mld_cksum = 0; 478 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); 479 mldq->mld_reserved = 0; 480 mldq->mld_mca = *group; 481 482 /* checksum */ 483 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 484 sizeof(*mldq), IPPROTO_ICMPV6, 485 csum_partial(mldq, 486 sizeof(*mldq), 0)); 487 skb_put(skb, sizeof(*mldq)); 488 489 __skb_pull(skb, sizeof(*eth)); 490 491 out: 492 return skb; 493 } 494 #endif 495 496 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, 497 struct br_ip *addr) 498 { 499 switch (addr->proto) { 500 case htons(ETH_P_IP): 501 return br_ip4_multicast_alloc_query(br, addr->u.ip4); 502 #if IS_ENABLED(CONFIG_IPV6) 503 case htons(ETH_P_IPV6): 504 return br_ip6_multicast_alloc_query(br, &addr->u.ip6); 505 #endif 506 } 507 return NULL; 508 } 509 510 static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp) 511 { 512 struct net_bridge *br = mp->br; 513 struct sk_buff *skb; 514 515 skb = br_multicast_alloc_query(br, &mp->addr); 516 if (!skb) 517 goto timer; 518 519 netif_rx(skb); 520 521 timer: 522 if (++mp->queries_sent < br->multicast_last_member_count) 523 mod_timer(&mp->query_timer, 524 jiffies + br->multicast_last_member_interval); 525 } 526 527 static void br_multicast_group_query_expired(unsigned long data) 528 { 529 struct net_bridge_mdb_entry *mp = (void *)data; 530 struct net_bridge *br = mp->br; 531 532 spin_lock(&br->multicast_lock); 533 if (!netif_running(br->dev) || !mp->mglist || 534 mp->queries_sent >= br->multicast_last_member_count) 535 goto out; 536 537 br_multicast_send_group_query(mp); 538 539 out: 540 spin_unlock(&br->multicast_lock); 541 } 542 543 static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg) 544 { 545 struct net_bridge_port *port = pg->port; 546 struct net_bridge *br = port->br; 547 struct sk_buff *skb; 548 549 skb = br_multicast_alloc_query(br, &pg->addr); 550 if (!skb) 551 goto timer; 552 553 br_deliver(port, skb); 554 555 timer: 556 if (++pg->queries_sent < br->multicast_last_member_count) 557 mod_timer(&pg->query_timer, 558 jiffies + br->multicast_last_member_interval); 559 } 560 561 static void br_multicast_port_group_query_expired(unsigned long data) 562 { 563 struct net_bridge_port_group *pg = (void *)data; 564 struct net_bridge_port *port = pg->port; 565 struct net_bridge *br = port->br; 566 567 spin_lock(&br->multicast_lock); 568 if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) || 569 pg->queries_sent >= br->multicast_last_member_count) 570 goto out; 571 572 br_multicast_send_port_group_query(pg); 573 574 out: 575 spin_unlock(&br->multicast_lock); 576 } 577 578 static struct net_bridge_mdb_entry *br_multicast_get_group( 579 struct net_bridge *br, struct net_bridge_port *port, 580 struct br_ip *group, int hash) 581 { 582 struct net_bridge_mdb_htable *mdb; 583 struct net_bridge_mdb_entry *mp; 584 struct hlist_node *p; 585 unsigned count = 0; 586 unsigned max; 587 int elasticity; 588 int err; 589 590 mdb = rcu_dereference_protected(br->mdb, 1); 591 hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { 592 count++; 593 if (unlikely(br_ip_equal(group, &mp->addr))) 594 return mp; 595 } 596 597 elasticity = 0; 598 max = mdb->max; 599 600 if (unlikely(count > br->hash_elasticity && count)) { 601 if (net_ratelimit()) 602 br_info(br, "Multicast hash table " 603 "chain limit reached: %s\n", 604 port ? port->dev->name : br->dev->name); 605 606 elasticity = br->hash_elasticity; 607 } 608 609 if (mdb->size >= max) { 610 max *= 2; 611 if (unlikely(max >= br->hash_max)) { 612 br_warn(br, "Multicast hash table maximum " 613 "reached, disabling snooping: %s, %d\n", 614 port ? port->dev->name : br->dev->name, max); 615 err = -E2BIG; 616 disable: 617 br->multicast_disabled = 1; 618 goto err; 619 } 620 } 621 622 if (max > mdb->max || elasticity) { 623 if (mdb->old) { 624 if (net_ratelimit()) 625 br_info(br, "Multicast hash table " 626 "on fire: %s\n", 627 port ? port->dev->name : br->dev->name); 628 err = -EEXIST; 629 goto err; 630 } 631 632 err = br_mdb_rehash(&br->mdb, max, elasticity); 633 if (err) { 634 br_warn(br, "Cannot rehash multicast " 635 "hash table, disabling snooping: %s, %d, %d\n", 636 port ? port->dev->name : br->dev->name, 637 mdb->size, err); 638 goto disable; 639 } 640 641 err = -EAGAIN; 642 goto err; 643 } 644 645 return NULL; 646 647 err: 648 mp = ERR_PTR(err); 649 return mp; 650 } 651 652 static struct net_bridge_mdb_entry *br_multicast_new_group( 653 struct net_bridge *br, struct net_bridge_port *port, 654 struct br_ip *group) 655 { 656 struct net_bridge_mdb_htable *mdb; 657 struct net_bridge_mdb_entry *mp; 658 int hash; 659 int err; 660 661 mdb = rcu_dereference_protected(br->mdb, 1); 662 if (!mdb) { 663 err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0); 664 if (err) 665 return ERR_PTR(err); 666 goto rehash; 667 } 668 669 hash = br_ip_hash(mdb, group); 670 mp = br_multicast_get_group(br, port, group, hash); 671 switch (PTR_ERR(mp)) { 672 case 0: 673 break; 674 675 case -EAGAIN: 676 rehash: 677 mdb = rcu_dereference_protected(br->mdb, 1); 678 hash = br_ip_hash(mdb, group); 679 break; 680 681 default: 682 goto out; 683 } 684 685 mp = kzalloc(sizeof(*mp), GFP_ATOMIC); 686 if (unlikely(!mp)) 687 return ERR_PTR(-ENOMEM); 688 689 mp->br = br; 690 mp->addr = *group; 691 setup_timer(&mp->timer, br_multicast_group_expired, 692 (unsigned long)mp); 693 setup_timer(&mp->query_timer, br_multicast_group_query_expired, 694 (unsigned long)mp); 695 696 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); 697 mdb->size++; 698 699 out: 700 return mp; 701 } 702 703 static int br_multicast_add_group(struct net_bridge *br, 704 struct net_bridge_port *port, 705 struct br_ip *group) 706 { 707 struct net_bridge_mdb_entry *mp; 708 struct net_bridge_port_group *p; 709 struct net_bridge_port_group __rcu **pp; 710 unsigned long now = jiffies; 711 int err; 712 713 spin_lock(&br->multicast_lock); 714 if (!netif_running(br->dev) || 715 (port && port->state == BR_STATE_DISABLED)) 716 goto out; 717 718 mp = br_multicast_new_group(br, port, group); 719 err = PTR_ERR(mp); 720 if (IS_ERR(mp)) 721 goto err; 722 723 if (!port) { 724 mp->mglist = true; 725 mod_timer(&mp->timer, now + br->multicast_membership_interval); 726 goto out; 727 } 728 729 for (pp = &mp->ports; 730 (p = mlock_dereference(*pp, br)) != NULL; 731 pp = &p->next) { 732 if (p->port == port) 733 goto found; 734 if ((unsigned long)p->port < (unsigned long)port) 735 break; 736 } 737 738 p = kzalloc(sizeof(*p), GFP_ATOMIC); 739 err = -ENOMEM; 740 if (unlikely(!p)) 741 goto err; 742 743 p->addr = *group; 744 p->port = port; 745 p->next = *pp; 746 hlist_add_head(&p->mglist, &port->mglist); 747 setup_timer(&p->timer, br_multicast_port_group_expired, 748 (unsigned long)p); 749 setup_timer(&p->query_timer, br_multicast_port_group_query_expired, 750 (unsigned long)p); 751 752 rcu_assign_pointer(*pp, p); 753 754 found: 755 mod_timer(&p->timer, now + br->multicast_membership_interval); 756 out: 757 err = 0; 758 759 err: 760 spin_unlock(&br->multicast_lock); 761 return err; 762 } 763 764 static int br_ip4_multicast_add_group(struct net_bridge *br, 765 struct net_bridge_port *port, 766 __be32 group) 767 { 768 struct br_ip br_group; 769 770 if (ipv4_is_local_multicast(group)) 771 return 0; 772 773 br_group.u.ip4 = group; 774 br_group.proto = htons(ETH_P_IP); 775 776 return br_multicast_add_group(br, port, &br_group); 777 } 778 779 #if IS_ENABLED(CONFIG_IPV6) 780 static int br_ip6_multicast_add_group(struct net_bridge *br, 781 struct net_bridge_port *port, 782 const struct in6_addr *group) 783 { 784 struct br_ip br_group; 785 786 if (!ipv6_is_transient_multicast(group)) 787 return 0; 788 789 br_group.u.ip6 = *group; 790 br_group.proto = htons(ETH_P_IPV6); 791 792 return br_multicast_add_group(br, port, &br_group); 793 } 794 #endif 795 796 static void br_multicast_router_expired(unsigned long data) 797 { 798 struct net_bridge_port *port = (void *)data; 799 struct net_bridge *br = port->br; 800 801 spin_lock(&br->multicast_lock); 802 if (port->multicast_router != 1 || 803 timer_pending(&port->multicast_router_timer) || 804 hlist_unhashed(&port->rlist)) 805 goto out; 806 807 hlist_del_init_rcu(&port->rlist); 808 809 out: 810 spin_unlock(&br->multicast_lock); 811 } 812 813 static void br_multicast_local_router_expired(unsigned long data) 814 { 815 } 816 817 static void __br_multicast_send_query(struct net_bridge *br, 818 struct net_bridge_port *port, 819 struct br_ip *ip) 820 { 821 struct sk_buff *skb; 822 823 skb = br_multicast_alloc_query(br, ip); 824 if (!skb) 825 return; 826 827 if (port) { 828 __skb_push(skb, sizeof(struct ethhdr)); 829 skb->dev = port->dev; 830 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 831 dev_queue_xmit); 832 } else 833 netif_rx(skb); 834 } 835 836 static void br_multicast_send_query(struct net_bridge *br, 837 struct net_bridge_port *port, u32 sent) 838 { 839 unsigned long time; 840 struct br_ip br_group; 841 842 if (!netif_running(br->dev) || br->multicast_disabled || 843 timer_pending(&br->multicast_querier_timer)) 844 return; 845 846 memset(&br_group.u, 0, sizeof(br_group.u)); 847 848 br_group.proto = htons(ETH_P_IP); 849 __br_multicast_send_query(br, port, &br_group); 850 851 #if IS_ENABLED(CONFIG_IPV6) 852 br_group.proto = htons(ETH_P_IPV6); 853 __br_multicast_send_query(br, port, &br_group); 854 #endif 855 856 time = jiffies; 857 time += sent < br->multicast_startup_query_count ? 858 br->multicast_startup_query_interval : 859 br->multicast_query_interval; 860 mod_timer(port ? &port->multicast_query_timer : 861 &br->multicast_query_timer, time); 862 } 863 864 static void br_multicast_port_query_expired(unsigned long data) 865 { 866 struct net_bridge_port *port = (void *)data; 867 struct net_bridge *br = port->br; 868 869 spin_lock(&br->multicast_lock); 870 if (port->state == BR_STATE_DISABLED || 871 port->state == BR_STATE_BLOCKING) 872 goto out; 873 874 if (port->multicast_startup_queries_sent < 875 br->multicast_startup_query_count) 876 port->multicast_startup_queries_sent++; 877 878 br_multicast_send_query(port->br, port, 879 port->multicast_startup_queries_sent); 880 881 out: 882 spin_unlock(&br->multicast_lock); 883 } 884 885 void br_multicast_add_port(struct net_bridge_port *port) 886 { 887 port->multicast_router = 1; 888 889 setup_timer(&port->multicast_router_timer, br_multicast_router_expired, 890 (unsigned long)port); 891 setup_timer(&port->multicast_query_timer, 892 br_multicast_port_query_expired, (unsigned long)port); 893 } 894 895 void br_multicast_del_port(struct net_bridge_port *port) 896 { 897 del_timer_sync(&port->multicast_router_timer); 898 } 899 900 static void __br_multicast_enable_port(struct net_bridge_port *port) 901 { 902 port->multicast_startup_queries_sent = 0; 903 904 if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 || 905 del_timer(&port->multicast_query_timer)) 906 mod_timer(&port->multicast_query_timer, jiffies); 907 } 908 909 void br_multicast_enable_port(struct net_bridge_port *port) 910 { 911 struct net_bridge *br = port->br; 912 913 spin_lock(&br->multicast_lock); 914 if (br->multicast_disabled || !netif_running(br->dev)) 915 goto out; 916 917 __br_multicast_enable_port(port); 918 919 out: 920 spin_unlock(&br->multicast_lock); 921 } 922 923 void br_multicast_disable_port(struct net_bridge_port *port) 924 { 925 struct net_bridge *br = port->br; 926 struct net_bridge_port_group *pg; 927 struct hlist_node *p, *n; 928 929 spin_lock(&br->multicast_lock); 930 hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist) 931 br_multicast_del_pg(br, pg); 932 933 if (!hlist_unhashed(&port->rlist)) 934 hlist_del_init_rcu(&port->rlist); 935 del_timer(&port->multicast_router_timer); 936 del_timer(&port->multicast_query_timer); 937 spin_unlock(&br->multicast_lock); 938 } 939 940 static int br_ip4_multicast_igmp3_report(struct net_bridge *br, 941 struct net_bridge_port *port, 942 struct sk_buff *skb) 943 { 944 struct igmpv3_report *ih; 945 struct igmpv3_grec *grec; 946 int i; 947 int len; 948 int num; 949 int type; 950 int err = 0; 951 __be32 group; 952 953 if (!pskb_may_pull(skb, sizeof(*ih))) 954 return -EINVAL; 955 956 ih = igmpv3_report_hdr(skb); 957 num = ntohs(ih->ngrec); 958 len = sizeof(*ih); 959 960 for (i = 0; i < num; i++) { 961 len += sizeof(*grec); 962 if (!pskb_may_pull(skb, len)) 963 return -EINVAL; 964 965 grec = (void *)(skb->data + len - sizeof(*grec)); 966 group = grec->grec_mca; 967 type = grec->grec_type; 968 969 len += ntohs(grec->grec_nsrcs) * 4; 970 if (!pskb_may_pull(skb, len)) 971 return -EINVAL; 972 973 /* We treat this as an IGMPv2 report for now. */ 974 switch (type) { 975 case IGMPV3_MODE_IS_INCLUDE: 976 case IGMPV3_MODE_IS_EXCLUDE: 977 case IGMPV3_CHANGE_TO_INCLUDE: 978 case IGMPV3_CHANGE_TO_EXCLUDE: 979 case IGMPV3_ALLOW_NEW_SOURCES: 980 case IGMPV3_BLOCK_OLD_SOURCES: 981 break; 982 983 default: 984 continue; 985 } 986 987 err = br_ip4_multicast_add_group(br, port, group); 988 if (err) 989 break; 990 } 991 992 return err; 993 } 994 995 #if IS_ENABLED(CONFIG_IPV6) 996 static int br_ip6_multicast_mld2_report(struct net_bridge *br, 997 struct net_bridge_port *port, 998 struct sk_buff *skb) 999 { 1000 struct icmp6hdr *icmp6h; 1001 struct mld2_grec *grec; 1002 int i; 1003 int len; 1004 int num; 1005 int err = 0; 1006 1007 if (!pskb_may_pull(skb, sizeof(*icmp6h))) 1008 return -EINVAL; 1009 1010 icmp6h = icmp6_hdr(skb); 1011 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 1012 len = sizeof(*icmp6h); 1013 1014 for (i = 0; i < num; i++) { 1015 __be16 *nsrcs, _nsrcs; 1016 1017 nsrcs = skb_header_pointer(skb, 1018 len + offsetof(struct mld2_grec, 1019 grec_nsrcs), 1020 sizeof(_nsrcs), &_nsrcs); 1021 if (!nsrcs) 1022 return -EINVAL; 1023 1024 if (!pskb_may_pull(skb, 1025 len + sizeof(*grec) + 1026 sizeof(struct in6_addr) * ntohs(*nsrcs))) 1027 return -EINVAL; 1028 1029 grec = (struct mld2_grec *)(skb->data + len); 1030 len += sizeof(*grec) + 1031 sizeof(struct in6_addr) * ntohs(*nsrcs); 1032 1033 /* We treat these as MLDv1 reports for now. */ 1034 switch (grec->grec_type) { 1035 case MLD2_MODE_IS_INCLUDE: 1036 case MLD2_MODE_IS_EXCLUDE: 1037 case MLD2_CHANGE_TO_INCLUDE: 1038 case MLD2_CHANGE_TO_EXCLUDE: 1039 case MLD2_ALLOW_NEW_SOURCES: 1040 case MLD2_BLOCK_OLD_SOURCES: 1041 break; 1042 1043 default: 1044 continue; 1045 } 1046 1047 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca); 1048 if (!err) 1049 break; 1050 } 1051 1052 return err; 1053 } 1054 #endif 1055 1056 /* 1057 * Add port to rotuer_list 1058 * list is maintained ordered by pointer value 1059 * and locked by br->multicast_lock and RCU 1060 */ 1061 static void br_multicast_add_router(struct net_bridge *br, 1062 struct net_bridge_port *port) 1063 { 1064 struct net_bridge_port *p; 1065 struct hlist_node *n, *slot = NULL; 1066 1067 hlist_for_each_entry(p, n, &br->router_list, rlist) { 1068 if ((unsigned long) port >= (unsigned long) p) 1069 break; 1070 slot = n; 1071 } 1072 1073 if (slot) 1074 hlist_add_after_rcu(slot, &port->rlist); 1075 else 1076 hlist_add_head_rcu(&port->rlist, &br->router_list); 1077 } 1078 1079 static void br_multicast_mark_router(struct net_bridge *br, 1080 struct net_bridge_port *port) 1081 { 1082 unsigned long now = jiffies; 1083 1084 if (!port) { 1085 if (br->multicast_router == 1) 1086 mod_timer(&br->multicast_router_timer, 1087 now + br->multicast_querier_interval); 1088 return; 1089 } 1090 1091 if (port->multicast_router != 1) 1092 return; 1093 1094 if (!hlist_unhashed(&port->rlist)) 1095 goto timer; 1096 1097 br_multicast_add_router(br, port); 1098 1099 timer: 1100 mod_timer(&port->multicast_router_timer, 1101 now + br->multicast_querier_interval); 1102 } 1103 1104 static void br_multicast_query_received(struct net_bridge *br, 1105 struct net_bridge_port *port, 1106 int saddr) 1107 { 1108 if (saddr) 1109 mod_timer(&br->multicast_querier_timer, 1110 jiffies + br->multicast_querier_interval); 1111 else if (timer_pending(&br->multicast_querier_timer)) 1112 return; 1113 1114 br_multicast_mark_router(br, port); 1115 } 1116 1117 static int br_ip4_multicast_query(struct net_bridge *br, 1118 struct net_bridge_port *port, 1119 struct sk_buff *skb) 1120 { 1121 const struct iphdr *iph = ip_hdr(skb); 1122 struct igmphdr *ih = igmp_hdr(skb); 1123 struct net_bridge_mdb_entry *mp; 1124 struct igmpv3_query *ih3; 1125 struct net_bridge_port_group *p; 1126 struct net_bridge_port_group __rcu **pp; 1127 unsigned long max_delay; 1128 unsigned long now = jiffies; 1129 __be32 group; 1130 int err = 0; 1131 1132 spin_lock(&br->multicast_lock); 1133 if (!netif_running(br->dev) || 1134 (port && port->state == BR_STATE_DISABLED)) 1135 goto out; 1136 1137 br_multicast_query_received(br, port, !!iph->saddr); 1138 1139 group = ih->group; 1140 1141 if (skb->len == sizeof(*ih)) { 1142 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE); 1143 1144 if (!max_delay) { 1145 max_delay = 10 * HZ; 1146 group = 0; 1147 } 1148 } else { 1149 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) { 1150 err = -EINVAL; 1151 goto out; 1152 } 1153 1154 ih3 = igmpv3_query_hdr(skb); 1155 if (ih3->nsrcs) 1156 goto out; 1157 1158 max_delay = ih3->code ? 1159 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; 1160 } 1161 1162 if (!group) 1163 goto out; 1164 1165 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group); 1166 if (!mp) 1167 goto out; 1168 1169 max_delay *= br->multicast_last_member_count; 1170 1171 if (mp->mglist && 1172 (timer_pending(&mp->timer) ? 1173 time_after(mp->timer.expires, now + max_delay) : 1174 try_to_del_timer_sync(&mp->timer) >= 0)) 1175 mod_timer(&mp->timer, now + max_delay); 1176 1177 for (pp = &mp->ports; 1178 (p = mlock_dereference(*pp, br)) != NULL; 1179 pp = &p->next) { 1180 if (timer_pending(&p->timer) ? 1181 time_after(p->timer.expires, now + max_delay) : 1182 try_to_del_timer_sync(&p->timer) >= 0) 1183 mod_timer(&p->timer, now + max_delay); 1184 } 1185 1186 out: 1187 spin_unlock(&br->multicast_lock); 1188 return err; 1189 } 1190 1191 #if IS_ENABLED(CONFIG_IPV6) 1192 static int br_ip6_multicast_query(struct net_bridge *br, 1193 struct net_bridge_port *port, 1194 struct sk_buff *skb) 1195 { 1196 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 1197 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); 1198 struct net_bridge_mdb_entry *mp; 1199 struct mld2_query *mld2q; 1200 struct net_bridge_port_group *p; 1201 struct net_bridge_port_group __rcu **pp; 1202 unsigned long max_delay; 1203 unsigned long now = jiffies; 1204 const struct in6_addr *group = NULL; 1205 int err = 0; 1206 1207 spin_lock(&br->multicast_lock); 1208 if (!netif_running(br->dev) || 1209 (port && port->state == BR_STATE_DISABLED)) 1210 goto out; 1211 1212 br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr)); 1213 1214 if (skb->len == sizeof(*mld)) { 1215 if (!pskb_may_pull(skb, sizeof(*mld))) { 1216 err = -EINVAL; 1217 goto out; 1218 } 1219 mld = (struct mld_msg *) icmp6_hdr(skb); 1220 max_delay = msecs_to_jiffies(htons(mld->mld_maxdelay)); 1221 if (max_delay) 1222 group = &mld->mld_mca; 1223 } else if (skb->len >= sizeof(*mld2q)) { 1224 if (!pskb_may_pull(skb, sizeof(*mld2q))) { 1225 err = -EINVAL; 1226 goto out; 1227 } 1228 mld2q = (struct mld2_query *)icmp6_hdr(skb); 1229 if (!mld2q->mld2q_nsrcs) 1230 group = &mld2q->mld2q_mca; 1231 max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1; 1232 } 1233 1234 if (!group) 1235 goto out; 1236 1237 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group); 1238 if (!mp) 1239 goto out; 1240 1241 max_delay *= br->multicast_last_member_count; 1242 if (mp->mglist && 1243 (timer_pending(&mp->timer) ? 1244 time_after(mp->timer.expires, now + max_delay) : 1245 try_to_del_timer_sync(&mp->timer) >= 0)) 1246 mod_timer(&mp->timer, now + max_delay); 1247 1248 for (pp = &mp->ports; 1249 (p = mlock_dereference(*pp, br)) != NULL; 1250 pp = &p->next) { 1251 if (timer_pending(&p->timer) ? 1252 time_after(p->timer.expires, now + max_delay) : 1253 try_to_del_timer_sync(&p->timer) >= 0) 1254 mod_timer(&p->timer, now + max_delay); 1255 } 1256 1257 out: 1258 spin_unlock(&br->multicast_lock); 1259 return err; 1260 } 1261 #endif 1262 1263 static void br_multicast_leave_group(struct net_bridge *br, 1264 struct net_bridge_port *port, 1265 struct br_ip *group) 1266 { 1267 struct net_bridge_mdb_htable *mdb; 1268 struct net_bridge_mdb_entry *mp; 1269 struct net_bridge_port_group *p; 1270 unsigned long now; 1271 unsigned long time; 1272 1273 spin_lock(&br->multicast_lock); 1274 if (!netif_running(br->dev) || 1275 (port && port->state == BR_STATE_DISABLED) || 1276 timer_pending(&br->multicast_querier_timer)) 1277 goto out; 1278 1279 mdb = mlock_dereference(br->mdb, br); 1280 mp = br_mdb_ip_get(mdb, group); 1281 if (!mp) 1282 goto out; 1283 1284 now = jiffies; 1285 time = now + br->multicast_last_member_count * 1286 br->multicast_last_member_interval; 1287 1288 if (!port) { 1289 if (mp->mglist && 1290 (timer_pending(&mp->timer) ? 1291 time_after(mp->timer.expires, time) : 1292 try_to_del_timer_sync(&mp->timer) >= 0)) { 1293 mod_timer(&mp->timer, time); 1294 1295 mp->queries_sent = 0; 1296 mod_timer(&mp->query_timer, now); 1297 } 1298 1299 goto out; 1300 } 1301 1302 for (p = mlock_dereference(mp->ports, br); 1303 p != NULL; 1304 p = mlock_dereference(p->next, br)) { 1305 if (p->port != port) 1306 continue; 1307 1308 if (!hlist_unhashed(&p->mglist) && 1309 (timer_pending(&p->timer) ? 1310 time_after(p->timer.expires, time) : 1311 try_to_del_timer_sync(&p->timer) >= 0)) { 1312 mod_timer(&p->timer, time); 1313 1314 p->queries_sent = 0; 1315 mod_timer(&p->query_timer, now); 1316 } 1317 1318 break; 1319 } 1320 1321 out: 1322 spin_unlock(&br->multicast_lock); 1323 } 1324 1325 static void br_ip4_multicast_leave_group(struct net_bridge *br, 1326 struct net_bridge_port *port, 1327 __be32 group) 1328 { 1329 struct br_ip br_group; 1330 1331 if (ipv4_is_local_multicast(group)) 1332 return; 1333 1334 br_group.u.ip4 = group; 1335 br_group.proto = htons(ETH_P_IP); 1336 1337 br_multicast_leave_group(br, port, &br_group); 1338 } 1339 1340 #if IS_ENABLED(CONFIG_IPV6) 1341 static void br_ip6_multicast_leave_group(struct net_bridge *br, 1342 struct net_bridge_port *port, 1343 const struct in6_addr *group) 1344 { 1345 struct br_ip br_group; 1346 1347 if (!ipv6_is_transient_multicast(group)) 1348 return; 1349 1350 br_group.u.ip6 = *group; 1351 br_group.proto = htons(ETH_P_IPV6); 1352 1353 br_multicast_leave_group(br, port, &br_group); 1354 } 1355 #endif 1356 1357 static int br_multicast_ipv4_rcv(struct net_bridge *br, 1358 struct net_bridge_port *port, 1359 struct sk_buff *skb) 1360 { 1361 struct sk_buff *skb2 = skb; 1362 const struct iphdr *iph; 1363 struct igmphdr *ih; 1364 unsigned len; 1365 unsigned offset; 1366 int err; 1367 1368 /* We treat OOM as packet loss for now. */ 1369 if (!pskb_may_pull(skb, sizeof(*iph))) 1370 return -EINVAL; 1371 1372 iph = ip_hdr(skb); 1373 1374 if (iph->ihl < 5 || iph->version != 4) 1375 return -EINVAL; 1376 1377 if (!pskb_may_pull(skb, ip_hdrlen(skb))) 1378 return -EINVAL; 1379 1380 iph = ip_hdr(skb); 1381 1382 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 1383 return -EINVAL; 1384 1385 if (iph->protocol != IPPROTO_IGMP) { 1386 if ((iph->daddr & IGMP_LOCAL_GROUP_MASK) != IGMP_LOCAL_GROUP) 1387 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1388 return 0; 1389 } 1390 1391 len = ntohs(iph->tot_len); 1392 if (skb->len < len || len < ip_hdrlen(skb)) 1393 return -EINVAL; 1394 1395 if (skb->len > len) { 1396 skb2 = skb_clone(skb, GFP_ATOMIC); 1397 if (!skb2) 1398 return -ENOMEM; 1399 1400 err = pskb_trim_rcsum(skb2, len); 1401 if (err) 1402 goto err_out; 1403 } 1404 1405 len -= ip_hdrlen(skb2); 1406 offset = skb_network_offset(skb2) + ip_hdrlen(skb2); 1407 __skb_pull(skb2, offset); 1408 skb_reset_transport_header(skb2); 1409 1410 err = -EINVAL; 1411 if (!pskb_may_pull(skb2, sizeof(*ih))) 1412 goto out; 1413 1414 switch (skb2->ip_summed) { 1415 case CHECKSUM_COMPLETE: 1416 if (!csum_fold(skb2->csum)) 1417 break; 1418 /* fall through */ 1419 case CHECKSUM_NONE: 1420 skb2->csum = 0; 1421 if (skb_checksum_complete(skb2)) 1422 goto out; 1423 } 1424 1425 err = 0; 1426 1427 BR_INPUT_SKB_CB(skb)->igmp = 1; 1428 ih = igmp_hdr(skb2); 1429 1430 switch (ih->type) { 1431 case IGMP_HOST_MEMBERSHIP_REPORT: 1432 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1433 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1434 err = br_ip4_multicast_add_group(br, port, ih->group); 1435 break; 1436 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1437 err = br_ip4_multicast_igmp3_report(br, port, skb2); 1438 break; 1439 case IGMP_HOST_MEMBERSHIP_QUERY: 1440 err = br_ip4_multicast_query(br, port, skb2); 1441 break; 1442 case IGMP_HOST_LEAVE_MESSAGE: 1443 br_ip4_multicast_leave_group(br, port, ih->group); 1444 break; 1445 } 1446 1447 out: 1448 __skb_push(skb2, offset); 1449 err_out: 1450 if (skb2 != skb) 1451 kfree_skb(skb2); 1452 return err; 1453 } 1454 1455 #if IS_ENABLED(CONFIG_IPV6) 1456 static int br_multicast_ipv6_rcv(struct net_bridge *br, 1457 struct net_bridge_port *port, 1458 struct sk_buff *skb) 1459 { 1460 struct sk_buff *skb2; 1461 const struct ipv6hdr *ip6h; 1462 u8 icmp6_type; 1463 u8 nexthdr; 1464 __be16 frag_off; 1465 unsigned len; 1466 int offset; 1467 int err; 1468 1469 if (!pskb_may_pull(skb, sizeof(*ip6h))) 1470 return -EINVAL; 1471 1472 ip6h = ipv6_hdr(skb); 1473 1474 /* 1475 * We're interested in MLD messages only. 1476 * - Version is 6 1477 * - MLD has always Router Alert hop-by-hop option 1478 * - But we do not support jumbrograms. 1479 */ 1480 if (ip6h->version != 6 || 1481 ip6h->nexthdr != IPPROTO_HOPOPTS || 1482 ip6h->payload_len == 0) 1483 return 0; 1484 1485 len = ntohs(ip6h->payload_len) + sizeof(*ip6h); 1486 if (skb->len < len) 1487 return -EINVAL; 1488 1489 nexthdr = ip6h->nexthdr; 1490 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off); 1491 1492 if (offset < 0 || nexthdr != IPPROTO_ICMPV6) 1493 return 0; 1494 1495 /* Okay, we found ICMPv6 header */ 1496 skb2 = skb_clone(skb, GFP_ATOMIC); 1497 if (!skb2) 1498 return -ENOMEM; 1499 1500 err = -EINVAL; 1501 if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr))) 1502 goto out; 1503 1504 len -= offset - skb_network_offset(skb2); 1505 1506 __skb_pull(skb2, offset); 1507 skb_reset_transport_header(skb2); 1508 skb_postpull_rcsum(skb2, skb_network_header(skb2), 1509 skb_network_header_len(skb2)); 1510 1511 icmp6_type = icmp6_hdr(skb2)->icmp6_type; 1512 1513 switch (icmp6_type) { 1514 case ICMPV6_MGM_QUERY: 1515 case ICMPV6_MGM_REPORT: 1516 case ICMPV6_MGM_REDUCTION: 1517 case ICMPV6_MLD2_REPORT: 1518 break; 1519 default: 1520 err = 0; 1521 goto out; 1522 } 1523 1524 /* Okay, we found MLD message. Check further. */ 1525 if (skb2->len > len) { 1526 err = pskb_trim_rcsum(skb2, len); 1527 if (err) 1528 goto out; 1529 err = -EINVAL; 1530 } 1531 1532 ip6h = ipv6_hdr(skb2); 1533 1534 switch (skb2->ip_summed) { 1535 case CHECKSUM_COMPLETE: 1536 if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len, 1537 IPPROTO_ICMPV6, skb2->csum)) 1538 break; 1539 /*FALLTHROUGH*/ 1540 case CHECKSUM_NONE: 1541 skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr, 1542 &ip6h->daddr, 1543 skb2->len, 1544 IPPROTO_ICMPV6, 0)); 1545 if (__skb_checksum_complete(skb2)) 1546 goto out; 1547 } 1548 1549 err = 0; 1550 1551 BR_INPUT_SKB_CB(skb)->igmp = 1; 1552 1553 switch (icmp6_type) { 1554 case ICMPV6_MGM_REPORT: 1555 { 1556 struct mld_msg *mld; 1557 if (!pskb_may_pull(skb2, sizeof(*mld))) { 1558 err = -EINVAL; 1559 goto out; 1560 } 1561 mld = (struct mld_msg *)skb_transport_header(skb2); 1562 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1563 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca); 1564 break; 1565 } 1566 case ICMPV6_MLD2_REPORT: 1567 err = br_ip6_multicast_mld2_report(br, port, skb2); 1568 break; 1569 case ICMPV6_MGM_QUERY: 1570 err = br_ip6_multicast_query(br, port, skb2); 1571 break; 1572 case ICMPV6_MGM_REDUCTION: 1573 { 1574 struct mld_msg *mld; 1575 if (!pskb_may_pull(skb2, sizeof(*mld))) { 1576 err = -EINVAL; 1577 goto out; 1578 } 1579 mld = (struct mld_msg *)skb_transport_header(skb2); 1580 br_ip6_multicast_leave_group(br, port, &mld->mld_mca); 1581 } 1582 } 1583 1584 out: 1585 kfree_skb(skb2); 1586 return err; 1587 } 1588 #endif 1589 1590 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, 1591 struct sk_buff *skb) 1592 { 1593 BR_INPUT_SKB_CB(skb)->igmp = 0; 1594 BR_INPUT_SKB_CB(skb)->mrouters_only = 0; 1595 1596 if (br->multicast_disabled) 1597 return 0; 1598 1599 switch (skb->protocol) { 1600 case htons(ETH_P_IP): 1601 return br_multicast_ipv4_rcv(br, port, skb); 1602 #if IS_ENABLED(CONFIG_IPV6) 1603 case htons(ETH_P_IPV6): 1604 return br_multicast_ipv6_rcv(br, port, skb); 1605 #endif 1606 } 1607 1608 return 0; 1609 } 1610 1611 static void br_multicast_query_expired(unsigned long data) 1612 { 1613 struct net_bridge *br = (void *)data; 1614 1615 spin_lock(&br->multicast_lock); 1616 if (br->multicast_startup_queries_sent < 1617 br->multicast_startup_query_count) 1618 br->multicast_startup_queries_sent++; 1619 1620 br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent); 1621 1622 spin_unlock(&br->multicast_lock); 1623 } 1624 1625 void br_multicast_init(struct net_bridge *br) 1626 { 1627 br->hash_elasticity = 4; 1628 br->hash_max = 512; 1629 1630 br->multicast_router = 1; 1631 br->multicast_last_member_count = 2; 1632 br->multicast_startup_query_count = 2; 1633 1634 br->multicast_last_member_interval = HZ; 1635 br->multicast_query_response_interval = 10 * HZ; 1636 br->multicast_startup_query_interval = 125 * HZ / 4; 1637 br->multicast_query_interval = 125 * HZ; 1638 br->multicast_querier_interval = 255 * HZ; 1639 br->multicast_membership_interval = 260 * HZ; 1640 1641 spin_lock_init(&br->multicast_lock); 1642 setup_timer(&br->multicast_router_timer, 1643 br_multicast_local_router_expired, 0); 1644 setup_timer(&br->multicast_querier_timer, 1645 br_multicast_local_router_expired, 0); 1646 setup_timer(&br->multicast_query_timer, br_multicast_query_expired, 1647 (unsigned long)br); 1648 } 1649 1650 void br_multicast_open(struct net_bridge *br) 1651 { 1652 br->multicast_startup_queries_sent = 0; 1653 1654 if (br->multicast_disabled) 1655 return; 1656 1657 mod_timer(&br->multicast_query_timer, jiffies); 1658 } 1659 1660 void br_multicast_stop(struct net_bridge *br) 1661 { 1662 struct net_bridge_mdb_htable *mdb; 1663 struct net_bridge_mdb_entry *mp; 1664 struct hlist_node *p, *n; 1665 u32 ver; 1666 int i; 1667 1668 del_timer_sync(&br->multicast_router_timer); 1669 del_timer_sync(&br->multicast_querier_timer); 1670 del_timer_sync(&br->multicast_query_timer); 1671 1672 spin_lock_bh(&br->multicast_lock); 1673 mdb = mlock_dereference(br->mdb, br); 1674 if (!mdb) 1675 goto out; 1676 1677 br->mdb = NULL; 1678 1679 ver = mdb->ver; 1680 for (i = 0; i < mdb->max; i++) { 1681 hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i], 1682 hlist[ver]) { 1683 del_timer(&mp->timer); 1684 del_timer(&mp->query_timer); 1685 call_rcu_bh(&mp->rcu, br_multicast_free_group); 1686 } 1687 } 1688 1689 if (mdb->old) { 1690 spin_unlock_bh(&br->multicast_lock); 1691 rcu_barrier_bh(); 1692 spin_lock_bh(&br->multicast_lock); 1693 WARN_ON(mdb->old); 1694 } 1695 1696 mdb->old = mdb; 1697 call_rcu_bh(&mdb->rcu, br_mdb_free); 1698 1699 out: 1700 spin_unlock_bh(&br->multicast_lock); 1701 } 1702 1703 int br_multicast_set_router(struct net_bridge *br, unsigned long val) 1704 { 1705 int err = -ENOENT; 1706 1707 spin_lock_bh(&br->multicast_lock); 1708 if (!netif_running(br->dev)) 1709 goto unlock; 1710 1711 switch (val) { 1712 case 0: 1713 case 2: 1714 del_timer(&br->multicast_router_timer); 1715 /* fall through */ 1716 case 1: 1717 br->multicast_router = val; 1718 err = 0; 1719 break; 1720 1721 default: 1722 err = -EINVAL; 1723 break; 1724 } 1725 1726 unlock: 1727 spin_unlock_bh(&br->multicast_lock); 1728 1729 return err; 1730 } 1731 1732 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val) 1733 { 1734 struct net_bridge *br = p->br; 1735 int err = -ENOENT; 1736 1737 spin_lock(&br->multicast_lock); 1738 if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED) 1739 goto unlock; 1740 1741 switch (val) { 1742 case 0: 1743 case 1: 1744 case 2: 1745 p->multicast_router = val; 1746 err = 0; 1747 1748 if (val < 2 && !hlist_unhashed(&p->rlist)) 1749 hlist_del_init_rcu(&p->rlist); 1750 1751 if (val == 1) 1752 break; 1753 1754 del_timer(&p->multicast_router_timer); 1755 1756 if (val == 0) 1757 break; 1758 1759 br_multicast_add_router(br, p); 1760 break; 1761 1762 default: 1763 err = -EINVAL; 1764 break; 1765 } 1766 1767 unlock: 1768 spin_unlock(&br->multicast_lock); 1769 1770 return err; 1771 } 1772 1773 int br_multicast_toggle(struct net_bridge *br, unsigned long val) 1774 { 1775 struct net_bridge_port *port; 1776 int err = 0; 1777 struct net_bridge_mdb_htable *mdb; 1778 1779 spin_lock_bh(&br->multicast_lock); 1780 if (br->multicast_disabled == !val) 1781 goto unlock; 1782 1783 br->multicast_disabled = !val; 1784 if (br->multicast_disabled) 1785 goto unlock; 1786 1787 if (!netif_running(br->dev)) 1788 goto unlock; 1789 1790 mdb = mlock_dereference(br->mdb, br); 1791 if (mdb) { 1792 if (mdb->old) { 1793 err = -EEXIST; 1794 rollback: 1795 br->multicast_disabled = !!val; 1796 goto unlock; 1797 } 1798 1799 err = br_mdb_rehash(&br->mdb, mdb->max, 1800 br->hash_elasticity); 1801 if (err) 1802 goto rollback; 1803 } 1804 1805 br_multicast_open(br); 1806 list_for_each_entry(port, &br->port_list, list) { 1807 if (port->state == BR_STATE_DISABLED || 1808 port->state == BR_STATE_BLOCKING) 1809 continue; 1810 1811 __br_multicast_enable_port(port); 1812 } 1813 1814 unlock: 1815 spin_unlock_bh(&br->multicast_lock); 1816 1817 return err; 1818 } 1819 1820 int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) 1821 { 1822 int err = -ENOENT; 1823 u32 old; 1824 struct net_bridge_mdb_htable *mdb; 1825 1826 spin_lock(&br->multicast_lock); 1827 if (!netif_running(br->dev)) 1828 goto unlock; 1829 1830 err = -EINVAL; 1831 if (!is_power_of_2(val)) 1832 goto unlock; 1833 1834 mdb = mlock_dereference(br->mdb, br); 1835 if (mdb && val < mdb->size) 1836 goto unlock; 1837 1838 err = 0; 1839 1840 old = br->hash_max; 1841 br->hash_max = val; 1842 1843 if (mdb) { 1844 if (mdb->old) { 1845 err = -EEXIST; 1846 rollback: 1847 br->hash_max = old; 1848 goto unlock; 1849 } 1850 1851 err = br_mdb_rehash(&br->mdb, br->hash_max, 1852 br->hash_elasticity); 1853 if (err) 1854 goto rollback; 1855 } 1856 1857 unlock: 1858 spin_unlock(&br->multicast_lock); 1859 1860 return err; 1861 } 1862