/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);

static void __del_port_router(struct net_bridge_port *p);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
unsigned int br_mdb_rehash_seq;

static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;
	if (a->vid != b->vid)
		return 0;
	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
	}
	return 0;
}

static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
				__u16 vid)
{
	return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip,
				__u16 vid)
{
	return jhash_2words(ipv6_addr_hash(ip), vid,
			    mdb->secret) & (mdb->max - 1);
}
#endif

static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
	}
	return 0;
}

static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
					   struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}

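/* Address-family-specific wrappers that build a struct br_ip key on the
 * stack and feed it to br_mdb_ip_get() above.
 */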
static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
	__u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif

struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}

static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}

static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}

void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
		container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
		container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}

static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
			      p->flags);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	WARN_ON(1);
}

static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}

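/* Resize the mdb hash table under RCU.  Every entry carries two hlist
 * pointers (hlist[0]/hlist[1]), so the copy links all entries on the
 * inactive version (old->ver ^ 1) while readers keep walking the current
 * one; the new table is then published with rcu_assign_pointer() and the
 * old one freed from an RCU callback.
 */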
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}

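/* Build an IGMP query originated by the bridge itself: destination
 * 224.0.0.1 (all-hosts group), TTL 1, an IP Router Alert option, and an
 * IGMPv2 or IGMPv3 body depending on br->multicast_igmp_version.
 */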
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group,
						    u8 *igmp_type)
{
	struct igmpv3_query *ihv3;
	size_t igmp_hdr_size;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	igmp_hdr_size = sizeof(*ih);
	if (br->multicast_igmp_version == 3)
		igmp_hdr_size = sizeof(*ihv3);
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 igmp_hdr_size + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br->multicast_query_use_ifaddr ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (br->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? br->multicast_last_member_interval :
				    br->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? br->multicast_last_member_interval :
				      br->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = br->multicast_query_interval / HZ;
		ihv3->nsrcs = 0;
		ihv3->resv = 0;
		ihv3->suppress = 0;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
		break;
	}

	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

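/* MLD counterpart of the query builder above: hop limit 1, a hop-by-hop
 * extension header carrying the Router Alert option, destination ff02::1,
 * and an MLDv1 or MLDv2 body.  A usable source address (normally
 * link-local) is mandatory; without one the query is dropped and
 * br->has_ipv6_addr is cleared.
 */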
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *grp,
						    u8 *igmp_type)
{
	struct mld2_query *mld2q;
	unsigned long interval;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	size_t mld_hdr_size;
	struct sk_buff *skb;
	struct ethhdr *eth;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (br->multicast_mld_version == 2)
		mld_hdr_size = sizeof(*mld2q);
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + mld_hdr_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		br->has_ipv6_addr = 0;
		return NULL;
	}

	br->has_ipv6_addr = 1;
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(grp) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (br->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *grp;
		mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  sizeof(*mldq), IPPROTO_ICMPV6,
						  csum_partial(mldq,
							       sizeof(*mldq),
							       0));
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = 0;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = 0;
		mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *grp;
		mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						     sizeof(*mld2q),
						     IPPROTO_ICMPV6,
						     csum_partial(mld2q,
								  sizeof(*mld2q),
								  0));
		break;
	}
	skb_put(skb, mld_hdr_size);

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr,
						u8 *igmp_type)
{
	switch (addr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
						    igmp_type);
#endif
	}
	return NULL;
}

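/* Look up a group on the write side.  Returns the existing entry, NULL if
 * the group is absent, or an ERR_PTR: -EAGAIN after a successful rehash
 * (the caller must recompute the hash), -EEXIST while an older rehash is
 * still in flight, and -E2BIG (with snooping disabled) once growing would
 * exceed hash_max.
 */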
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}

struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct net_bridge_port *p,
						    struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, p, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		break;

	case -EAGAIN:
rehash:
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->flags = flags;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		memset(p->eth_addr, 0xff, ETH_ALEN);

	return p;
}

static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group,
				  const unsigned char *src)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, port, src))
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0, src);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}

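/* Reports for 224.0.0.0/24 link-local groups are ignored; those groups
 * are always flooded rather than snooped.
 */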
static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group, src);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src)
{
	struct br_ip br_group;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group, src);
}
#endif

static void br_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_port *port =
		from_timer(port, t, multicast_router_timer);
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer))
		goto out;

	__del_port_router(port);
out:
	spin_unlock(&br->multicast_lock);
}

static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}

static void br_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, multicast_router_timer);

	spin_lock(&br->multicast_lock);
	if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    br->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&br->multicast_router_timer))
		goto out;

	br_mc_router_state_change(br, false);
out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif

static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

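/* Send one query.  With a port the skb goes out that port through the
 * NF_BR_LOCAL_OUT hook; without one the bridge records itself as querier
 * candidate and feeds the skb back via netif_rx() so it also traverses
 * the local RX path.
 */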
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;
	u8 igmp_type;

	skb = br_multicast_alloc_query(br, ip, &igmp_type);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);
	} else {
		br_multicast_select_own_querier(br, ip, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier)
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, &br_group);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif

static void br_mc_disabled_update(struct net_device *dev, bool value)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = value,
	};

	switchdev_port_attr_set(dev, &attr);
}

int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	timer_setup(&port->multicast_router_timer,
		    br_multicast_router_expired, 0);
	timer_setup(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
	br_mc_disabled_update(port->dev, port->br->multicast_disabled);

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}

static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (br->multicast_disabled || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	__br_multicast_enable_port(port);
	spin_unlock(&br->multicast_lock);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(br, pg);

	__del_port_router(port);

	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}

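/* Parse the group records of an IGMPv3 report.  Each known record type is
 * folded into IGMPv2 semantics: TO_INCLUDE/IS_INCLUDE with an empty
 * source list acts as a leave, everything else as a join.
 */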
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE) &&
		    ntohs(grec->grec_nsrcs) == 0) {
			br_ip4_multicast_leave_group(br, port, group, vid, src);
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	int i;
	int len;
	int num;
	int err = 0;

	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *nsrcs, _nsrcs;

		nsrcs = skb_header_pointer(skb,
					   len + offsetof(struct mld2_grec,
							  grec_nsrcs),
					   sizeof(_nsrcs), &_nsrcs);
		if (!nsrcs)
			return -EINVAL;

		if (!pskb_may_pull(skb,
				   len + sizeof(*grec) +
				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += sizeof(*grec) +
		       sizeof(struct in6_addr) * ntohs(*nsrcs);

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    ntohs(*nsrcs) == 0) {
			br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
						     vid, src);
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}
#endif

static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}

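/* Querier election, IPv6 flavour: as in the IPv4 variant above, the
 * numerically lowest source address wins, and any source is accepted
 * while neither query timer is running yet.
 */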
#if IS_ENABLED(CONFIG_IPV6)
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
	}

	return false;
}

static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long)port >= (unsigned long)p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
	br_port_mc_router_state_change(port, true);
}

static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!timer_pending(&br->multicast_router_timer))
				br_mc_router_state_change(br, true);
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		}
		return;
	}

	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}

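/* Handle a received IGMP query.  A general query feeds the querier
 * election and refreshes the router timer; a group-specific query lowers
 * the timers of the matching group so silent members expire after
 * multicast_last_member_count * the advertised max response time.
 */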
static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	__be32 group;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (skb->len == offset + sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (skb->len >= offset + sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (skb->len == offset + sizeof(*mld)) {
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *)icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ip6h->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

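/* Common leave handling.  With fast-leave on the port the port group is
 * torn down immediately; otherwise, if we are the querier, a
 * group-specific query is sent and the relevant timers are shortened to
 * last_member_count * last_member_interval so the entry expires unless a
 * new report arrives.
 */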
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->flags);

			if (!mp->ports && !mp->mglist &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	if (timer_pending(&other_query->timer))
		goto out;

	if (br->multicast_querier) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query, src);
}
#endif

static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br->multicast_stats_enabled)
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

static void br_multicast_pim(struct net_bridge *br,
			     struct net_bridge_port *port,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	br_multicast_mark_router(br, port);
}

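/* IPv4 RX entry point.  ip_mc_check_igmp() validates the packet first:
 * -ENOMSG means "valid but not IGMP", where non-link-local destinations
 * are marked mrouters_only and PIM hellos refresh the router port; hard
 * parse errors only bump the IGMP error counters.
 */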
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		}
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
		break;
#endif
	}

	return ret;
}

static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_querier = 0;
	br->multicast_query_use_ifaddr = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br->has_ipv6_addr = 1;

	spin_lock_init(&br->multicast_lock);
	timer_setup(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	timer_setup(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&br->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&br->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}

static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&query->timer, jiffies);
}

void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}

void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}

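/* Set the bridge's own mrouter state.  MDB_RTR_TYPE_DISABLED and
 * MDB_RTR_TYPE_PERM stop the router timer and pin the state;
 * MDB_RTR_TYPE_TEMP_QUERY returns to query-driven detection.
 */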
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		del_timer(&br->multicast_router_timer);
		br->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
	br_port_mc_router_state_change(p, false);

	/* don't allow timer refresh */
	if (p->multicast_router == MDB_RTR_TYPE_TEMP)
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}

int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
}

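/* Runtime enable/disable of multicast snooping.  Enabling restarts the
 * own-query timers and re-enables all forwarding ports, and is rolled
 * back with -EEXIST if a previous mdb rehash is still pending.
 */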
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
        struct net_bridge_mdb_htable *mdb;
        struct net_bridge_port *port;
        int err = 0;

        spin_lock_bh(&br->multicast_lock);
        if (br->multicast_disabled == !val)
                goto unlock;

        br_mc_disabled_update(br->dev, !val);
        br->multicast_disabled = !val;
        if (br->multicast_disabled)
                goto unlock;

        if (!netif_running(br->dev))
                goto unlock;

        mdb = mlock_dereference(br->mdb, br);
        if (mdb) {
                if (mdb->old) {
                        err = -EEXIST;
rollback:
                        br->multicast_disabled = !!val;
                        goto unlock;
                }

                err = br_mdb_rehash(&br->mdb, mdb->max,
                                    br->hash_elasticity);
                if (err)
                        goto rollback;
        }

        br_multicast_open(br);
        list_for_each_entry(port, &br->port_list, list)
                __br_multicast_enable_port(port);

unlock:
        spin_unlock_bh(&br->multicast_lock);

        return err;
}

bool br_multicast_enabled(const struct net_device *dev)
{
        struct net_bridge *br = netdev_priv(dev);

        return !br->multicast_disabled;
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);

bool br_multicast_router(const struct net_device *dev)
{
        struct net_bridge *br = netdev_priv(dev);
        bool is_router;

        spin_lock_bh(&br->multicast_lock);
        is_router = br_multicast_is_router(br);
        spin_unlock_bh(&br->multicast_lock);
        return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);

int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
        unsigned long max_delay;

        val = !!val;

        spin_lock_bh(&br->multicast_lock);
        if (br->multicast_querier == val)
                goto unlock;

        br->multicast_querier = val;
        if (!val)
                goto unlock;

        max_delay = br->multicast_query_response_interval;

        if (!timer_pending(&br->ip4_other_query.timer))
                br->ip4_other_query.delay_time = jiffies + max_delay;

        br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
        if (!timer_pending(&br->ip6_other_query.timer))
                br->ip6_other_query.delay_time = jiffies + max_delay;

        br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
        spin_unlock_bh(&br->multicast_lock);

        return 0;
}

int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
        int err = -EINVAL;
        u32 old;
        struct net_bridge_mdb_htable *mdb;

        spin_lock_bh(&br->multicast_lock);
        if (!is_power_of_2(val))
                goto unlock;

        mdb = mlock_dereference(br->mdb, br);
        if (mdb && val < mdb->size)
                goto unlock;

        err = 0;

        old = br->hash_max;
        br->hash_max = val;

        if (mdb) {
                if (mdb->old) {
                        err = -EEXIST;
rollback:
                        br->hash_max = old;
                        goto unlock;
                }

                err = br_mdb_rehash(&br->mdb, br->hash_max,
                                    br->hash_elasticity);
                if (err)
                        goto rollback;
        }

unlock:
        spin_unlock_bh(&br->multicast_lock);

        return err;
}

int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
{
        /* Currently we support only versions 2 and 3 */
        switch (val) {
        case 2:
        case 3:
                break;
        default:
                return -EINVAL;
        }

        spin_lock_bh(&br->multicast_lock);
        br->multicast_igmp_version = val;
        spin_unlock_bh(&br->multicast_lock);

        return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
{
        /* Currently we support versions 1 and 2 */
        switch (val) {
        case 1:
        case 2:
                break;
        default:
                return -EINVAL;
        }

        spin_lock_bh(&br->multicast_lock);
        br->multicast_mld_version = val;
        spin_unlock_bh(&br->multicast_lock);

        return 0;
}
#endif
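/* Illustrative only: a configuration path (e.g. the bridge's sysfs or
 * netlink option handling) that wants IGMPv3/MLDv2 snooping would call:
 *
 *	err = br_multicast_set_igmp_version(br, 3);
 * #if IS_ENABLED(CONFIG_IPV6)
 *	if (!err)
 *		err = br_multicast_set_mld_version(br, 2);
 * #endif
 *
 * Any other version number is rejected with -EINVAL before the lock is
 * taken.
 */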
/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
                               struct list_head *br_ip_list)
{
        struct net_bridge *br;
        struct net_bridge_port *port;
        struct net_bridge_port_group *group;
        struct br_ip_list *entry;
        int count = 0;

        rcu_read_lock();
        if (!br_ip_list || !br_port_exists(dev))
                goto unlock;

        port = br_port_get_rcu(dev);
        if (!port || !port->br)
                goto unlock;

        br = port->br;

        list_for_each_entry_rcu(port, &br->port_list, list) {
                if (!port->dev || port->dev == dev)
                        continue;

                hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
                        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
                        if (!entry)
                                goto unlock;

                        entry->addr = group->addr;
                        list_add(&entry->list, br_ip_list);
                        count++;
                }
        }

unlock:
        rcu_read_unlock();
        return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
        struct net_bridge *br;
        struct net_bridge_port *port;
        struct ethhdr eth;
        bool ret = false;

        rcu_read_lock();
        if (!br_port_exists(dev))
                goto unlock;

        port = br_port_get_rcu(dev);
        if (!port || !port->br)
                goto unlock;

        br = port->br;

        memset(&eth, 0, sizeof(eth));
        eth.h_proto = htons(proto);

        ret = br_multicast_querier_exists(br, &eth);

unlock:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
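/* Illustrative caller sketch (not part of this file): a protocol that only
 * wants to take over multicast handling when no IGMP querier is active on
 * the bridged segment could test:
 *
 *	if (!br_multicast_has_querier_anywhere(dev, ETH_P_IP))
 *		take_over_ipv4_multicast(dev);	(hypothetical helper)
 *
 * Note that @proto is a host-order ethertype; it is converted with htons()
 * above before the querier lookup.
 */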
/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
        struct net_bridge *br;
        struct net_bridge_port *port;
        bool ret = false;

        rcu_read_lock();
        if (!br_port_exists(dev))
                goto unlock;

        port = br_port_get_rcu(dev);
        if (!port || !port->br)
                goto unlock;

        br = port->br;

        switch (proto) {
        case ETH_P_IP:
                if (!timer_pending(&br->ip4_other_query.timer) ||
                    rcu_dereference(br->ip4_querier.port) == port)
                        goto unlock;
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case ETH_P_IPV6:
                if (!timer_pending(&br->ip6_other_query.timer) ||
                    rcu_dereference(br->ip6_querier.port) == port)
                        goto unlock;
                break;
#endif
        default:
                goto unlock;
        }

        ret = true;
unlock:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
                               const struct sk_buff *skb, u8 type, u8 dir)
{
        struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
        __be16 proto = skb->protocol;
        unsigned int t_len;

        u64_stats_update_begin(&pstats->syncp);
        switch (proto) {
        case htons(ETH_P_IP):
                t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
                switch (type) {
                case IGMP_HOST_MEMBERSHIP_REPORT:
                        pstats->mstats.igmp_v1reports[dir]++;
                        break;
                case IGMPV2_HOST_MEMBERSHIP_REPORT:
                        pstats->mstats.igmp_v2reports[dir]++;
                        break;
                case IGMPV3_HOST_MEMBERSHIP_REPORT:
                        pstats->mstats.igmp_v3reports[dir]++;
                        break;
                case IGMP_HOST_MEMBERSHIP_QUERY:
                        if (t_len != sizeof(struct igmphdr)) {
                                pstats->mstats.igmp_v3queries[dir]++;
                        } else {
                                unsigned int offset = skb_transport_offset(skb);
                                struct igmphdr *ih, _ihdr;

                                ih = skb_header_pointer(skb, offset,
                                                        sizeof(_ihdr), &_ihdr);
                                if (!ih)
                                        break;
                                if (!ih->code)
                                        pstats->mstats.igmp_v1queries[dir]++;
                                else
                                        pstats->mstats.igmp_v2queries[dir]++;
                        }
                        break;
                case IGMP_HOST_LEAVE_MESSAGE:
                        pstats->mstats.igmp_leaves[dir]++;
                        break;
                }
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                t_len = ntohs(ipv6_hdr(skb)->payload_len) +
                        sizeof(struct ipv6hdr);
                t_len -= skb_network_header_len(skb);
                switch (type) {
                case ICMPV6_MGM_REPORT:
                        pstats->mstats.mld_v1reports[dir]++;
                        break;
                case ICMPV6_MLD2_REPORT:
                        pstats->mstats.mld_v2reports[dir]++;
                        break;
                case ICMPV6_MGM_QUERY:
                        if (t_len != sizeof(struct mld_msg))
                                pstats->mstats.mld_v2queries[dir]++;
                        else
                                pstats->mstats.mld_v1queries[dir]++;
                        break;
                case ICMPV6_MGM_REDUCTION:
                        pstats->mstats.mld_leaves[dir]++;
                        break;
                }
                break;
#endif /* CONFIG_IPV6 */
        }
        u64_stats_update_end(&pstats->syncp);
}

void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
                        const struct sk_buff *skb, u8 type, u8 dir)
{
        struct bridge_mcast_stats __percpu *stats;

        /* if multicast_disabled is true then igmp type can't be set */
        if (!type || !br->multicast_stats_enabled)
                return;

        if (p)
                stats = p->mcast_stats;
        else
                stats = br->mcast_stats;
        if (WARN_ON(!stats))
                return;

        br_mcast_stats_add(stats, skb, type, dir);
}
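/* The per-CPU statistics below are allocated once per bridge device;
 * br_multicast_uninit_stats() is the counterpart on destruction. The
 * per-port counters (p->mcast_stats) are presumably allocated and freed
 * by the port add/del paths elsewhere in the bridge code.
 */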
int br_multicast_init_stats(struct net_bridge *br)
{
        br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
        if (!br->mcast_stats)
                return -ENOMEM;

        return 0;
}

void br_multicast_uninit_stats(struct net_bridge *br)
{
        free_percpu(br->mcast_stats);
}

static void mcast_stats_add_dir(u64 *dst, u64 *src)
{
        dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
        dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

void br_multicast_get_stats(const struct net_bridge *br,
                            const struct net_bridge_port *p,
                            struct br_mcast_stats *dest)
{
        struct bridge_mcast_stats __percpu *stats;
        struct br_mcast_stats tdst;
        int i;

        memset(dest, 0, sizeof(*dest));
        if (p)
                stats = p->mcast_stats;
        else
                stats = br->mcast_stats;
        if (WARN_ON(!stats))
                return;

        memset(&tdst, 0, sizeof(tdst));
        for_each_possible_cpu(i) {
                struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
                struct br_mcast_stats temp;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
                        memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

                mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
                mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
                mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
                mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
                mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
                mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
                mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
                tdst.igmp_parse_errors += temp.igmp_parse_errors;

                mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
                mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
                mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
                mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
                mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
                tdst.mld_parse_errors += temp.mld_parse_errors;
        }
        memcpy(dest, &tdst, sizeof(*dest));
}
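/* Illustrative consumer sketch: a stats dump path would take a snapshot
 * with
 *
 *	struct br_mcast_stats mstats;
 *
 *	br_multicast_get_stats(br, p, &mstats);
 *
 * The u64_stats begin/retry loop above guarantees each per-CPU copy is
 * internally consistent even on 32-bit hosts, at the cost of an occasional
 * retry while a writer is inside its update section.
 */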