1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Bridge multicast support. 4 * 5 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> 6 */ 7 8 #include <linux/err.h> 9 #include <linux/export.h> 10 #include <linux/if_ether.h> 11 #include <linux/igmp.h> 12 #include <linux/in.h> 13 #include <linux/jhash.h> 14 #include <linux/kernel.h> 15 #include <linux/log2.h> 16 #include <linux/netdevice.h> 17 #include <linux/netfilter_bridge.h> 18 #include <linux/random.h> 19 #include <linux/rculist.h> 20 #include <linux/skbuff.h> 21 #include <linux/slab.h> 22 #include <linux/timer.h> 23 #include <linux/inetdevice.h> 24 #include <linux/mroute.h> 25 #include <net/ip.h> 26 #include <net/switchdev.h> 27 #if IS_ENABLED(CONFIG_IPV6) 28 #include <linux/icmpv6.h> 29 #include <net/ipv6.h> 30 #include <net/mld.h> 31 #include <net/ip6_checksum.h> 32 #include <net/addrconf.h> 33 #endif 34 35 #include "br_private.h" 36 #include "br_private_mcast_eht.h" 37 38 static const struct rhashtable_params br_mdb_rht_params = { 39 .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode), 40 .key_offset = offsetof(struct net_bridge_mdb_entry, addr), 41 .key_len = sizeof(struct br_ip), 42 .automatic_shrinking = true, 43 }; 44 45 static const struct rhashtable_params br_sg_port_rht_params = { 46 .head_offset = offsetof(struct net_bridge_port_group, rhnode), 47 .key_offset = offsetof(struct net_bridge_port_group, key), 48 .key_len = sizeof(struct net_bridge_port_group_sg_key), 49 .automatic_shrinking = true, 50 }; 51 52 static void br_multicast_start_querier(struct net_bridge *br, 53 struct bridge_mcast_own_query *query); 54 static void br_multicast_add_router(struct net_bridge *br, 55 struct net_bridge_port *port); 56 static void br_ip4_multicast_leave_group(struct net_bridge *br, 57 struct net_bridge_port *port, 58 __be32 group, 59 __u16 vid, 60 const unsigned char *src); 61 static void br_multicast_port_group_rexmit(struct timer_list *t); 62 63 static void __del_port_router(struct 
net_bridge_port *p); 64 #if IS_ENABLED(CONFIG_IPV6) 65 static void br_ip6_multicast_leave_group(struct net_bridge *br, 66 struct net_bridge_port *port, 67 const struct in6_addr *group, 68 __u16 vid, const unsigned char *src); 69 #endif 70 static struct net_bridge_port_group * 71 __br_multicast_add_group(struct net_bridge *br, 72 struct net_bridge_port *port, 73 struct br_ip *group, 74 const unsigned char *src, 75 u8 filter_mode, 76 bool igmpv2_mldv1, 77 bool blocked); 78 static void br_multicast_find_del_pg(struct net_bridge *br, 79 struct net_bridge_port_group *pg); 80 81 static struct net_bridge_port_group * 82 br_sg_port_find(struct net_bridge *br, 83 struct net_bridge_port_group_sg_key *sg_p) 84 { 85 lockdep_assert_held_once(&br->multicast_lock); 86 87 return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p, 88 br_sg_port_rht_params); 89 } 90 91 static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br, 92 struct br_ip *dst) 93 { 94 return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params); 95 } 96 97 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br, 98 struct br_ip *dst) 99 { 100 struct net_bridge_mdb_entry *ent; 101 102 lockdep_assert_held_once(&br->multicast_lock); 103 104 rcu_read_lock(); 105 ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params); 106 rcu_read_unlock(); 107 108 return ent; 109 } 110 111 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br, 112 __be32 dst, __u16 vid) 113 { 114 struct br_ip br_dst; 115 116 memset(&br_dst, 0, sizeof(br_dst)); 117 br_dst.dst.ip4 = dst; 118 br_dst.proto = htons(ETH_P_IP); 119 br_dst.vid = vid; 120 121 return br_mdb_ip_get(br, &br_dst); 122 } 123 124 #if IS_ENABLED(CONFIG_IPV6) 125 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br, 126 const struct in6_addr *dst, 127 __u16 vid) 128 { 129 struct br_ip br_dst; 130 131 memset(&br_dst, 0, sizeof(br_dst)); 132 br_dst.dst.ip6 = *dst; 133 br_dst.proto = htons(ETH_P_IPV6); 134 
br_dst.vid = vid; 135 136 return br_mdb_ip_get(br, &br_dst); 137 } 138 #endif 139 140 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 141 struct sk_buff *skb, u16 vid) 142 { 143 struct br_ip ip; 144 145 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) 146 return NULL; 147 148 if (BR_INPUT_SKB_CB(skb)->igmp) 149 return NULL; 150 151 memset(&ip, 0, sizeof(ip)); 152 ip.proto = skb->protocol; 153 ip.vid = vid; 154 155 switch (skb->protocol) { 156 case htons(ETH_P_IP): 157 ip.dst.ip4 = ip_hdr(skb)->daddr; 158 if (br->multicast_igmp_version == 3) { 159 struct net_bridge_mdb_entry *mdb; 160 161 ip.src.ip4 = ip_hdr(skb)->saddr; 162 mdb = br_mdb_ip_get_rcu(br, &ip); 163 if (mdb) 164 return mdb; 165 ip.src.ip4 = 0; 166 } 167 break; 168 #if IS_ENABLED(CONFIG_IPV6) 169 case htons(ETH_P_IPV6): 170 ip.dst.ip6 = ipv6_hdr(skb)->daddr; 171 if (br->multicast_mld_version == 2) { 172 struct net_bridge_mdb_entry *mdb; 173 174 ip.src.ip6 = ipv6_hdr(skb)->saddr; 175 mdb = br_mdb_ip_get_rcu(br, &ip); 176 if (mdb) 177 return mdb; 178 memset(&ip.src.ip6, 0, sizeof(ip.src.ip6)); 179 } 180 break; 181 #endif 182 default: 183 ip.proto = 0; 184 ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest); 185 } 186 187 return br_mdb_ip_get_rcu(br, &ip); 188 } 189 190 static bool br_port_group_equal(struct net_bridge_port_group *p, 191 struct net_bridge_port *port, 192 const unsigned char *src) 193 { 194 if (p->key.port != port) 195 return false; 196 197 if (!(port->flags & BR_MULTICAST_TO_UNICAST)) 198 return true; 199 200 return ether_addr_equal(src, p->eth_addr); 201 } 202 203 static void __fwd_add_star_excl(struct net_bridge_port_group *pg, 204 struct br_ip *sg_ip) 205 { 206 struct net_bridge_port_group_sg_key sg_key; 207 struct net_bridge *br = pg->key.port->br; 208 struct net_bridge_port_group *src_pg; 209 210 memset(&sg_key, 0, sizeof(sg_key)); 211 sg_key.port = pg->key.port; 212 sg_key.addr = *sg_ip; 213 if (br_sg_port_find(br, &sg_key)) 214 return; 215 216 src_pg = 
__br_multicast_add_group(br, pg->key.port, sg_ip, pg->eth_addr, 217 MCAST_INCLUDE, false, false); 218 if (IS_ERR_OR_NULL(src_pg) || 219 src_pg->rt_protocol != RTPROT_KERNEL) 220 return; 221 222 src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL; 223 } 224 225 static void __fwd_del_star_excl(struct net_bridge_port_group *pg, 226 struct br_ip *sg_ip) 227 { 228 struct net_bridge_port_group_sg_key sg_key; 229 struct net_bridge *br = pg->key.port->br; 230 struct net_bridge_port_group *src_pg; 231 232 memset(&sg_key, 0, sizeof(sg_key)); 233 sg_key.port = pg->key.port; 234 sg_key.addr = *sg_ip; 235 src_pg = br_sg_port_find(br, &sg_key); 236 if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) || 237 src_pg->rt_protocol != RTPROT_KERNEL) 238 return; 239 240 br_multicast_find_del_pg(br, src_pg); 241 } 242 243 /* When a port group transitions to (or is added as) EXCLUDE we need to add it 244 * to all other ports' S,G entries which are not blocked by the current group 245 * for proper replication, the assumption is that any S,G blocked entries 246 * are already added so the S,G,port lookup should skip them. 247 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being 248 * deleted we need to remove it from all ports' S,G entries where it was 249 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL). 
250 */ 251 void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg, 252 u8 filter_mode) 253 { 254 struct net_bridge *br = pg->key.port->br; 255 struct net_bridge_port_group *pg_lst; 256 struct net_bridge_mdb_entry *mp; 257 struct br_ip sg_ip; 258 259 if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr))) 260 return; 261 262 mp = br_mdb_ip_get(br, &pg->key.addr); 263 if (!mp) 264 return; 265 266 memset(&sg_ip, 0, sizeof(sg_ip)); 267 sg_ip = pg->key.addr; 268 for (pg_lst = mlock_dereference(mp->ports, br); 269 pg_lst; 270 pg_lst = mlock_dereference(pg_lst->next, br)) { 271 struct net_bridge_group_src *src_ent; 272 273 if (pg_lst == pg) 274 continue; 275 hlist_for_each_entry(src_ent, &pg_lst->src_list, node) { 276 if (!(src_ent->flags & BR_SGRP_F_INSTALLED)) 277 continue; 278 sg_ip.src = src_ent->addr.src; 279 switch (filter_mode) { 280 case MCAST_INCLUDE: 281 __fwd_del_star_excl(pg, &sg_ip); 282 break; 283 case MCAST_EXCLUDE: 284 __fwd_add_star_excl(pg, &sg_ip); 285 break; 286 } 287 } 288 } 289 } 290 291 /* called when adding a new S,G with host_joined == false by default */ 292 static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp, 293 struct net_bridge_port_group *sg) 294 { 295 struct net_bridge_mdb_entry *sg_mp; 296 297 if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr))) 298 return; 299 if (!star_mp->host_joined) 300 return; 301 302 sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr); 303 if (!sg_mp) 304 return; 305 sg_mp->host_joined = true; 306 } 307 308 /* set the host_joined state of all of *,G's S,G entries */ 309 static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp) 310 { 311 struct net_bridge *br = star_mp->br; 312 struct net_bridge_mdb_entry *sg_mp; 313 struct net_bridge_port_group *pg; 314 struct br_ip sg_ip; 315 316 if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr))) 317 return; 318 319 memset(&sg_ip, 0, sizeof(sg_ip)); 320 sg_ip = star_mp->addr; 321 for (pg = 
mlock_dereference(star_mp->ports, br); 322 pg; 323 pg = mlock_dereference(pg->next, br)) { 324 struct net_bridge_group_src *src_ent; 325 326 hlist_for_each_entry(src_ent, &pg->src_list, node) { 327 if (!(src_ent->flags & BR_SGRP_F_INSTALLED)) 328 continue; 329 sg_ip.src = src_ent->addr.src; 330 sg_mp = br_mdb_ip_get(br, &sg_ip); 331 if (!sg_mp) 332 continue; 333 sg_mp->host_joined = star_mp->host_joined; 334 } 335 } 336 } 337 338 static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp) 339 { 340 struct net_bridge_port_group __rcu **pp; 341 struct net_bridge_port_group *p; 342 343 /* *,G exclude ports are only added to S,G entries */ 344 if (WARN_ON(br_multicast_is_star_g(&sgmp->addr))) 345 return; 346 347 /* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports 348 * we should ignore perm entries since they're managed by user-space 349 */ 350 for (pp = &sgmp->ports; 351 (p = mlock_dereference(*pp, sgmp->br)) != NULL; 352 pp = &p->next) 353 if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL | 354 MDB_PG_FLAGS_PERMANENT))) 355 return; 356 357 /* currently the host can only have joined the *,G which means 358 * we treat it as EXCLUDE {}, so for an S,G it's considered a 359 * STAR_EXCLUDE entry and we can safely leave it 360 */ 361 sgmp->host_joined = false; 362 363 for (pp = &sgmp->ports; 364 (p = mlock_dereference(*pp, sgmp->br)) != NULL;) { 365 if (!(p->flags & MDB_PG_FLAGS_PERMANENT)) 366 br_multicast_del_pg(sgmp, p, pp); 367 else 368 pp = &p->next; 369 } 370 } 371 372 void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp, 373 struct net_bridge_port_group *sg) 374 { 375 struct net_bridge_port_group_sg_key sg_key; 376 struct net_bridge *br = star_mp->br; 377 struct net_bridge_port_group *pg; 378 379 if (WARN_ON(br_multicast_is_star_g(&sg->key.addr))) 380 return; 381 if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr))) 382 return; 383 384 br_multicast_sg_host_state(star_mp, sg); 385 memset(&sg_key, 0, sizeof(sg_key)); 
386 sg_key.addr = sg->key.addr; 387 /* we need to add all exclude ports to the S,G */ 388 for (pg = mlock_dereference(star_mp->ports, br); 389 pg; 390 pg = mlock_dereference(pg->next, br)) { 391 struct net_bridge_port_group *src_pg; 392 393 if (pg == sg || pg->filter_mode == MCAST_INCLUDE) 394 continue; 395 396 sg_key.port = pg->key.port; 397 if (br_sg_port_find(br, &sg_key)) 398 continue; 399 400 src_pg = __br_multicast_add_group(br, pg->key.port, 401 &sg->key.addr, 402 sg->eth_addr, 403 MCAST_INCLUDE, false, false); 404 if (IS_ERR_OR_NULL(src_pg) || 405 src_pg->rt_protocol != RTPROT_KERNEL) 406 continue; 407 src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL; 408 } 409 } 410 411 static void br_multicast_fwd_src_add(struct net_bridge_group_src *src) 412 { 413 struct net_bridge_mdb_entry *star_mp; 414 struct net_bridge_port_group *sg; 415 struct br_ip sg_ip; 416 417 if (src->flags & BR_SGRP_F_INSTALLED) 418 return; 419 420 memset(&sg_ip, 0, sizeof(sg_ip)); 421 sg_ip = src->pg->key.addr; 422 sg_ip.src = src->addr.src; 423 sg = __br_multicast_add_group(src->br, src->pg->key.port, &sg_ip, 424 src->pg->eth_addr, MCAST_INCLUDE, false, 425 !timer_pending(&src->timer)); 426 if (IS_ERR_OR_NULL(sg)) 427 return; 428 src->flags |= BR_SGRP_F_INSTALLED; 429 sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL; 430 431 /* if it was added by user-space as perm we can skip next steps */ 432 if (sg->rt_protocol != RTPROT_KERNEL && 433 (sg->flags & MDB_PG_FLAGS_PERMANENT)) 434 return; 435 436 /* the kernel is now responsible for removing this S,G */ 437 del_timer(&sg->timer); 438 star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr); 439 if (!star_mp) 440 return; 441 442 br_multicast_sg_add_exclude_ports(star_mp, sg); 443 } 444 445 static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src, 446 bool fastleave) 447 { 448 struct net_bridge_port_group *p, *pg = src->pg; 449 struct net_bridge_port_group __rcu **pp; 450 struct net_bridge_mdb_entry *mp; 451 struct br_ip sg_ip; 452 453 memset(&sg_ip, 0, 
sizeof(sg_ip)); 454 sg_ip = pg->key.addr; 455 sg_ip.src = src->addr.src; 456 457 mp = br_mdb_ip_get(src->br, &sg_ip); 458 if (!mp) 459 return; 460 461 for (pp = &mp->ports; 462 (p = mlock_dereference(*pp, src->br)) != NULL; 463 pp = &p->next) { 464 if (!br_port_group_equal(p, pg->key.port, pg->eth_addr)) 465 continue; 466 467 if (p->rt_protocol != RTPROT_KERNEL && 468 (p->flags & MDB_PG_FLAGS_PERMANENT)) 469 break; 470 471 if (fastleave) 472 p->flags |= MDB_PG_FLAGS_FAST_LEAVE; 473 br_multicast_del_pg(mp, p, pp); 474 break; 475 } 476 src->flags &= ~BR_SGRP_F_INSTALLED; 477 } 478 479 /* install S,G and based on src's timer enable or disable forwarding */ 480 static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src) 481 { 482 struct net_bridge_port_group_sg_key sg_key; 483 struct net_bridge_port_group *sg; 484 u8 old_flags; 485 486 br_multicast_fwd_src_add(src); 487 488 memset(&sg_key, 0, sizeof(sg_key)); 489 sg_key.addr = src->pg->key.addr; 490 sg_key.addr.src = src->addr.src; 491 sg_key.port = src->pg->key.port; 492 493 sg = br_sg_port_find(src->br, &sg_key); 494 if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT)) 495 return; 496 497 old_flags = sg->flags; 498 if (timer_pending(&src->timer)) 499 sg->flags &= ~MDB_PG_FLAGS_BLOCKED; 500 else 501 sg->flags |= MDB_PG_FLAGS_BLOCKED; 502 503 if (old_flags != sg->flags) { 504 struct net_bridge_mdb_entry *sg_mp; 505 506 sg_mp = br_mdb_ip_get(src->br, &sg_key.addr); 507 if (!sg_mp) 508 return; 509 br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB); 510 } 511 } 512 513 static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc) 514 { 515 struct net_bridge_mdb_entry *mp; 516 517 mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc); 518 WARN_ON(!hlist_unhashed(&mp->mdb_node)); 519 WARN_ON(mp->ports); 520 521 del_timer_sync(&mp->timer); 522 kfree_rcu(mp, rcu); 523 } 524 525 static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp) 526 { 527 struct net_bridge *br = mp->br; 528 
529 rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode, 530 br_mdb_rht_params); 531 hlist_del_init_rcu(&mp->mdb_node); 532 hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list); 533 queue_work(system_long_wq, &br->mcast_gc_work); 534 } 535 536 static void br_multicast_group_expired(struct timer_list *t) 537 { 538 struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer); 539 struct net_bridge *br = mp->br; 540 541 spin_lock(&br->multicast_lock); 542 if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) || 543 timer_pending(&mp->timer)) 544 goto out; 545 546 br_multicast_host_leave(mp, true); 547 548 if (mp->ports) 549 goto out; 550 br_multicast_del_mdb_entry(mp); 551 out: 552 spin_unlock(&br->multicast_lock); 553 } 554 555 static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc) 556 { 557 struct net_bridge_group_src *src; 558 559 src = container_of(gc, struct net_bridge_group_src, mcast_gc); 560 WARN_ON(!hlist_unhashed(&src->node)); 561 562 del_timer_sync(&src->timer); 563 kfree_rcu(src, rcu); 564 } 565 566 void br_multicast_del_group_src(struct net_bridge_group_src *src, 567 bool fastleave) 568 { 569 struct net_bridge *br = src->pg->key.port->br; 570 571 br_multicast_fwd_src_remove(src, fastleave); 572 hlist_del_init_rcu(&src->node); 573 src->pg->src_ents--; 574 hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list); 575 queue_work(system_long_wq, &br->mcast_gc_work); 576 } 577 578 static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc) 579 { 580 struct net_bridge_port_group *pg; 581 582 pg = container_of(gc, struct net_bridge_port_group, mcast_gc); 583 WARN_ON(!hlist_unhashed(&pg->mglist)); 584 WARN_ON(!hlist_empty(&pg->src_list)); 585 586 del_timer_sync(&pg->rexmit_timer); 587 del_timer_sync(&pg->timer); 588 kfree_rcu(pg, rcu); 589 } 590 591 void br_multicast_del_pg(struct net_bridge_mdb_entry *mp, 592 struct net_bridge_port_group *pg, 593 struct net_bridge_port_group __rcu **pp) 594 { 595 struct 
net_bridge *br = pg->key.port->br; 596 struct net_bridge_group_src *ent; 597 struct hlist_node *tmp; 598 599 rcu_assign_pointer(*pp, pg->next); 600 hlist_del_init(&pg->mglist); 601 br_multicast_eht_clean_sets(pg); 602 hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) 603 br_multicast_del_group_src(ent, false); 604 br_mdb_notify(br->dev, mp, pg, RTM_DELMDB); 605 if (!br_multicast_is_star_g(&mp->addr)) { 606 rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode, 607 br_sg_port_rht_params); 608 br_multicast_sg_del_exclude_ports(mp); 609 } else { 610 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE); 611 } 612 hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list); 613 queue_work(system_long_wq, &br->mcast_gc_work); 614 615 if (!mp->ports && !mp->host_joined && netif_running(br->dev)) 616 mod_timer(&mp->timer, jiffies); 617 } 618 619 static void br_multicast_find_del_pg(struct net_bridge *br, 620 struct net_bridge_port_group *pg) 621 { 622 struct net_bridge_port_group __rcu **pp; 623 struct net_bridge_mdb_entry *mp; 624 struct net_bridge_port_group *p; 625 626 mp = br_mdb_ip_get(br, &pg->key.addr); 627 if (WARN_ON(!mp)) 628 return; 629 630 for (pp = &mp->ports; 631 (p = mlock_dereference(*pp, br)) != NULL; 632 pp = &p->next) { 633 if (p != pg) 634 continue; 635 636 br_multicast_del_pg(mp, pg, pp); 637 return; 638 } 639 640 WARN_ON(1); 641 } 642 643 static void br_multicast_port_group_expired(struct timer_list *t) 644 { 645 struct net_bridge_port_group *pg = from_timer(pg, t, timer); 646 struct net_bridge_group_src *src_ent; 647 struct net_bridge *br = pg->key.port->br; 648 struct hlist_node *tmp; 649 bool changed; 650 651 spin_lock(&br->multicast_lock); 652 if (!netif_running(br->dev) || timer_pending(&pg->timer) || 653 hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT) 654 goto out; 655 656 changed = !!(pg->filter_mode == MCAST_EXCLUDE); 657 pg->filter_mode = MCAST_INCLUDE; 658 hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) { 
659 if (!timer_pending(&src_ent->timer)) { 660 br_multicast_del_group_src(src_ent, false); 661 changed = true; 662 } 663 } 664 665 if (hlist_empty(&pg->src_list)) { 666 br_multicast_find_del_pg(br, pg); 667 } else if (changed) { 668 struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr); 669 670 if (changed && br_multicast_is_star_g(&pg->key.addr)) 671 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE); 672 673 if (WARN_ON(!mp)) 674 goto out; 675 br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB); 676 } 677 out: 678 spin_unlock(&br->multicast_lock); 679 } 680 681 static void br_multicast_gc(struct hlist_head *head) 682 { 683 struct net_bridge_mcast_gc *gcent; 684 struct hlist_node *tmp; 685 686 hlist_for_each_entry_safe(gcent, tmp, head, gc_node) { 687 hlist_del_init(&gcent->gc_node); 688 gcent->destroy(gcent); 689 } 690 } 691 692 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, 693 struct net_bridge_port_group *pg, 694 __be32 ip_dst, __be32 group, 695 bool with_srcs, bool over_lmqt, 696 u8 sflag, u8 *igmp_type, 697 bool *need_rexmit) 698 { 699 struct net_bridge_port *p = pg ? 
pg->key.port : NULL; 700 struct net_bridge_group_src *ent; 701 size_t pkt_size, igmp_hdr_size; 702 unsigned long now = jiffies; 703 struct igmpv3_query *ihv3; 704 void *csum_start = NULL; 705 __sum16 *csum = NULL; 706 struct sk_buff *skb; 707 struct igmphdr *ih; 708 struct ethhdr *eth; 709 unsigned long lmqt; 710 struct iphdr *iph; 711 u16 lmqt_srcs = 0; 712 713 igmp_hdr_size = sizeof(*ih); 714 if (br->multicast_igmp_version == 3) { 715 igmp_hdr_size = sizeof(*ihv3); 716 if (pg && with_srcs) { 717 lmqt = now + (br->multicast_last_member_interval * 718 br->multicast_last_member_count); 719 hlist_for_each_entry(ent, &pg->src_list, node) { 720 if (over_lmqt == time_after(ent->timer.expires, 721 lmqt) && 722 ent->src_query_rexmit_cnt > 0) 723 lmqt_srcs++; 724 } 725 726 if (!lmqt_srcs) 727 return NULL; 728 igmp_hdr_size += lmqt_srcs * sizeof(__be32); 729 } 730 } 731 732 pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size; 733 if ((p && pkt_size > p->dev->mtu) || 734 pkt_size > br->dev->mtu) 735 return NULL; 736 737 skb = netdev_alloc_skb_ip_align(br->dev, pkt_size); 738 if (!skb) 739 goto out; 740 741 skb->protocol = htons(ETH_P_IP); 742 743 skb_reset_mac_header(skb); 744 eth = eth_hdr(skb); 745 746 ether_addr_copy(eth->h_source, br->dev->dev_addr); 747 ip_eth_mc_map(ip_dst, eth->h_dest); 748 eth->h_proto = htons(ETH_P_IP); 749 skb_put(skb, sizeof(*eth)); 750 751 skb_set_network_header(skb, skb->len); 752 iph = ip_hdr(skb); 753 iph->tot_len = htons(pkt_size - sizeof(*eth)); 754 755 iph->version = 4; 756 iph->ihl = 6; 757 iph->tos = 0xc0; 758 iph->id = 0; 759 iph->frag_off = htons(IP_DF); 760 iph->ttl = 1; 761 iph->protocol = IPPROTO_IGMP; 762 iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ? 
763 inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0; 764 iph->daddr = ip_dst; 765 ((u8 *)&iph[1])[0] = IPOPT_RA; 766 ((u8 *)&iph[1])[1] = 4; 767 ((u8 *)&iph[1])[2] = 0; 768 ((u8 *)&iph[1])[3] = 0; 769 ip_send_check(iph); 770 skb_put(skb, 24); 771 772 skb_set_transport_header(skb, skb->len); 773 *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY; 774 775 switch (br->multicast_igmp_version) { 776 case 2: 777 ih = igmp_hdr(skb); 778 ih->type = IGMP_HOST_MEMBERSHIP_QUERY; 779 ih->code = (group ? br->multicast_last_member_interval : 780 br->multicast_query_response_interval) / 781 (HZ / IGMP_TIMER_SCALE); 782 ih->group = group; 783 ih->csum = 0; 784 csum = &ih->csum; 785 csum_start = (void *)ih; 786 break; 787 case 3: 788 ihv3 = igmpv3_query_hdr(skb); 789 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY; 790 ihv3->code = (group ? br->multicast_last_member_interval : 791 br->multicast_query_response_interval) / 792 (HZ / IGMP_TIMER_SCALE); 793 ihv3->group = group; 794 ihv3->qqic = br->multicast_query_interval / HZ; 795 ihv3->nsrcs = htons(lmqt_srcs); 796 ihv3->resv = 0; 797 ihv3->suppress = sflag; 798 ihv3->qrv = 2; 799 ihv3->csum = 0; 800 csum = &ihv3->csum; 801 csum_start = (void *)ihv3; 802 if (!pg || !with_srcs) 803 break; 804 805 lmqt_srcs = 0; 806 hlist_for_each_entry(ent, &pg->src_list, node) { 807 if (over_lmqt == time_after(ent->timer.expires, 808 lmqt) && 809 ent->src_query_rexmit_cnt > 0) { 810 ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4; 811 ent->src_query_rexmit_cnt--; 812 if (need_rexmit && ent->src_query_rexmit_cnt) 813 *need_rexmit = true; 814 } 815 } 816 if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) { 817 kfree_skb(skb); 818 return NULL; 819 } 820 break; 821 } 822 823 if (WARN_ON(!csum || !csum_start)) { 824 kfree_skb(skb); 825 return NULL; 826 } 827 828 *csum = ip_compute_csum(csum_start, igmp_hdr_size); 829 skb_put(skb, igmp_hdr_size); 830 __skb_pull(skb, sizeof(*eth)); 831 832 out: 833 return skb; 834 } 835 836 #if IS_ENABLED(CONFIG_IPV6) 837 static struct sk_buff 
*br_ip6_multicast_alloc_query(struct net_bridge *br, 838 struct net_bridge_port_group *pg, 839 const struct in6_addr *ip6_dst, 840 const struct in6_addr *group, 841 bool with_srcs, bool over_llqt, 842 u8 sflag, u8 *igmp_type, 843 bool *need_rexmit) 844 { 845 struct net_bridge_port *p = pg ? pg->key.port : NULL; 846 struct net_bridge_group_src *ent; 847 size_t pkt_size, mld_hdr_size; 848 unsigned long now = jiffies; 849 struct mld2_query *mld2q; 850 void *csum_start = NULL; 851 unsigned long interval; 852 __sum16 *csum = NULL; 853 struct ipv6hdr *ip6h; 854 struct mld_msg *mldq; 855 struct sk_buff *skb; 856 unsigned long llqt; 857 struct ethhdr *eth; 858 u16 llqt_srcs = 0; 859 u8 *hopopt; 860 861 mld_hdr_size = sizeof(*mldq); 862 if (br->multicast_mld_version == 2) { 863 mld_hdr_size = sizeof(*mld2q); 864 if (pg && with_srcs) { 865 llqt = now + (br->multicast_last_member_interval * 866 br->multicast_last_member_count); 867 hlist_for_each_entry(ent, &pg->src_list, node) { 868 if (over_llqt == time_after(ent->timer.expires, 869 llqt) && 870 ent->src_query_rexmit_cnt > 0) 871 llqt_srcs++; 872 } 873 874 if (!llqt_srcs) 875 return NULL; 876 mld_hdr_size += llqt_srcs * sizeof(struct in6_addr); 877 } 878 } 879 880 pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size; 881 if ((p && pkt_size > p->dev->mtu) || 882 pkt_size > br->dev->mtu) 883 return NULL; 884 885 skb = netdev_alloc_skb_ip_align(br->dev, pkt_size); 886 if (!skb) 887 goto out; 888 889 skb->protocol = htons(ETH_P_IPV6); 890 891 /* Ethernet header */ 892 skb_reset_mac_header(skb); 893 eth = eth_hdr(skb); 894 895 ether_addr_copy(eth->h_source, br->dev->dev_addr); 896 eth->h_proto = htons(ETH_P_IPV6); 897 skb_put(skb, sizeof(*eth)); 898 899 /* IPv6 header + HbH option */ 900 skb_set_network_header(skb, skb->len); 901 ip6h = ipv6_hdr(skb); 902 903 *(__force __be32 *)ip6h = htonl(0x60000000); 904 ip6h->payload_len = htons(8 + mld_hdr_size); 905 ip6h->nexthdr = IPPROTO_HOPOPTS; 906 ip6h->hop_limit = 1; 907 
ip6h->daddr = *ip6_dst; 908 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, 909 &ip6h->saddr)) { 910 kfree_skb(skb); 911 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false); 912 return NULL; 913 } 914 915 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true); 916 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); 917 918 hopopt = (u8 *)(ip6h + 1); 919 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ 920 hopopt[1] = 0; /* length of HbH */ 921 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */ 922 hopopt[3] = 2; /* Length of RA Option */ 923 hopopt[4] = 0; /* Type = 0x0000 (MLD) */ 924 hopopt[5] = 0; 925 hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */ 926 hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */ 927 928 skb_put(skb, sizeof(*ip6h) + 8); 929 930 /* ICMPv6 */ 931 skb_set_transport_header(skb, skb->len); 932 interval = ipv6_addr_any(group) ? 933 br->multicast_query_response_interval : 934 br->multicast_last_member_interval; 935 *igmp_type = ICMPV6_MGM_QUERY; 936 switch (br->multicast_mld_version) { 937 case 1: 938 mldq = (struct mld_msg *)icmp6_hdr(skb); 939 mldq->mld_type = ICMPV6_MGM_QUERY; 940 mldq->mld_code = 0; 941 mldq->mld_cksum = 0; 942 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); 943 mldq->mld_reserved = 0; 944 mldq->mld_mca = *group; 945 csum = &mldq->mld_cksum; 946 csum_start = (void *)mldq; 947 break; 948 case 2: 949 mld2q = (struct mld2_query *)icmp6_hdr(skb); 950 mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval)); 951 mld2q->mld2q_type = ICMPV6_MGM_QUERY; 952 mld2q->mld2q_code = 0; 953 mld2q->mld2q_cksum = 0; 954 mld2q->mld2q_resv1 = 0; 955 mld2q->mld2q_resv2 = 0; 956 mld2q->mld2q_suppress = sflag; 957 mld2q->mld2q_qrv = 2; 958 mld2q->mld2q_nsrcs = htons(llqt_srcs); 959 mld2q->mld2q_qqic = br->multicast_query_interval / HZ; 960 mld2q->mld2q_mca = *group; 961 csum = &mld2q->mld2q_cksum; 962 csum_start = (void *)mld2q; 963 if (!pg || !with_srcs) 964 break; 965 966 llqt_srcs = 0; 967 hlist_for_each_entry(ent, &pg->src_list, node) { 968 if (over_llqt == 
time_after(ent->timer.expires, 969 llqt) && 970 ent->src_query_rexmit_cnt > 0) { 971 mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6; 972 ent->src_query_rexmit_cnt--; 973 if (need_rexmit && ent->src_query_rexmit_cnt) 974 *need_rexmit = true; 975 } 976 } 977 if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) { 978 kfree_skb(skb); 979 return NULL; 980 } 981 break; 982 } 983 984 if (WARN_ON(!csum || !csum_start)) { 985 kfree_skb(skb); 986 return NULL; 987 } 988 989 *csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size, 990 IPPROTO_ICMPV6, 991 csum_partial(csum_start, mld_hdr_size, 0)); 992 skb_put(skb, mld_hdr_size); 993 __skb_pull(skb, sizeof(*eth)); 994 995 out: 996 return skb; 997 } 998 #endif 999 1000 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, 1001 struct net_bridge_port_group *pg, 1002 struct br_ip *ip_dst, 1003 struct br_ip *group, 1004 bool with_srcs, bool over_lmqt, 1005 u8 sflag, u8 *igmp_type, 1006 bool *need_rexmit) 1007 { 1008 __be32 ip4_dst; 1009 1010 switch (group->proto) { 1011 case htons(ETH_P_IP): 1012 ip4_dst = ip_dst ? 
ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP); 1013 return br_ip4_multicast_alloc_query(br, pg, 1014 ip4_dst, group->dst.ip4, 1015 with_srcs, over_lmqt, 1016 sflag, igmp_type, 1017 need_rexmit); 1018 #if IS_ENABLED(CONFIG_IPV6) 1019 case htons(ETH_P_IPV6): { 1020 struct in6_addr ip6_dst; 1021 1022 if (ip_dst) 1023 ip6_dst = ip_dst->dst.ip6; 1024 else 1025 ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0, 1026 htonl(1)); 1027 1028 return br_ip6_multicast_alloc_query(br, pg, 1029 &ip6_dst, &group->dst.ip6, 1030 with_srcs, over_lmqt, 1031 sflag, igmp_type, 1032 need_rexmit); 1033 } 1034 #endif 1035 } 1036 return NULL; 1037 } 1038 1039 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br, 1040 struct br_ip *group) 1041 { 1042 struct net_bridge_mdb_entry *mp; 1043 int err; 1044 1045 mp = br_mdb_ip_get(br, group); 1046 if (mp) 1047 return mp; 1048 1049 if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) { 1050 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false); 1051 return ERR_PTR(-E2BIG); 1052 } 1053 1054 mp = kzalloc(sizeof(*mp), GFP_ATOMIC); 1055 if (unlikely(!mp)) 1056 return ERR_PTR(-ENOMEM); 1057 1058 mp->br = br; 1059 mp->addr = *group; 1060 mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry; 1061 timer_setup(&mp->timer, br_multicast_group_expired, 0); 1062 err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode, 1063 br_mdb_rht_params); 1064 if (err) { 1065 kfree(mp); 1066 mp = ERR_PTR(err); 1067 } else { 1068 hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list); 1069 } 1070 1071 return mp; 1072 } 1073 1074 static void br_multicast_group_src_expired(struct timer_list *t) 1075 { 1076 struct net_bridge_group_src *src = from_timer(src, t, timer); 1077 struct net_bridge_port_group *pg; 1078 struct net_bridge *br = src->br; 1079 1080 spin_lock(&br->multicast_lock); 1081 if (hlist_unhashed(&src->node) || !netif_running(br->dev) || 1082 timer_pending(&src->timer)) 1083 goto out; 1084 1085 pg = src->pg; 1086 if (pg->filter_mode == 
MCAST_INCLUDE) { 1087 br_multicast_del_group_src(src, false); 1088 if (!hlist_empty(&pg->src_list)) 1089 goto out; 1090 br_multicast_find_del_pg(br, pg); 1091 } else { 1092 br_multicast_fwd_src_handle(src); 1093 } 1094 1095 out: 1096 spin_unlock(&br->multicast_lock); 1097 } 1098 1099 struct net_bridge_group_src * 1100 br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip) 1101 { 1102 struct net_bridge_group_src *ent; 1103 1104 switch (ip->proto) { 1105 case htons(ETH_P_IP): 1106 hlist_for_each_entry(ent, &pg->src_list, node) 1107 if (ip->src.ip4 == ent->addr.src.ip4) 1108 return ent; 1109 break; 1110 #if IS_ENABLED(CONFIG_IPV6) 1111 case htons(ETH_P_IPV6): 1112 hlist_for_each_entry(ent, &pg->src_list, node) 1113 if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6)) 1114 return ent; 1115 break; 1116 #endif 1117 } 1118 1119 return NULL; 1120 } 1121 1122 static struct net_bridge_group_src * 1123 br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip) 1124 { 1125 struct net_bridge_group_src *grp_src; 1126 1127 if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT)) 1128 return NULL; 1129 1130 switch (src_ip->proto) { 1131 case htons(ETH_P_IP): 1132 if (ipv4_is_zeronet(src_ip->src.ip4) || 1133 ipv4_is_multicast(src_ip->src.ip4)) 1134 return NULL; 1135 break; 1136 #if IS_ENABLED(CONFIG_IPV6) 1137 case htons(ETH_P_IPV6): 1138 if (ipv6_addr_any(&src_ip->src.ip6) || 1139 ipv6_addr_is_multicast(&src_ip->src.ip6)) 1140 return NULL; 1141 break; 1142 #endif 1143 } 1144 1145 grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC); 1146 if (unlikely(!grp_src)) 1147 return NULL; 1148 1149 grp_src->pg = pg; 1150 grp_src->br = pg->key.port->br; 1151 grp_src->addr = *src_ip; 1152 grp_src->mcast_gc.destroy = br_multicast_destroy_group_src; 1153 timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0); 1154 1155 hlist_add_head_rcu(&grp_src->node, &pg->src_list); 1156 pg->src_ents++; 1157 1158 return grp_src; 1159 } 1160 1161 struct 
net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	/* S,G entries are indexed in sg_port_tbl; a failed insert means a
	 * duplicate (or rhashtable error) - give up
	 */
	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		kfree(p);
		return NULL;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		/* no per-source-MAC match - use the broadcast address */
		eth_broadcast_addr(p->eth_addr);

	return p;
}

/* Mark the bridge device itself as a member of @mp and (re)arm the group
 * membership timer (except for L2 groups, which have no timer).
 */
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
}

/* Remove the bridge device's own membership from @mp. */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}

/* Add a membership for @group on @port (or a host join when @port is NULL).
 * Looks up or creates the mdb entry, then finds or creates the port group,
 * keeping the port list ordered by port pointer value.  Returns the port
 * group, NULL for host joins, or an ERR_PTR.  Runs under multicast_lock.
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	if (!port) {
		br_multicast_host_join(mp, true);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);

found:
	/* only v2-style joins refresh the group timer directly */
	if (igmpv2_mldv1)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

out:
	return p;
}

/* Locked wrapper around __br_multicast_add_group(). */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&br->multicast_lock);
	pg = __br_multicast_add_group(br, port, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(pg);
	spin_unlock(&br->multicast_lock);

	return err;
}

/* Process an IGMP join for @group on @port/@vid.  Local-scope groups
 * (224.0.0.x) are never snooped.  IGMPv2 reports behave as EXCLUDE {}
 * (listen to all sources).
 */
static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv4_is_local_multicast(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(br, port, &br_group, src, filter_mode,
				      igmpv2);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Process an MLD join for @group on @port/@vid; the link-local all-nodes
 * group is never snooped.  MLDv1 reports behave as EXCLUDE {}.
 */
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	filter_mode = mldv1 ?
MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(br, port, &br_group, src, filter_mode,
				      mldv1);
}
#endif

/* Per-port router timer expired: stop treating the port as a multicast
 * router port unless it is disabled or configured as permanent.
 */
static void br_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_port *port =
		from_timer(port, t, multicast_router_timer);
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer))
		goto out;

	__del_port_router(port);
out:
	spin_unlock(&br->multicast_lock);
}

/* Propagate the bridge's multicast-router state to switchdev (deferred). */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

/* The bridge itself stopped being a multicast router (timer expiry),
 * unless disabled or permanently configured.
 */
static void br_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, multicast_router_timer);

	spin_lock(&br->multicast_lock);
	if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    br->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&br->multicast_router_timer))
		goto out;

	br_mc_router_state_change(br, false);
out:
	spin_unlock(&br->multicast_lock);
}

/* The other querier's presence timer ran out - start querying ourselves. */
static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif

/* Record the source address of our own query as the querier address. */
static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

/* Build and transmit one query.  With a @port the packet is sent out that
 * port via the NF_BR_LOCAL_OUT hook; without one it is looped back into the
 * bridge as if received (we are the querier).  For a group-and-source query
 * with the suppress flag set, a second copy covering the sources whose
 * timers are under the last member query time is sent as well (the
 * again_under_lmqt retry).
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

again_under_lmqt:
	skb = br_multicast_alloc_query(br, pg, ip_dst, group, with_srcs,
				       over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(br, group, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

/* Send a periodic general query (if we are the active querier) and rearm
 * the own-query timer.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned
long time;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	/* defer to a foreign querier that is still present */
	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, NULL, NULL, &br_group, false, 0,
				  NULL);

	time = jiffies;
	/* query more frequently during the startup phase */
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

/* A port's own-query timer expired: send the next general query unless the
 * port is disabled or blocking.
 */
static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif

/* Retransmit pending group and group-and-source queries for @pg while we
 * are the active querier; rearms itself while retransmissions remain.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
					     br->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}

/* Notify switchdev whether multicast snooping is disabled on @dev. */
static void br_mc_disabled_update(struct net_device *dev, bool value)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};

	switchdev_port_attr_set(dev, &attr, NULL);
}

/* Initialize per-port multicast state; called when @port is added to the
 * bridge.  Returns -ENOMEM when the per-cpu stats allocation fails.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;

	timer_setup(&port->multicast_router_timer,
		    br_multicast_router_expired, 0);
	timer_setup(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
	br_mc_disabled_update(port->dev,
			      br_opt_get(port->br, BROPT_MULTICAST_ENABLED));

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

/* Tear down per-port multicast state when @port is removed: flush its
 * remaining groups, run the pending gc and free the stats.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);
	br_multicast_gc(&deleted_head);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}

/* Restart own queries from the startup phase, firing immediately. */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

/* Kick off own queries on @port and re-add it as a router port when it is
 * configured as a permanent router.  Caller holds multicast_lock.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	__br_multicast_enable_port(port);
	spin_unlock(&br->multicast_lock);
}

/* Stop multicast processing on @port: flush its non-permanent groups, drop
 * the router port state and stop all per-port timers.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_find_del_pg(br, pg);

	__del_port_router(port);

	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}

/* Delete all source entries marked BR_SGRP_F_DELETE; returns the count. */
static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int deleted = 0;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		if (ent->flags & BR_SGRP_F_DELETE) {
			br_multicast_del_group_src(ent, false);
			deleted++;
		}

	return deleted;
}

/* Rearm a source timer and update its forwarding state. */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}

/* Lower the timers of all sources marked BR_SGRP_F_SEND to the last member
 * query time and, when we are the active querier, send a group-and-source
 * query and schedule retransmissions.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	u32 lmqc = br->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(br);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	if (!br_opt_get(br, BROPT_MULTICAST_QUERIER) ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + br->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}

/* Send a group-specific query for @pg (when we are the active querier),
 * schedule retransmissions, and in EXCLUDE mode lower the group timer to
 * the last member query time.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	unsigned long now = jiffies, lmi;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + br->multicast_last_member_interval;
		pg->grp_query_rexmit_cnt = br->multicast_last_member_count - 1;
		__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(br))))
		mod_timer(&pg->timer,
			  now + br_multicast_lmqt(br));
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)     (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)     (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 */
static bool br_multicast_isinc_allow(struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(br));
	}

	if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
		changed = true;

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Group Timer=GMI
 */
static void __grp_src_isexc_incl(struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* mark everything for deletion, then unmark what B retains */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type);

	__grp_src_delete_marked(pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Group Timer=GMI
 */
static bool __grp_src_isexc_excl(struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(br));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}

/* IS_EX record handler: dispatches on the current filter mode, switches the
 * group to EXCLUDE and refreshes the group timer.
 */
static bool br_multicast_isexc(struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	struct net_bridge *br = pg->key.port->br;
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		br_multicast_star_g_handle_mode(pg,
						MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(pg, h_addr, srcs, nsrcs, addr_size,
					       grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)     (B)=GMI
 *                                                Send Q(G,A-B)
 */
static bool __grp_src_toin_incl(struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge *br = pg->key.port->br;
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* mark all current sources for querying, unmark those in B */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(br));
	}

	if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	return changed;
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *                                                Send Q(G,X-A)
 *                                                Send Q(G)
 */
static bool __grp_src_toin_excl(struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge *br = pg->key.port->br;
	u32
src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* only sources with a running timer (in X) are candidates to query */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(br));
	}

	if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	__grp_send_query_and_rexmit(pg);

	return changed;
}

/* TO_IN record handler; may delete @pg entirely when EHT host tracking says
 * the last host left, in which case false is returned since @pg must not be
 * touched afterwards.
 */
static bool br_multicast_toin(struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(pg, h_addr, srcs, nsrcs, addr_size,
					      grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(pg, h_addr, srcs, nsrcs, addr_size,
					      grec_type);
		break;
	}

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_EX (B)
EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Send Q(G,A*B)
 *                                                Group Timer=GMI
 */
static void __grp_src_toex_incl(struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	/* mark all for deletion; sources also in B flip to query instead */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Send Q(G,A-Y)
 *                                                Group Timer=GMI
 */
static bool __grp_src_toex_excl(struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent =
br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* new sources inherit the group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	return changed;
}

/* TO_EX record handler: dispatches on the current filter mode, switches the
 * group to EXCLUDE and refreshes the group timer.
 */
static bool br_multicast_toex(struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	struct net_bridge *br = pg->key.port->br;
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(pg, h_addr, srcs, nsrcs, addr_size,
					      grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)       Send Q(G,A*B)
 */
static bool __grp_src_block_incl(struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	return changed;
}

/* State          Msg type      New state           Actions
 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
 *                                                  Send Q(G,A-Y)
 */
static bool __grp_src_block_excl(struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* new sources inherit the group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	return changed;
}

/* BLOCK record handler; deletes @pg when an INCLUDE group loses its last
 * source or when EHT host tracking requires it (returning false since @pg
 * must not be touched afterwards).
 */
static bool br_multicast_block(struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(pg, h_addr, srcs, nsrcs, addr_size,
					       grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(pg, h_addr, srcs, nsrcs, addr_size,
					       grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* Find the port group entry for bridge port @p (matching @src) in @mp's
 * port list. Caller must hold the bridge multicast lock (mlock_dereference).
 */
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
		       struct net_bridge_port *p,
		       const unsigned char *src)
{
	struct net_bridge *br __maybe_unused = mp->br;
	struct net_bridge_port_group *pg;

	for (pg = mlock_dereference(mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br))
		if (br_port_group_equal(pg, p, src))
			return pg;

	return NULL;
}

/* Parse an IGMPv3 membership report and apply each group record to the
 * matching mdb/port-group state. Records with unknown types are skipped;
 * in IGMPv2-compat mode (or for host-originated reports, port == NULL)
 * only plain join/leave processing is done.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = br->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		/* bounds-check the fixed record header before touching it */
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* 4 == sizeof an IPv4 source address in the record */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* TO_IN{}/IS_IN{} with no sources == leave */
			if (!port || igmpv2) {
				br_ip4_multicast_leave_group(br, port, group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src, igmpv2);
			if (err)
				break;
		}

		if (!port || igmpv2)
			continue;

		spin_lock_bh(&br->multicast_lock);
		mdst = br_mdb_ip4_get(br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(pg, h_addr, grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(pg, h_addr, grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(pg, h_addr, grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(pg, h_addr, grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(pg, h_addr, grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(pg, h_addr, grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&br->multicast_lock);
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 report; the IPv6 counterpart of the IGMPv3 handler above.
 * grec_nsrcs is read via skb_header_pointer() before the record is pulled
 * because records are variable-length.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = br->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	/* number of group records lives in the second 16-bit data word */
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}
		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* TO_IN{}/IS_IN{} with no sources == leave */
			if (!port || mldv1) {
				br_ip6_multicast_leave_group(br, port,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		if (!port || mldv1)
			continue;

		spin_lock_bh(&br->multicast_lock);
		mdst = br_mdb_ip6_get(br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&br->multicast_lock);
	}

	return err;
}
#endif

/* Querier election: decide whether the query source @saddr should become
 * the selected IPv4 querier. The numerically lower source address wins;
 * returns true if our recorded querier was updated.
 */
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	/* no querier seen yet (neither own nor other query timer armed) */
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.src.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.src.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.src.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 querier election, mirroring the IPv4 variant above. */
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.src.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.src.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

/* Protocol dispatch wrapper around the per-family querier election. */
static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->src.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->src.ip6);
#endif
	}

	return false;
}

/* (Re)arm the "other querier present" timer; delay_time records when we
 * may start querying ourselves if that querier goes silent.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if
	(!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

/* Propagate the multicast-router state of port @p to switchdev drivers. */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	/* already on the router list */
	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
	br_port_mc_router_state_change(port, true);
}

/* Mark @port (or the bridge itself when @port is NULL) as having a
 * multicast router behind it and refresh the corresponding timer.
 * Permanent/disabled router configuration is left untouched.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!timer_pending(&br->multicast_router_timer))
				br_mc_router_state_change(br, true);
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		}
		return;
	}

	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

/* A query from another device was seen: run querier election and, if the
 * sender won, note the foreign querier and treat its port as a router port.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}

/* Process a received IGMP query: handle querier election for general
 * queries and shorten membership timers for group-specific queries.
 */
static void br_ip4_multicast_query(struct net_bridge *br,
				   struct net_bridge_port *port,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv2 (or v1 when code == 0) query */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1: fixed 10s response time, always general */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs ||
		    (br->multicast_igmp_version == 3 && group && ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query: only querier election matters */
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (br->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Process a received MLD query; IPv6 counterpart of the handler above. */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		if (br->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		/* general query: only querier election matters */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (br->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

/* Common leave handling: on fast-leave delete the port group immediately;
 * otherwise (when we are the querier) send a group-specific query and
 * shorten the relevant membership timers to last-member values.
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_mdb_ip_get(br, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* another querier is responsible for follow-up queries */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
		__br_multicast_send_query(br, port, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* host leave: only the host-joined timer is affected */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->key.port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

/* IGMP leave wrapper: ignores link-local groups and selects the right
 * own-query context before delegating to br_multicast_leave_group().
 */
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ?
		    &port->ip4_own_query : &br->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD leave wrapper, IPv6 counterpart of the function above. */
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query, src);
}
#endif

/* Bump the per-cpu IGMP/MLD parse-error counter for @p (or the bridge
 * itself when @p is NULL), if multicast stats are enabled.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

/* A PIM Hello on @port implies a multicast router behind it. */
static void br_multicast_pim(struct net_bridge *br,
			     struct net_bridge_port *port,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	br_multicast_mark_router(br, port);
}

/* IPv4 Multicast Router Discovery advertisement: mark the sender's port
 * as a router port. Returns -ENOMSG for packets that are not MRD ADV.
 */
static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	br_multicast_mark_router(br, port);

	return 0;
}

/* Validate and dispatch an IPv4 multicast packet (IGMP, PIM hello, MRD).
 * Non-IGMP traffic just sets the mrouters_only hint for forwarding.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* valid multicast, but not an IGMP packet */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(br, port, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src,
						 true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(br, port, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 Multicast Router Discovery advertisement handler. */
static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct sk_buff *skb)
{
	int ret;

	if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
		return -ENOMSG;

	ret = ipv6_mc_check_icmpv6(skb);
	if (ret < 0)
		return ret;

	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return -ENOMSG;

	br_multicast_mark_router(br, port);

	return 0;
}

/* Validate and dispatch an IPv6 multicast packet (MLD, MRD); IPv6
 * counterpart of br_multicast_ipv4_rcv().
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG) {
		/* valid multicast, but not an MLD packet */
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;

		if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
			err = br_ip6_multicast_mrd_rcv(br, port, skb);

			if (err < 0 && err != -ENOMSG) {
				br_multicast_err_count(br, port, skb->protocol);
				return err;
			}
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port,
					     skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

/* Entry point for multicast snooping on received frames: dispatch by
 * ethertype when snooping is enabled.
 */
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
		break;
#endif
	}

	return ret;
}

/* Own-query timer expired: send the next (startup) query and drop the
 * recorded querier port.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

/* Deferred garbage collection of mdb entries queued on mcast_gc_list. */
static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}

/* Initialize all multicast snooping state of a newly created bridge:
 * default intervals/counters, timers, lists and the GC work item.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	timer_setup(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	timer_setup(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&br->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&br->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}

/* Join the IPv4 all-snoopers group (224.0.0.106) on the bridge device. */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct
__br_multicast_open(br, &br->ip6_own_query); 3424 #endif 3425 } 3426 3427 void br_multicast_stop(struct net_bridge *br) 3428 { 3429 del_timer_sync(&br->multicast_router_timer); 3430 del_timer_sync(&br->ip4_other_query.timer); 3431 del_timer_sync(&br->ip4_own_query.timer); 3432 #if IS_ENABLED(CONFIG_IPV6) 3433 del_timer_sync(&br->ip6_other_query.timer); 3434 del_timer_sync(&br->ip6_own_query.timer); 3435 #endif 3436 } 3437 3438 void br_multicast_dev_del(struct net_bridge *br) 3439 { 3440 struct net_bridge_mdb_entry *mp; 3441 HLIST_HEAD(deleted_head); 3442 struct hlist_node *tmp; 3443 3444 spin_lock_bh(&br->multicast_lock); 3445 hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) 3446 br_multicast_del_mdb_entry(mp); 3447 hlist_move_list(&br->mcast_gc_list, &deleted_head); 3448 spin_unlock_bh(&br->multicast_lock); 3449 3450 br_multicast_gc(&deleted_head); 3451 cancel_work_sync(&br->mcast_gc_work); 3452 3453 rcu_barrier(); 3454 } 3455 3456 int br_multicast_set_router(struct net_bridge *br, unsigned long val) 3457 { 3458 int err = -EINVAL; 3459 3460 spin_lock_bh(&br->multicast_lock); 3461 3462 switch (val) { 3463 case MDB_RTR_TYPE_DISABLED: 3464 case MDB_RTR_TYPE_PERM: 3465 br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM); 3466 del_timer(&br->multicast_router_timer); 3467 br->multicast_router = val; 3468 err = 0; 3469 break; 3470 case MDB_RTR_TYPE_TEMP_QUERY: 3471 if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY) 3472 br_mc_router_state_change(br, false); 3473 br->multicast_router = val; 3474 err = 0; 3475 break; 3476 } 3477 3478 spin_unlock_bh(&br->multicast_lock); 3479 3480 return err; 3481 } 3482 3483 static void __del_port_router(struct net_bridge_port *p) 3484 { 3485 if (hlist_unhashed(&p->rlist)) 3486 return; 3487 hlist_del_init_rcu(&p->rlist); 3488 br_rtr_notify(p->br->dev, p, RTM_DELMDB); 3489 br_port_mc_router_state_change(p, false); 3490 3491 /* don't allow timer refresh */ 3492 if (p->multicast_router == MDB_RTR_TYPE_TEMP) 3493 
p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 3494 } 3495 3496 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val) 3497 { 3498 struct net_bridge *br = p->br; 3499 unsigned long now = jiffies; 3500 int err = -EINVAL; 3501 3502 spin_lock(&br->multicast_lock); 3503 if (p->multicast_router == val) { 3504 /* Refresh the temp router port timer */ 3505 if (p->multicast_router == MDB_RTR_TYPE_TEMP) 3506 mod_timer(&p->multicast_router_timer, 3507 now + br->multicast_querier_interval); 3508 err = 0; 3509 goto unlock; 3510 } 3511 switch (val) { 3512 case MDB_RTR_TYPE_DISABLED: 3513 p->multicast_router = MDB_RTR_TYPE_DISABLED; 3514 __del_port_router(p); 3515 del_timer(&p->multicast_router_timer); 3516 break; 3517 case MDB_RTR_TYPE_TEMP_QUERY: 3518 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 3519 __del_port_router(p); 3520 break; 3521 case MDB_RTR_TYPE_PERM: 3522 p->multicast_router = MDB_RTR_TYPE_PERM; 3523 del_timer(&p->multicast_router_timer); 3524 br_multicast_add_router(br, p); 3525 break; 3526 case MDB_RTR_TYPE_TEMP: 3527 p->multicast_router = MDB_RTR_TYPE_TEMP; 3528 br_multicast_mark_router(br, p); 3529 break; 3530 default: 3531 goto unlock; 3532 } 3533 err = 0; 3534 unlock: 3535 spin_unlock(&br->multicast_lock); 3536 3537 return err; 3538 } 3539 3540 static void br_multicast_start_querier(struct net_bridge *br, 3541 struct bridge_mcast_own_query *query) 3542 { 3543 struct net_bridge_port *port; 3544 3545 __br_multicast_open(br, query); 3546 3547 rcu_read_lock(); 3548 list_for_each_entry_rcu(port, &br->port_list, list) { 3549 if (port->state == BR_STATE_DISABLED || 3550 port->state == BR_STATE_BLOCKING) 3551 continue; 3552 3553 if (query == &br->ip4_own_query) 3554 br_multicast_enable(&port->ip4_own_query); 3555 #if IS_ENABLED(CONFIG_IPV6) 3556 else 3557 br_multicast_enable(&port->ip6_own_query); 3558 #endif 3559 } 3560 rcu_read_unlock(); 3561 } 3562 3563 int br_multicast_toggle(struct net_bridge *br, unsigned long val) 3564 { 3565 struct 
net_bridge_port *port;
	bool change_snoopers = false;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	br_mc_disabled_update(br->dev, val);
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return 0;
}

/* Report whether multicast snooping is enabled on a bridge device. */
bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);

/* Report whether the bridge currently acts as a multicast router. */
bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(br);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);

/* Enable or disable acting as IGMP/MLD querier.  When enabling, seed the
 * other-querier delay windows and start own queries for both address
 * families.  Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

/* Set the IGMP version used by the bridge.  Returns 0 or -EINVAL. */
int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
{
	/* Currently we support only version 2 and 3 */
	switch (val) {
	case 2:
	case 3:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&br->multicast_lock);
	br->multicast_igmp_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Set the MLD version used by the bridge.  Returns 0 or -EINVAL. */
int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
{
	/* Currently we support version 1 and 2 */
	switch (val) {
	case 1:
	case 2:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&br->multicast_lock);
	br->multicast_mld_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
#endif

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
3770 */ 3771 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) 3772 { 3773 struct net_bridge *br; 3774 struct net_bridge_port *port; 3775 struct ethhdr eth; 3776 bool ret = false; 3777 3778 rcu_read_lock(); 3779 if (!netif_is_bridge_port(dev)) 3780 goto unlock; 3781 3782 port = br_port_get_rcu(dev); 3783 if (!port || !port->br) 3784 goto unlock; 3785 3786 br = port->br; 3787 3788 memset(ð, 0, sizeof(eth)); 3789 eth.h_proto = htons(proto); 3790 3791 ret = br_multicast_querier_exists(br, ð, NULL); 3792 3793 unlock: 3794 rcu_read_unlock(); 3795 return ret; 3796 } 3797 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere); 3798 3799 /** 3800 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port 3801 * @dev: The bridge port adjacent to which to check for a querier 3802 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 3803 * 3804 * Checks whether the given interface has a bridge on top and if so returns 3805 * true if a selected querier is behind one of the other ports of this 3806 * bridge. Otherwise returns false. 
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	switch (proto) {
	case ETH_P_IP:
		/* no foreign querier, or we are the selected querier port */
		if (!timer_pending(&br->ip4_other_query.timer) ||
		    rcu_dereference(br->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&br->ip6_other_query.timer) ||
		    rcu_dereference(br->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

/* Account one IGMP/MLD packet of @type in the per-CPU stats, classifying
 * queries by protocol version from the transport length and header
 * fields.  @dir is BR_MCAST_DIR_RX or BR_MCAST_DIR_TX.
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* only v3 queries are longer than struct igmphdr */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				/* v1 queries carry a zero max resp code */
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* only v2 queries are longer than struct mld_msg */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}

/* Count an IGMP/MLD packet against the port's stats when @p is given,
 * otherwise against the bridge-wide stats.  No-op when stats are
 * disabled or @type is 0.
 */
void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}

/* Allocate the bridge-wide per-CPU multicast statistics. */
int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

/* Free the bridge-wide per-CPU multicast statistics. */
void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}

/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

/* Sum the per-CPU multicast statistics of the port (or of the bridge
 * when @p is NULL) into @dest, using the u64 stats syncp to obtain a
 * consistent snapshot of each CPU's counters.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* retry until the writer side didn't race with us */
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}

/* Initialize the S,G-port and MDB rhashtables.  Returns 0 or a negative
 * errno; on failure neither table is left initialized.
 */
int br_mdb_hash_init(struct net_bridge *br)
{
	int err;

	err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
	if (err)
		return err;

	err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
	if (err) {
		rhashtable_destroy(&br->sg_port_tbl);
		return err;
	}

	return 0;
}

/* Destroy both multicast rhashtables; counterpart of br_mdb_hash_init(). */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}