// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif
#include <trace/events/bridge.h>

#include "br_private.h"
#include "br_private_mcast_eht.h"

/* MDB hash table: keyed by the full struct br_ip (proto, vid, dst and
 * possibly src), one entry per multicast group known to the bridge.
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

/* S,G per-port table: keyed by (port, addr) so a specific port's S,G
 * port group entry can be found directly.
 */
static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};

static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void
br_multicast_port_group_rexmit(struct timer_list *t);

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);

static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack);

/* Look up the S,G port group entry for @sg_p (port + address key).
 * Caller must hold br->multicast_lock.
 */
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}

/* MDB lookup for callers already inside an RCU read-side section. */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

/* MDB lookup for callers holding br->multicast_lock; takes a short RCU
 * section just for the hash table walk.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

/* Convenience wrapper: build an IPv4 *,G key and look it up. */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Convenience wrapper: build an IPv6 *,G key and look it up. */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif

/* Find the MDB entry a forwarded skb should be replicated from.
 * Tries an S,G lookup first when IGMPv3/MLDv2 is configured, then falls
 * back to *,G; non-IP traffic is looked up by destination MAC address.
 * Returns NULL when snooping is disabled for this context or the skb is
 * itself an IGMP/MLD packet.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			/* S,G first, then clear src for the *,G fallback */
			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		/* non-IP multicast: L2 lookup by destination MAC */
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}

/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g.
timer) and must be used for read-only purposes because 202 * the vlan snooping option can change, so it can return any context 203 * (non-vlan or vlan). Its initial intended purpose is to read timer values 204 * from the *current* context based on the option. At worst that could lead 205 * to inconsistent timers when the contexts are changed, i.e. src timer 206 * which needs to re-arm with a specific delay taken from the old context 207 */ 208 static struct net_bridge_mcast_port * 209 br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg) 210 { 211 struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx; 212 struct net_bridge_vlan *vlan; 213 214 lockdep_assert_held_once(&pg->key.port->br->multicast_lock); 215 216 /* if vlan snooping is disabled use the port's multicast context */ 217 if (!pg->key.addr.vid || 218 !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) 219 goto out; 220 221 /* locking is tricky here, due to different rules for multicast and 222 * vlans we need to take rcu to find the vlan and make sure it has 223 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under 224 * multicast_lock which must be already held here, so the vlan's pmctx 225 * can safely be used on return 226 */ 227 rcu_read_lock(); 228 vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid); 229 if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx)) 230 pmctx = &vlan->port_mcast_ctx; 231 else 232 pmctx = NULL; 233 rcu_read_unlock(); 234 out: 235 return pmctx; 236 } 237 238 static struct net_bridge_mcast_port * 239 br_multicast_port_vid_to_port_ctx(struct net_bridge_port *port, u16 vid) 240 { 241 struct net_bridge_mcast_port *pmctx = NULL; 242 struct net_bridge_vlan *vlan; 243 244 lockdep_assert_held_once(&port->br->multicast_lock); 245 246 if (!br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) 247 return NULL; 248 249 /* Take RCU to access the vlan. 
*/ 250 rcu_read_lock(); 251 252 vlan = br_vlan_find(nbp_vlan_group_rcu(port), vid); 253 if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx)) 254 pmctx = &vlan->port_mcast_ctx; 255 256 rcu_read_unlock(); 257 258 return pmctx; 259 } 260 261 /* when snooping we need to check if the contexts should be used 262 * in the following order: 263 * - if pmctx is non-NULL (port), check if it should be used 264 * - if pmctx is NULL (bridge), check if brmctx should be used 265 */ 266 static bool 267 br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx, 268 const struct net_bridge_mcast_port *pmctx) 269 { 270 if (!netif_running(brmctx->br->dev)) 271 return false; 272 273 if (pmctx) 274 return !br_multicast_port_ctx_state_disabled(pmctx); 275 else 276 return !br_multicast_ctx_vlan_disabled(brmctx); 277 } 278 279 static bool br_port_group_equal(struct net_bridge_port_group *p, 280 struct net_bridge_port *port, 281 const unsigned char *src) 282 { 283 if (p->key.port != port) 284 return false; 285 286 if (!(port->flags & BR_MULTICAST_TO_UNICAST)) 287 return true; 288 289 return ether_addr_equal(src, p->eth_addr); 290 } 291 292 static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx, 293 struct net_bridge_port_group *pg, 294 struct br_ip *sg_ip) 295 { 296 struct net_bridge_port_group_sg_key sg_key; 297 struct net_bridge_port_group *src_pg; 298 struct net_bridge_mcast *brmctx; 299 300 memset(&sg_key, 0, sizeof(sg_key)); 301 brmctx = br_multicast_port_ctx_get_global(pmctx); 302 sg_key.port = pg->key.port; 303 sg_key.addr = *sg_ip; 304 if (br_sg_port_find(brmctx->br, &sg_key)) 305 return; 306 307 src_pg = __br_multicast_add_group(brmctx, pmctx, 308 sg_ip, pg->eth_addr, 309 MCAST_INCLUDE, false, false); 310 if (IS_ERR_OR_NULL(src_pg) || 311 src_pg->rt_protocol != RTPROT_KERNEL) 312 return; 313 314 src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL; 315 } 316 317 static void __fwd_del_star_excl(struct net_bridge_port_group *pg, 318 struct br_ip *sg_ip) 319 
{ 320 struct net_bridge_port_group_sg_key sg_key; 321 struct net_bridge *br = pg->key.port->br; 322 struct net_bridge_port_group *src_pg; 323 324 memset(&sg_key, 0, sizeof(sg_key)); 325 sg_key.port = pg->key.port; 326 sg_key.addr = *sg_ip; 327 src_pg = br_sg_port_find(br, &sg_key); 328 if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) || 329 src_pg->rt_protocol != RTPROT_KERNEL) 330 return; 331 332 br_multicast_find_del_pg(br, src_pg); 333 } 334 335 /* When a port group transitions to (or is added as) EXCLUDE we need to add it 336 * to all other ports' S,G entries which are not blocked by the current group 337 * for proper replication, the assumption is that any S,G blocked entries 338 * are already added so the S,G,port lookup should skip them. 339 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being 340 * deleted we need to remove it from all ports' S,G entries where it was 341 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL). 342 */ 343 void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg, 344 u8 filter_mode) 345 { 346 struct net_bridge *br = pg->key.port->br; 347 struct net_bridge_port_group *pg_lst; 348 struct net_bridge_mcast_port *pmctx; 349 struct net_bridge_mdb_entry *mp; 350 struct br_ip sg_ip; 351 352 if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr))) 353 return; 354 355 mp = br_mdb_ip_get(br, &pg->key.addr); 356 if (!mp) 357 return; 358 pmctx = br_multicast_pg_to_port_ctx(pg); 359 if (!pmctx) 360 return; 361 362 memset(&sg_ip, 0, sizeof(sg_ip)); 363 sg_ip = pg->key.addr; 364 365 for (pg_lst = mlock_dereference(mp->ports, br); 366 pg_lst; 367 pg_lst = mlock_dereference(pg_lst->next, br)) { 368 struct net_bridge_group_src *src_ent; 369 370 if (pg_lst == pg) 371 continue; 372 hlist_for_each_entry(src_ent, &pg_lst->src_list, node) { 373 if (!(src_ent->flags & BR_SGRP_F_INSTALLED)) 374 continue; 375 sg_ip.src = src_ent->addr.src; 376 switch (filter_mode) { 377 case MCAST_INCLUDE: 378 
__fwd_del_star_excl(pg, &sg_ip); 379 break; 380 case MCAST_EXCLUDE: 381 __fwd_add_star_excl(pmctx, pg, &sg_ip); 382 break; 383 } 384 } 385 } 386 } 387 388 /* called when adding a new S,G with host_joined == false by default */ 389 static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp, 390 struct net_bridge_port_group *sg) 391 { 392 struct net_bridge_mdb_entry *sg_mp; 393 394 if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr))) 395 return; 396 if (!star_mp->host_joined) 397 return; 398 399 sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr); 400 if (!sg_mp) 401 return; 402 sg_mp->host_joined = true; 403 } 404 405 /* set the host_joined state of all of *,G's S,G entries */ 406 static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp) 407 { 408 struct net_bridge *br = star_mp->br; 409 struct net_bridge_mdb_entry *sg_mp; 410 struct net_bridge_port_group *pg; 411 struct br_ip sg_ip; 412 413 if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr))) 414 return; 415 416 memset(&sg_ip, 0, sizeof(sg_ip)); 417 sg_ip = star_mp->addr; 418 for (pg = mlock_dereference(star_mp->ports, br); 419 pg; 420 pg = mlock_dereference(pg->next, br)) { 421 struct net_bridge_group_src *src_ent; 422 423 hlist_for_each_entry(src_ent, &pg->src_list, node) { 424 if (!(src_ent->flags & BR_SGRP_F_INSTALLED)) 425 continue; 426 sg_ip.src = src_ent->addr.src; 427 sg_mp = br_mdb_ip_get(br, &sg_ip); 428 if (!sg_mp) 429 continue; 430 sg_mp->host_joined = star_mp->host_joined; 431 } 432 } 433 } 434 435 static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp) 436 { 437 struct net_bridge_port_group __rcu **pp; 438 struct net_bridge_port_group *p; 439 440 /* *,G exclude ports are only added to S,G entries */ 441 if (WARN_ON(br_multicast_is_star_g(&sgmp->addr))) 442 return; 443 444 /* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports 445 * we should ignore perm entries since they're managed by user-space 446 */ 447 for (pp = 
&sgmp->ports; 448 (p = mlock_dereference(*pp, sgmp->br)) != NULL; 449 pp = &p->next) 450 if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL | 451 MDB_PG_FLAGS_PERMANENT))) 452 return; 453 454 /* currently the host can only have joined the *,G which means 455 * we treat it as EXCLUDE {}, so for an S,G it's considered a 456 * STAR_EXCLUDE entry and we can safely leave it 457 */ 458 sgmp->host_joined = false; 459 460 for (pp = &sgmp->ports; 461 (p = mlock_dereference(*pp, sgmp->br)) != NULL;) { 462 if (!(p->flags & MDB_PG_FLAGS_PERMANENT)) 463 br_multicast_del_pg(sgmp, p, pp); 464 else 465 pp = &p->next; 466 } 467 } 468 469 void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp, 470 struct net_bridge_port_group *sg) 471 { 472 struct net_bridge_port_group_sg_key sg_key; 473 struct net_bridge *br = star_mp->br; 474 struct net_bridge_mcast_port *pmctx; 475 struct net_bridge_port_group *pg; 476 struct net_bridge_mcast *brmctx; 477 478 if (WARN_ON(br_multicast_is_star_g(&sg->key.addr))) 479 return; 480 if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr))) 481 return; 482 483 br_multicast_sg_host_state(star_mp, sg); 484 memset(&sg_key, 0, sizeof(sg_key)); 485 sg_key.addr = sg->key.addr; 486 /* we need to add all exclude ports to the S,G */ 487 for (pg = mlock_dereference(star_mp->ports, br); 488 pg; 489 pg = mlock_dereference(pg->next, br)) { 490 struct net_bridge_port_group *src_pg; 491 492 if (pg == sg || pg->filter_mode == MCAST_INCLUDE) 493 continue; 494 495 sg_key.port = pg->key.port; 496 if (br_sg_port_find(br, &sg_key)) 497 continue; 498 499 pmctx = br_multicast_pg_to_port_ctx(pg); 500 if (!pmctx) 501 continue; 502 brmctx = br_multicast_port_ctx_get_global(pmctx); 503 504 src_pg = __br_multicast_add_group(brmctx, pmctx, 505 &sg->key.addr, 506 sg->eth_addr, 507 MCAST_INCLUDE, false, false); 508 if (IS_ERR_OR_NULL(src_pg) || 509 src_pg->rt_protocol != RTPROT_KERNEL) 510 continue; 511 src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL; 512 } 513 } 514 515 static void 
br_multicast_fwd_src_add(struct net_bridge_group_src *src) 516 { 517 struct net_bridge_mdb_entry *star_mp; 518 struct net_bridge_mcast_port *pmctx; 519 struct net_bridge_port_group *sg; 520 struct net_bridge_mcast *brmctx; 521 struct br_ip sg_ip; 522 523 if (src->flags & BR_SGRP_F_INSTALLED) 524 return; 525 526 memset(&sg_ip, 0, sizeof(sg_ip)); 527 pmctx = br_multicast_pg_to_port_ctx(src->pg); 528 if (!pmctx) 529 return; 530 brmctx = br_multicast_port_ctx_get_global(pmctx); 531 sg_ip = src->pg->key.addr; 532 sg_ip.src = src->addr.src; 533 534 sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip, 535 src->pg->eth_addr, MCAST_INCLUDE, false, 536 !timer_pending(&src->timer)); 537 if (IS_ERR_OR_NULL(sg)) 538 return; 539 src->flags |= BR_SGRP_F_INSTALLED; 540 sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL; 541 542 /* if it was added by user-space as perm we can skip next steps */ 543 if (sg->rt_protocol != RTPROT_KERNEL && 544 (sg->flags & MDB_PG_FLAGS_PERMANENT)) 545 return; 546 547 /* the kernel is now responsible for removing this S,G */ 548 del_timer(&sg->timer); 549 star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr); 550 if (!star_mp) 551 return; 552 553 br_multicast_sg_add_exclude_ports(star_mp, sg); 554 } 555 556 static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src, 557 bool fastleave) 558 { 559 struct net_bridge_port_group *p, *pg = src->pg; 560 struct net_bridge_port_group __rcu **pp; 561 struct net_bridge_mdb_entry *mp; 562 struct br_ip sg_ip; 563 564 memset(&sg_ip, 0, sizeof(sg_ip)); 565 sg_ip = pg->key.addr; 566 sg_ip.src = src->addr.src; 567 568 mp = br_mdb_ip_get(src->br, &sg_ip); 569 if (!mp) 570 return; 571 572 for (pp = &mp->ports; 573 (p = mlock_dereference(*pp, src->br)) != NULL; 574 pp = &p->next) { 575 if (!br_port_group_equal(p, pg->key.port, pg->eth_addr)) 576 continue; 577 578 if (p->rt_protocol != RTPROT_KERNEL && 579 (p->flags & MDB_PG_FLAGS_PERMANENT) && 580 !(src->flags & BR_SGRP_F_USER_ADDED)) 581 break; 582 583 if (fastleave) 584 
p->flags |= MDB_PG_FLAGS_FAST_LEAVE; 585 br_multicast_del_pg(mp, p, pp); 586 break; 587 } 588 src->flags &= ~BR_SGRP_F_INSTALLED; 589 } 590 591 /* install S,G and based on src's timer enable or disable forwarding */ 592 static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src) 593 { 594 struct net_bridge_port_group_sg_key sg_key; 595 struct net_bridge_port_group *sg; 596 u8 old_flags; 597 598 br_multicast_fwd_src_add(src); 599 600 memset(&sg_key, 0, sizeof(sg_key)); 601 sg_key.addr = src->pg->key.addr; 602 sg_key.addr.src = src->addr.src; 603 sg_key.port = src->pg->key.port; 604 605 sg = br_sg_port_find(src->br, &sg_key); 606 if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT)) 607 return; 608 609 old_flags = sg->flags; 610 if (timer_pending(&src->timer)) 611 sg->flags &= ~MDB_PG_FLAGS_BLOCKED; 612 else 613 sg->flags |= MDB_PG_FLAGS_BLOCKED; 614 615 if (old_flags != sg->flags) { 616 struct net_bridge_mdb_entry *sg_mp; 617 618 sg_mp = br_mdb_ip_get(src->br, &sg_key.addr); 619 if (!sg_mp) 620 return; 621 br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB); 622 } 623 } 624 625 static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc) 626 { 627 struct net_bridge_mdb_entry *mp; 628 629 mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc); 630 WARN_ON(!hlist_unhashed(&mp->mdb_node)); 631 WARN_ON(mp->ports); 632 633 timer_shutdown_sync(&mp->timer); 634 kfree_rcu(mp, rcu); 635 } 636 637 static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp) 638 { 639 struct net_bridge *br = mp->br; 640 641 rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode, 642 br_mdb_rht_params); 643 hlist_del_init_rcu(&mp->mdb_node); 644 hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list); 645 queue_work(system_long_wq, &br->mcast_gc_work); 646 } 647 648 static void br_multicast_group_expired(struct timer_list *t) 649 { 650 struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer); 651 struct net_bridge *br = mp->br; 652 653 
spin_lock(&br->multicast_lock); 654 if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) || 655 timer_pending(&mp->timer)) 656 goto out; 657 658 br_multicast_host_leave(mp, true); 659 660 if (mp->ports) 661 goto out; 662 br_multicast_del_mdb_entry(mp); 663 out: 664 spin_unlock(&br->multicast_lock); 665 } 666 667 static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc) 668 { 669 struct net_bridge_group_src *src; 670 671 src = container_of(gc, struct net_bridge_group_src, mcast_gc); 672 WARN_ON(!hlist_unhashed(&src->node)); 673 674 timer_shutdown_sync(&src->timer); 675 kfree_rcu(src, rcu); 676 } 677 678 void __br_multicast_del_group_src(struct net_bridge_group_src *src) 679 { 680 struct net_bridge *br = src->pg->key.port->br; 681 682 hlist_del_init_rcu(&src->node); 683 src->pg->src_ents--; 684 hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list); 685 queue_work(system_long_wq, &br->mcast_gc_work); 686 } 687 688 void br_multicast_del_group_src(struct net_bridge_group_src *src, 689 bool fastleave) 690 { 691 br_multicast_fwd_src_remove(src, fastleave); 692 __br_multicast_del_group_src(src); 693 } 694 695 static int 696 br_multicast_port_ngroups_inc_one(struct net_bridge_mcast_port *pmctx, 697 struct netlink_ext_ack *extack, 698 const char *what) 699 { 700 u32 max = READ_ONCE(pmctx->mdb_max_entries); 701 u32 n = READ_ONCE(pmctx->mdb_n_entries); 702 703 if (max && n >= max) { 704 NL_SET_ERR_MSG_FMT_MOD(extack, "%s is already in %u groups, and mcast_max_groups=%u", 705 what, n, max); 706 return -E2BIG; 707 } 708 709 WRITE_ONCE(pmctx->mdb_n_entries, n + 1); 710 return 0; 711 } 712 713 static void br_multicast_port_ngroups_dec_one(struct net_bridge_mcast_port *pmctx) 714 { 715 u32 n = READ_ONCE(pmctx->mdb_n_entries); 716 717 WARN_ON_ONCE(n == 0); 718 WRITE_ONCE(pmctx->mdb_n_entries, n - 1); 719 } 720 721 static int br_multicast_port_ngroups_inc(struct net_bridge_port *port, 722 const struct br_ip *group, 723 struct netlink_ext_ack *extack) 724 
{ 725 struct net_bridge_mcast_port *pmctx; 726 int err; 727 728 lockdep_assert_held_once(&port->br->multicast_lock); 729 730 /* Always count on the port context. */ 731 err = br_multicast_port_ngroups_inc_one(&port->multicast_ctx, extack, 732 "Port"); 733 if (err) { 734 trace_br_mdb_full(port->dev, group); 735 return err; 736 } 737 738 /* Only count on the VLAN context if VID is given, and if snooping on 739 * that VLAN is enabled. 740 */ 741 if (!group->vid) 742 return 0; 743 744 pmctx = br_multicast_port_vid_to_port_ctx(port, group->vid); 745 if (!pmctx) 746 return 0; 747 748 err = br_multicast_port_ngroups_inc_one(pmctx, extack, "Port-VLAN"); 749 if (err) { 750 trace_br_mdb_full(port->dev, group); 751 goto dec_one_out; 752 } 753 754 return 0; 755 756 dec_one_out: 757 br_multicast_port_ngroups_dec_one(&port->multicast_ctx); 758 return err; 759 } 760 761 static void br_multicast_port_ngroups_dec(struct net_bridge_port *port, u16 vid) 762 { 763 struct net_bridge_mcast_port *pmctx; 764 765 lockdep_assert_held_once(&port->br->multicast_lock); 766 767 if (vid) { 768 pmctx = br_multicast_port_vid_to_port_ctx(port, vid); 769 if (pmctx) 770 br_multicast_port_ngroups_dec_one(pmctx); 771 } 772 br_multicast_port_ngroups_dec_one(&port->multicast_ctx); 773 } 774 775 u32 br_multicast_ngroups_get(const struct net_bridge_mcast_port *pmctx) 776 { 777 return READ_ONCE(pmctx->mdb_n_entries); 778 } 779 780 void br_multicast_ngroups_set_max(struct net_bridge_mcast_port *pmctx, u32 max) 781 { 782 WRITE_ONCE(pmctx->mdb_max_entries, max); 783 } 784 785 u32 br_multicast_ngroups_get_max(const struct net_bridge_mcast_port *pmctx) 786 { 787 return READ_ONCE(pmctx->mdb_max_entries); 788 } 789 790 static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc) 791 { 792 struct net_bridge_port_group *pg; 793 794 pg = container_of(gc, struct net_bridge_port_group, mcast_gc); 795 WARN_ON(!hlist_unhashed(&pg->mglist)); 796 WARN_ON(!hlist_empty(&pg->src_list)); 797 798 
timer_shutdown_sync(&pg->rexmit_timer); 799 timer_shutdown_sync(&pg->timer); 800 kfree_rcu(pg, rcu); 801 } 802 803 void br_multicast_del_pg(struct net_bridge_mdb_entry *mp, 804 struct net_bridge_port_group *pg, 805 struct net_bridge_port_group __rcu **pp) 806 { 807 struct net_bridge *br = pg->key.port->br; 808 struct net_bridge_group_src *ent; 809 struct hlist_node *tmp; 810 811 rcu_assign_pointer(*pp, pg->next); 812 hlist_del_init(&pg->mglist); 813 br_multicast_eht_clean_sets(pg); 814 hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) 815 br_multicast_del_group_src(ent, false); 816 br_mdb_notify(br->dev, mp, pg, RTM_DELMDB); 817 if (!br_multicast_is_star_g(&mp->addr)) { 818 rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode, 819 br_sg_port_rht_params); 820 br_multicast_sg_del_exclude_ports(mp); 821 } else { 822 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE); 823 } 824 br_multicast_port_ngroups_dec(pg->key.port, pg->key.addr.vid); 825 hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list); 826 queue_work(system_long_wq, &br->mcast_gc_work); 827 828 if (!mp->ports && !mp->host_joined && netif_running(br->dev)) 829 mod_timer(&mp->timer, jiffies); 830 } 831 832 static void br_multicast_find_del_pg(struct net_bridge *br, 833 struct net_bridge_port_group *pg) 834 { 835 struct net_bridge_port_group __rcu **pp; 836 struct net_bridge_mdb_entry *mp; 837 struct net_bridge_port_group *p; 838 839 mp = br_mdb_ip_get(br, &pg->key.addr); 840 if (WARN_ON(!mp)) 841 return; 842 843 for (pp = &mp->ports; 844 (p = mlock_dereference(*pp, br)) != NULL; 845 pp = &p->next) { 846 if (p != pg) 847 continue; 848 849 br_multicast_del_pg(mp, pg, pp); 850 return; 851 } 852 853 WARN_ON(1); 854 } 855 856 static void br_multicast_port_group_expired(struct timer_list *t) 857 { 858 struct net_bridge_port_group *pg = from_timer(pg, t, timer); 859 struct net_bridge_group_src *src_ent; 860 struct net_bridge *br = pg->key.port->br; 861 struct hlist_node *tmp; 862 bool changed; 863 864 
spin_lock(&br->multicast_lock); 865 if (!netif_running(br->dev) || timer_pending(&pg->timer) || 866 hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT) 867 goto out; 868 869 changed = !!(pg->filter_mode == MCAST_EXCLUDE); 870 pg->filter_mode = MCAST_INCLUDE; 871 hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) { 872 if (!timer_pending(&src_ent->timer)) { 873 br_multicast_del_group_src(src_ent, false); 874 changed = true; 875 } 876 } 877 878 if (hlist_empty(&pg->src_list)) { 879 br_multicast_find_del_pg(br, pg); 880 } else if (changed) { 881 struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr); 882 883 if (changed && br_multicast_is_star_g(&pg->key.addr)) 884 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE); 885 886 if (WARN_ON(!mp)) 887 goto out; 888 br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB); 889 } 890 out: 891 spin_unlock(&br->multicast_lock); 892 } 893 894 static void br_multicast_gc(struct hlist_head *head) 895 { 896 struct net_bridge_mcast_gc *gcent; 897 struct hlist_node *tmp; 898 899 hlist_for_each_entry_safe(gcent, tmp, head, gc_node) { 900 hlist_del_init(&gcent->gc_node); 901 gcent->destroy(gcent); 902 } 903 } 904 905 static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx, 906 struct net_bridge_mcast_port *pmctx, 907 struct sk_buff *skb) 908 { 909 struct net_bridge_vlan *vlan = NULL; 910 911 if (pmctx && br_multicast_port_ctx_is_vlan(pmctx)) 912 vlan = pmctx->vlan; 913 else if (br_multicast_ctx_is_vlan(brmctx)) 914 vlan = brmctx->vlan; 915 916 if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) { 917 u16 vlan_proto; 918 919 if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0) 920 return; 921 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid); 922 } 923 } 924 925 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx, 926 struct net_bridge_mcast_port *pmctx, 927 struct net_bridge_port_group *pg, 928 __be32 ip_dst, __be32 group, 929 bool with_srcs, 
						    bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			/* lmqt = "last member query time" cutoff; a source is
			 * selected when its timer is on the requested side of
			 * the cutoff (over_lmqt) and it still has retransmits
			 * budgeted.
			 */
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* eth + 20B IPv4 header + 4B Router Alert option + IGMP payload */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	/* Derive the multicast MAC from the destination group address */
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;		/* 24 bytes: 20B header + 4B RA option */
	iph->tos = 0xc0;	/* CS6, as used for network control traffic */
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;		/* queries are link-local only */
	iph->protocol = IPPROTO_IGMP;
	/* Source: an interface address if configured to use one, else 0.0.0.0 */
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	/* Router Alert option directly after the fixed header */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		/* Max response code in 1/10s units; group-specific queries use
		 * the shorter last-member interval.
		 */
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* Second pass with the same selection criteria: fill in the
		 * source list and consume one retransmit credit per source.
		 */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* Both passes must select the same set */
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	/* Unsupported igmp version configured: neither case above matched */
	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	/* Leave skb->data at the network header for the xmit path */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLDv1/v2 query skb (Ethernet + IPv6 + HbH Router Alert + ICMPv6).
 * Mirrors br_ip4_multicast_alloc_query(); "llqt" is the MLD analogue of the
 * IGMP last-member query time.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			/* Count sources matching the llqt-side/rexmit criteria,
			 * same selection as the fill loop further down.
			 */
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* eth + IPv6 header + 8B hop-by-hop option + MLD payload */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	/* Version 6, zero traffic class/flow label */
	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;	/* queries are link-local only */
	ip6h->daddr = *ip6_dst;
	/* Pick a source address for the destination; without one we cannot
	 * act as querier, so remember that the bridge has no IPv6 address.
	 */
	if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
			       &ip6h->daddr, 0, &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	/* Hop-by-hop extension header carrying the Router Alert option */
	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	/* General queries use the longer response interval, group-specific
	 * ones the last-member interval.
	 */
	interval = ipv6_addr_any(group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* Fill the source list with the same selection used when
		 * sizing the header, consuming retransmit credits.
		 */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* Both passes must select the same set */
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	/* Unsupported mld version configured: neither case above matched */
	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	/* Leave skb->data at the network header for the xmit path */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

/* Protocol dispatch for query allocation: route to the IPv4 or IPv6 builder
 * based on @group->proto, defaulting the destination to all-hosts /
 * all-nodes when @ip_dst is NULL.
 */
static struct sk_buff
*br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
			  struct net_bridge_mcast_port *pmctx,
			  struct net_bridge_port_group *pg,
			  struct br_ip *ip_dst,
			  struct br_ip *group,
			  bool with_srcs, bool over_lmqt,
			  u8 sflag, u8 *igmp_type,
			  bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			/* ff02::1 -- all-nodes link-local */
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}

/* Find or create the mdb entry for @group. Enforces the hash_max limit:
 * when the table is full, snooping is disabled bridge-wide and -E2BIG is
 * returned. Returns an ERR_PTR on failure, never NULL.
 * NOTE(review): uses GFP_ATOMIC -- callers appear to run under
 * multicast_lock; confirm for any new call site.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		trace_br_mdb_full(br->dev, group);
		/* Table overflow: turn multicast snooping off entirely */
		br_mc_disabled_update(br->dev, false, NULL);
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	/* Insert-or-fail keeps the table consistent under concurrent adds */
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}

/* Per-source timer: a source entry expired without being refreshed.
 * In INCLUDE mode the source is removed (and the whole group if it was the
 * last source); in EXCLUDE mode the source transitions via
 * br_multicast_fwd_src_handle() instead of being deleted.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	/* Skip if already unlinked, re-armed, or bridge going down */
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		/* Last source gone: the INCLUDE group is now empty */
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}

/* Linear search of @pg's source list for @ip; returns NULL if absent. */
struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->src.ip4 == ent->addr.src.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}

/* Allocate a new source entry for @pg and link it into the source list.
 * Rejects invalid sources (zeronet/multicast for IPv4, any/multicast for
 * IPv6) and enforces the PG_SRC_ENT_LIMIT cap. Returns NULL on any failure.
 */
struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}

/* Allocate and initialize a port group for @group on @port, charging the
 * per-port group accounting and indexing (S,G) entries in sg_port_tbl.
 * @next is spliced in as the RCU successor. Returns NULL on failure with
 * @extack populated where applicable.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			const struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group *p;
	int err;

	/* Charge the group count first; undone on any later failure */
	err = br_multicast_port_ngroups_inc(port, group, extack);
	if (err)
		return NULL;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		goto dec_out;
	}

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	/* Only (S,G) entries live in the sg_port rhashtable */
	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't insert new port group");
		goto free_out;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;

free_out:
	kfree(p);
dec_out:
	br_multicast_port_ngroups_dec(port, group->vid);
	return NULL;
}

/* Undo br_multicast_new_port_group(): unlink, un-index and free @p,
 * releasing the per-port group accounting.
 */
void br_multicast_del_port_group(struct net_bridge_port_group *p)
{
	struct net_bridge_port *port = p->key.port;
	__u16 vid = p->key.addr.vid;

	hlist_del_init(&p->mglist);
	if (!br_multicast_is_star_g(&p->key.addr))
		rhashtable_remove_fast(&port->br->sg_port_tbl, &p->rhnode,
				       br_sg_port_rht_params);
	kfree(p);
	br_multicast_port_ngroups_dec(port, vid);
}

/* Mark the bridge device itself as joined to @mp and (re)arm the membership
 * timer, except for L2 (non-IP) groups which have no timer refresh here.
 */
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
			    struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
}

/* Clear the bridge device's host-joined state on @mp; no-op if not joined. */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}

/* Core join handler. With no port context (@pmctx == NULL) this is a host
 * join; otherwise find or create a port group on @pmctx->port, keeping the
 * port list sorted by descending port pointer. Returns the port group, an
 * ERR_PTR, or NULL (valid for host joins / skipped contexts).
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_multicast_new_group(brmctx->br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	if (!pmctx) {
		br_multicast_host_join(brmctx, mp, true);
		goto out;
	}

	/* Find the existing entry or the sorted insertion point */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, pmctx->port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL, NULL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	/* Publish only after the group is fully initialized */
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
	/* v2/v1 reports carry no sources, so refresh the group timer */
	if (igmpv2_mldv1)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);

out:
	return p;
}

/* Locked wrapper around __br_multicast_add_group() returning an errno. */
static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&brmctx->br->multicast_lock);
	pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(pg);
	spin_unlock(&brmctx->br->multicast_lock);

	return err;
}

/* IGMP join entry point: wrap the v4 group address in a br_ip and add it.
 * Link-local multicast (224.0.0.x) is never snooped.
 */
static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv4_is_local_multicast(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	/* IGMPv2 joins behave as EXCLUDE {} (all sources) */
	filter_mode = igmpv2 ?
		      MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, igmpv2);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD join entry point: wrap the v6 group address in a br_ip and add it.
 * The link-local all-nodes group is never snooped.
 */
static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	/* MLDv1 joins behave as EXCLUDE {} (all sources) */
	filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, mldv1);
}
#endif

/* Remove a router-port list entry; returns true if it was actually linked. */
static bool br_multicast_rport_del(struct hlist_node *rlist)
{
	if (hlist_unhashed(rlist))
		return false;

	hlist_del_init_rcu(rlist);
	return true;
}

static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}

static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}

/* Common handler for per-port router timers: drop the port from the router
 * list unless its router mode is static (PERM/DISABLED) or the timer was
 * re-armed meanwhile.
 */
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(&br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(t))
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, del);
out:
	spin_unlock(&br->multicast_lock);
}

/* Per-port IPv4 router timer expiry */
static void br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Per-port IPv6 router timer expiry */
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
#endif

/* Propagate the bridge's own mrouter state to switchdev drivers (deferred). */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

/* Bridge-level router timer expiry: if neither address family still
 * considers the bridge a router and the mode isn't static, tell switchdev
 * the bridge stopped being an mrouter.
 */
static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(brmctx->br, false);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
#endif

/* Another querier's presence timer ran out: take over by restarting our own
 * query cycle, unless the bridge/context is down or snooping is disabled.
 */
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
#endif

/* Record the source address of a locally generated query as our own querier
 * address for the matching address family.
 */
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

/* Build and emit one query. With a port context the skb is transmitted out
 * of that port through the NF_BR_LOCAL_OUT hook; without one the query is
 * looped back into the local rx path so the bridge sees its own query.
 * When sflag is set (suppress), a second pass is made for sources on the
 * other side of the lmqt cutoff.
 */
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		/* Suppressed pass done; repeat for sources under the cutoff */
		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

/* Seqcount-protected snapshot of the querier state into @dest, retried
 * until a consistent read is obtained.
 */
static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
				      struct bridge_mcast_querier *dest)
{
	unsigned int seq;

	memset(dest, 0, sizeof(*dest));
	do {
		seq = read_seqcount_begin(&querier->seq);
		dest->port_ifidx = querier->port_ifidx;
		memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
	} while (read_seqcount_retry(&querier->seq, seq));
}

/* Seqcount-protected writer side of the querier state. */
static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
					struct bridge_mcast_querier *querier,
					int ifindex,
					struct br_ip *saddr)
{
	write_seqcount_begin(&querier->seq);
	querier->port_ifidx = ifindex;
	memcpy(&querier->addr, saddr, sizeof(*saddr));
	write_seqcount_end(&querier->seq);
}

/* Send a general query for the family owning @own_query, unless another
 * querier is active, then re-arm the own-query timer (startup interval while
 * within the startup query count, regular interval afterwards).
 */
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct bridge_mcast_querier *querier;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !brmctx->multicast_querier)
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	/* Decide the family from which own_query struct was handed in */
	if (pmctx ?
(own_query == &pmctx->ip4_own_query) : 1861 (own_query == &brmctx->ip4_own_query)) { 1862 querier = &brmctx->ip4_querier; 1863 other_query = &brmctx->ip4_other_query; 1864 br_group.proto = htons(ETH_P_IP); 1865 #if IS_ENABLED(CONFIG_IPV6) 1866 } else { 1867 querier = &brmctx->ip6_querier; 1868 other_query = &brmctx->ip6_other_query; 1869 br_group.proto = htons(ETH_P_IPV6); 1870 #endif 1871 } 1872 1873 if (!other_query || timer_pending(&other_query->timer)) 1874 return; 1875 1876 /* we're about to select ourselves as querier */ 1877 if (!pmctx && querier->port_ifidx) { 1878 struct br_ip zeroip = {}; 1879 1880 br_multicast_update_querier(brmctx, querier, 0, &zeroip); 1881 } 1882 1883 __br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false, 1884 0, NULL); 1885 1886 time = jiffies; 1887 time += own_query->startup_sent < brmctx->multicast_startup_query_count ? 1888 brmctx->multicast_startup_query_interval : 1889 brmctx->multicast_query_interval; 1890 mod_timer(&own_query->timer, time); 1891 } 1892 1893 static void 1894 br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx, 1895 struct bridge_mcast_own_query *query) 1896 { 1897 struct net_bridge *br = pmctx->port->br; 1898 struct net_bridge_mcast *brmctx; 1899 1900 spin_lock(&br->multicast_lock); 1901 if (br_multicast_port_ctx_state_stopped(pmctx)) 1902 goto out; 1903 1904 brmctx = br_multicast_port_ctx_get_global(pmctx); 1905 if (query->startup_sent < brmctx->multicast_startup_query_count) 1906 query->startup_sent++; 1907 1908 br_multicast_send_query(brmctx, pmctx, query); 1909 1910 out: 1911 spin_unlock(&br->multicast_lock); 1912 } 1913 1914 static void br_ip4_multicast_port_query_expired(struct timer_list *t) 1915 { 1916 struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, 1917 ip4_own_query.timer); 1918 1919 br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query); 1920 } 1921 1922 #if IS_ENABLED(CONFIG_IPV6) 1923 static void br_ip6_multicast_port_query_expired(struct timer_list 
*t) 1924 { 1925 struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, 1926 ip6_own_query.timer); 1927 1928 br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query); 1929 } 1930 #endif 1931 1932 static void br_multicast_port_group_rexmit(struct timer_list *t) 1933 { 1934 struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer); 1935 struct bridge_mcast_other_query *other_query = NULL; 1936 struct net_bridge *br = pg->key.port->br; 1937 struct net_bridge_mcast_port *pmctx; 1938 struct net_bridge_mcast *brmctx; 1939 bool need_rexmit = false; 1940 1941 spin_lock(&br->multicast_lock); 1942 if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) || 1943 !br_opt_get(br, BROPT_MULTICAST_ENABLED)) 1944 goto out; 1945 1946 pmctx = br_multicast_pg_to_port_ctx(pg); 1947 if (!pmctx) 1948 goto out; 1949 brmctx = br_multicast_port_ctx_get_global(pmctx); 1950 if (!brmctx->multicast_querier) 1951 goto out; 1952 1953 if (pg->key.addr.proto == htons(ETH_P_IP)) 1954 other_query = &brmctx->ip4_other_query; 1955 #if IS_ENABLED(CONFIG_IPV6) 1956 else 1957 other_query = &brmctx->ip6_other_query; 1958 #endif 1959 1960 if (!other_query || timer_pending(&other_query->timer)) 1961 goto out; 1962 1963 if (pg->grp_query_rexmit_cnt) { 1964 pg->grp_query_rexmit_cnt--; 1965 __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr, 1966 &pg->key.addr, false, 1, NULL); 1967 } 1968 __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr, 1969 &pg->key.addr, true, 0, &need_rexmit); 1970 1971 if (pg->grp_query_rexmit_cnt || need_rexmit) 1972 mod_timer(&pg->rexmit_timer, jiffies + 1973 brmctx->multicast_last_member_interval); 1974 out: 1975 spin_unlock(&br->multicast_lock); 1976 } 1977 1978 static int br_mc_disabled_update(struct net_device *dev, bool value, 1979 struct netlink_ext_ack *extack) 1980 { 1981 struct switchdev_attr attr = { 1982 .orig_dev = dev, 1983 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, 1984 .flags = SWITCHDEV_F_DEFER, 1985 .u.mc_disabled = !value, 1986 }; 
1987 1988 return switchdev_port_attr_set(dev, &attr, extack); 1989 } 1990 1991 void br_multicast_port_ctx_init(struct net_bridge_port *port, 1992 struct net_bridge_vlan *vlan, 1993 struct net_bridge_mcast_port *pmctx) 1994 { 1995 pmctx->port = port; 1996 pmctx->vlan = vlan; 1997 pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 1998 timer_setup(&pmctx->ip4_mc_router_timer, 1999 br_ip4_multicast_router_expired, 0); 2000 timer_setup(&pmctx->ip4_own_query.timer, 2001 br_ip4_multicast_port_query_expired, 0); 2002 #if IS_ENABLED(CONFIG_IPV6) 2003 timer_setup(&pmctx->ip6_mc_router_timer, 2004 br_ip6_multicast_router_expired, 0); 2005 timer_setup(&pmctx->ip6_own_query.timer, 2006 br_ip6_multicast_port_query_expired, 0); 2007 #endif 2008 } 2009 2010 void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx) 2011 { 2012 #if IS_ENABLED(CONFIG_IPV6) 2013 del_timer_sync(&pmctx->ip6_mc_router_timer); 2014 #endif 2015 del_timer_sync(&pmctx->ip4_mc_router_timer); 2016 } 2017 2018 int br_multicast_add_port(struct net_bridge_port *port) 2019 { 2020 int err; 2021 2022 port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT; 2023 br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx); 2024 2025 err = br_mc_disabled_update(port->dev, 2026 br_opt_get(port->br, 2027 BROPT_MULTICAST_ENABLED), 2028 NULL); 2029 if (err && err != -EOPNOTSUPP) 2030 return err; 2031 2032 port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); 2033 if (!port->mcast_stats) 2034 return -ENOMEM; 2035 2036 return 0; 2037 } 2038 2039 void br_multicast_del_port(struct net_bridge_port *port) 2040 { 2041 struct net_bridge *br = port->br; 2042 struct net_bridge_port_group *pg; 2043 HLIST_HEAD(deleted_head); 2044 struct hlist_node *n; 2045 2046 /* Take care of the remaining groups, only perm ones should be left */ 2047 spin_lock_bh(&br->multicast_lock); 2048 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) 2049 br_multicast_find_del_pg(br, pg); 2050 
	/* steal the gc list so the entries can be freed outside the lock */
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);
	br_multicast_gc(&deleted_head);
	br_multicast_port_ctx_deinit(&port->multicast_ctx);
	free_percpu(port->mcast_stats);
}

/* Restart an own-query cycle: reset the startup counter and fire the
 * query timer immediately (if it can be (re)armed).
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

/* Enable multicast on a port (or port-vlan) context: kick the own-query
 * timers, re-add permanent router entries and, for vlan contexts,
 * recompute the mdb entry counter.  Called under br->multicast_lock.
 */
static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !netif_running(br->dev))
		return;

	br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}

	if (br_multicast_port_ctx_is_vlan(pmctx)) {
		struct net_bridge_port_group *pg;
		u32 n = 0;

		/* The mcast_n_groups counter might be wrong. First,
		 * BR_VLFLAG_MCAST_ENABLED is toggled before temporary entries
		 * are flushed, thus mcast_n_groups after the toggle does not
		 * reflect the true values. And second, permanent entries added
		 * while BR_VLFLAG_MCAST_ENABLED was disabled, are not reflected
		 * either. Thus we have to refresh the counter.
		 */

		hlist_for_each_entry(pg, &pmctx->port->mglist, mglist) {
			if (pg->key.addr.vid == pmctx->vlan->vid)
				n++;
		}
		WRITE_ONCE(pmctx->mdb_n_entries, n);
	}
}

/* Public wrapper: enable the port-wide multicast context under the lock. */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock_bh(&br->multicast_lock);
	__br_multicast_enable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&br->multicast_lock);
}

/* Disable multicast on a port (or port-vlan) context: flush non-permanent
 * groups (vlan contexts only flush their own vid), drop router list
 * membership and stop all timers.  Called under br->multicast_lock.
 */
static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	del_timer(&pmctx->ip4_mc_router_timer);
	del_timer(&pmctx->ip4_own_query.timer);
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&pmctx->ip6_mc_router_timer);
	del_timer(&pmctx->ip6_own_query.timer);
#endif
	/* notify only once, covering both address families */
	br_multicast_rport_del_notify(pmctx, del);
}

/* Public wrapper: disable the port-wide multicast context under the lock. */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	spin_lock_bh(&port->br->multicast_lock);
	__br_multicast_disable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&port->br->multicast_lock);
}

/* Delete all source entries of @pg marked with BR_SGRP_F_DELETE.
 * Returns the number of deleted entries.
 */
static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int deleted = 0;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		if (ent->flags & BR_SGRP_F_DELETE) {
			br_multicast_del_group_src(ent, false);
			deleted++;
		}

	return deleted;
}

/* Re-arm a source entry's timer and update its forwarding state. */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}

/* Send group-and-source-specific queries for all sources marked with
 * BR_SGRP_F_SEND (lowering their timers to LMQT), then arm the group's
 * retransmit timer.  Queries are only emitted when we are the active
 * querier.  Called under br->multicast_lock.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				/* only schedule rexmits when no other
				 * querier is active
				 */
				if (brmctx->multicast_querier &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	if (!brmctx->multicast_querier ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}

/* Send a group-specific query Q(G) (when we are the active querier) and
 * for EXCLUDE mode groups lower the group timer to LMQT.
 * Called under br->multicast_lock.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br,
			BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (brmctx->multicast_querier &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		/* one query is sent now, the rest via the rexmit timer */
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)     (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)     (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *
 * Returns true if the group's state changed (new sources were added or
 * EHT state changed).
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if
	    (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type))
		changed = true;

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Group Timer=GMI
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* mark all current sources, then unmark those present in the report */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Group Timer=GMI
 *
 * Returns true if the group's state changed.
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs;
	     src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* new sources (A-X-Y) get GMI */
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}

/* Handle an IS_EXCLUDE record: dispatch on the current filter mode and
 * switch the group to EXCLUDE with a refreshed group timer.
 * Returns true if the group's state changed.
 */
static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
					       addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)     (B)=GMI
 *                                                Send Q(G,A-B)
 *
 * Returns true if the group's state changed.
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* mark all sources for querying, then unmark those in the report */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip,
	       0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	/* query the sources not present in the report: Q(G,A-B) */
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *                                                Send Q(G,X-A)
 *                                                Send Q(G)
 *
 * Returns true if the group's state changed.
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* only sources with a running timer (set X) are queried */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if
	    (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	/* TO_IN in EXCLUDE mode also triggers a group-specific query Q(G) */
	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Handle a TO_INCLUDE record: dispatch on the current filter mode and
 * delete the group if EHT tracking says no hosts are left.
 * Returns true if the group's state changed (false after deletion).
 */
static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Send Q(G,A*B)
 *                                                Group Timer=GMI
 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	/* mark everything for deletion, clear the send flag */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			/* source in A*B: keep it and query it */
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Send Q(G,A-Y)
 *                                                Group Timer=GMI
 *
 * Returns true if the group's state changed.
 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* new sources (A-X-Y) inherit the group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		/* sources with a running timer (A-Y) get queried */
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if
	    (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Handle a TO_EXCLUDE record: dispatch on the current filter mode and
 * switch the group to EXCLUDE with a refreshed group timer.
 * Returns true if the group's state changed.
 */
static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
				    addr_size, grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)       Send Q(G,A*B)
 *
 * Returns true if EHT state changed.
 */
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			/* source in A*B: query it */
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if
	    (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state           Actions
 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
 *                                                  Send Q(G,A-Y)
 *
 * Returns true if the group's state changed.
 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* new sources (A-X-Y) inherit the group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		/* sources with a running timer (A-Y) get queried */
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Handle a BLOCK record: dispatch on the current filter mode and delete
 * the group when nothing is left to forward (empty INCLUDE source list,
 * or EHT tracking says no hosts remain).
 * Returns true if the group's state changed (false after deletion).
 */
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg,
					       h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* Find the port group entry of @mp matching port @p and source MAC @src.
 * Walks the mdb entry's port list under the multicast lock.
 */
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
		       struct net_bridge_port *p,
		       const unsigned char *src)
{
	struct net_bridge *br __maybe_unused = mp->br;
	struct net_bridge_port_group *pg;

	for (pg = mlock_dereference(mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br))
		if (br_port_group_equal(pg, p, src))
			return pg;

	return NULL;
}

/* Process an IGMPv3 report: walk all group records, performing basic
 * join/leave handling and, when running in IGMPv3 mode on a port
 * context, the full per-source record processing under the multicast
 * lock.  Returns 0 on success or a negative errno on malformed input.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		/* validate each record header before touching it */
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* ... and its source list (4 bytes per IPv4 source) */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		/* skip unknown record types */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* an empty INCLUDE is a leave; in IGMPv2 compat mode
			 * (or without a port context) treat it as such
			 */
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		/* full per-source processing only in IGMPv3 port mode */
		if (!pmctx || igmpv2)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip4_get(brmctx->br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg,
						    h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Process an MLDv2 report: the IPv6 counterpart of
 * br_ip4_multicast_igmp3_report().  Walks all group records with bounds
 * checks against the transport length, performing join/leave handling
 * and, in MLDv2 port mode, full per-source record processing under the
 * multicast lock.  Returns 0 on success or a negative errno on
 * malformed input.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	struct mld2_report *mld2r;
	const unsigned char *src;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
		return -EINVAL;

	mld2r = (struct mld2_report *)icmp6_hdr(skb);
	num = ntohs(mld2r->mld2r_ngrec);
	len = skb_transport_offset(skb) + sizeof(*mld2r);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		/* make sure the nsrcs field itself is within the packet */
		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		/* validate the whole record (header + source list) */
		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		/* skip unknown record types */
		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* an empty INCLUDE is a leave; in MLDv1 compat mode
			 * (or without a port context) treat it as such
			 */
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		/* full per-source processing only in MLDv2 port mode */
		if (!pmctx || mldv1)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
#endif

/* Querier election: decide whether the query source @saddr should
 * become (or remain) the selected querier for its address family —
 * lower addresses win — and record it together with the receiving
 * port's ifindex.  Returns true if the querier was updated.
 */
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct br_ip *saddr)
{
	int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
	struct timer_list *own_timer, *other_timer;
	struct bridge_mcast_querier *querier;

	switch (saddr->proto) {
	case htons(ETH_P_IP):
		querier = &brmctx->ip4_querier;
		own_timer = &brmctx->ip4_own_query.timer;
		other_timer = &brmctx->ip4_other_query.timer;
		if (!querier->addr.src.ip4 ||
		    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
			goto update;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		querier = &brmctx->ip6_querier;
		own_timer = &brmctx->ip6_own_query.timer;
		other_timer = &brmctx->ip6_other_query.timer;
		if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
			goto update;
		break;
#endif
	default:
		return false;
	}

	/* no election is in progress — accept the new querier */
	if (!timer_pending(own_timer) && !timer_pending(other_timer))
		goto update;

	return false;

update:
	br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);

	return true;
}

/* Resolve the recorded querier port ifindex back to a bridge port of
 * @br, or NULL if it is unset or no longer one of our ports.
 * Called under RCU (or rtnl).
 */
static struct net_bridge_port *
__br_multicast_get_querier_port(struct net_bridge *br,
				const struct bridge_mcast_querier *querier)
{
	int port_ifidx = READ_ONCE(querier->port_ifidx);
	struct net_bridge_port *p;
	struct net_device *dev;

	if (port_ifidx == 0)
		return NULL;

	dev =
	    dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
	if (!dev)
		return NULL;
	p = br_port_get_rtnl_rcu(dev);
	if (!p || p->br != br)
		return NULL;

	return p;
}

/* Worst-case netlink attribute space needed by
 * br_multicast_dump_querier_state().
 */
size_t br_multicast_querier_state_size(void)
{
	return nla_total_size(0) +		/* nest attribute */
	       nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
	       nla_total_size(sizeof(int)) +	/* BRIDGE_QUERIER_IP_PORT */
	       nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
#if IS_ENABLED(CONFIG_IPV6)
	       nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
	       nla_total_size(sizeof(int)) +	/* BRIDGE_QUERIER_IPV6_PORT */
	       nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
#endif
	       0;
}

/* protected by rtnl or rcu */
/* Dump the current IPv4/IPv6 querier state (address, port, other-querier
 * timer) into a nested netlink attribute @nest_attr.  The nest is
 * cancelled if nothing was added.  Returns 0 or -EMSGSIZE.
 */
int br_multicast_dump_querier_state(struct sk_buff *skb,
				    const struct net_bridge_mcast *brmctx,
				    int nest_attr)
{
	struct bridge_mcast_querier querier = {};
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return 0;

	nest = nla_nest_start(skb, nest_attr);
	if (!nest)
		return -EMSGSIZE;

	rcu_read_lock();
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip4_other_query.timer))
		goto out_v6;

	br_multicast_read_querier(&brmctx->ip4_querier, &querier);
	if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
			    querier.addr.src.ip4)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip4_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
			       br_timer_value(&brmctx->ip4_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}

out_v6:
#if IS_ENABLED(CONFIG_IPV6)
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip6_other_query.timer))
		goto out;

	br_multicast_read_querier(&brmctx->ip6_querier, &querier);
	if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
			     &querier.addr.src.ip6)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip6_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
			       br_timer_value(&brmctx->ip6_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
			       p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}
out:
#endif
	rcu_read_unlock();
	nla_nest_end(skb, nest);
	/* don't emit an empty nest */
	if (!nla_len(nest))
		nla_nest_cancel(skb, nest);

	return 0;

out_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

/* (Re)arm the other-querier timer; delay_time is only refreshed when
 * the timer was not already running.
 */
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}

/* Notify switchdev drivers that a port's multicast-router state changed
 * (deferred, best effort — the return value is ignored).
 */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

/* Map a router-list node back to its bridge port; the list identity
 * tells us which hlist member (ip4_rlist/ip6_rlist) the node is.
 */
static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;

#if \
IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}

/* Find the node to insert behind so the router list stays ordered by
 * decreasing port pointer value; NULL means insert at the list head.
 */
static struct hlist_node *
br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
			    struct net_bridge_port *port,
			    struct hlist_head *mc_router_list)

{
	struct hlist_node *slot = NULL;
	struct net_bridge_port *p;
	struct hlist_node *rlist;

	hlist_for_each(rlist, mc_router_list) {
		p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);

		if ((unsigned long)port >= (unsigned long)p)
			break;

		slot = rlist;
	}

	return slot;
}

/* True if the port is not already on the router list of the other protocol
 * family, i.e. @rnode is its first router-list membership.
 */
static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
					   struct hlist_node *rnode)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (rnode != &pmctx->ip6_rlist)
		return hlist_unhashed(&pmctx->ip6_rlist);
	else
		return hlist_unhashed(&pmctx->ip4_rlist);
#else
	return true;
#endif
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	/* already on the list - nothing to do */
	if (!hlist_unhashed(rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(rlist, slot);
	else
		hlist_add_head_rcu(rlist, mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rlist)) {
		br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
		br_port_mc_router_state_change(pmctx->port, true);
	}
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}

/* Mark the bridge itself (@pmctx == NULL) or a port as having a multicast
 * router behind it and (re)arm the matching expiry @timer. Port entries
 * with a fixed router configuration (DISABLED/PERM) are left untouched.
 */
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}

static void br_ip4_multicast_mark_router(struct net_bridge_mcast
*brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
	struct timer_list *timer = &brmctx->ip4_mc_router_timer;
	struct hlist_node *rlist = NULL;

	/* port context present: use the per-port timer and list node */
	if (pmctx) {
		timer = &pmctx->ip4_mc_router_timer;
		rlist = &pmctx->ip4_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip4_mc_router_list);
}

static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct timer_list *timer = &brmctx->ip6_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip6_mc_router_timer;
		rlist = &pmctx->ip6_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip6_mc_router_list);
#endif
}

/* General query from another IGMP querier: run election and, if it won,
 * arm the other-querier timer and mark the ingress as a router port.
 */
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip4_multicast_mark_router(brmctx, pmctx);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD counterpart of br_ip4_multicast_query_received(). */
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
#endif

/* Process an IGMP query: handle querier election for general queries and
 * lower the group/port membership timers for group-specific queries so
 * stale entries expire if no report follows.
 */
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv1/v2 query; code is max resp time in 1/10 s units */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		/* IGMPv1 (code == 0): fixed delay, treat as general query */
		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD counterpart of br_ip4_multicast_query(); additionally validates the
 * header is pullable. Returns 0 or a negative error (-EINVAL on short skb).
 */
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 =
ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
#endif

/* Handle a leave/done message for @group from @src on @pmctx (or from the
 * bridge itself when @pmctx is NULL): with fast-leave delete the port
 * group entry immediately; otherwise, if we are (or may become) the
 * querier, send a group-specific query and shorten the membership timers
 * to last_member_count * last_member_interval so the entry expires unless
 * a report is seen.
 */
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			/* permanent entries are never fast-left */
			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* another querier is present - it will handle the leave */
	if (timer_pending(&other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		/* host leave: only shorten the host-joined group timer */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

/* IGMP leave: ignore link-local groups, then hand off to the generic
 * leave handling with the IPv4 query contexts.
 */
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD done: ignore the link-local all-nodes group, then hand off to the
 * generic leave handling with the IPv6 query contexts.
 */
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = pmctx ?
&pmctx->ip6_own_query : &brmctx->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
#endif

/* Account an IGMP/MLD parse error in the per-cpu multicast statistics of
 * the port, or of the bridge itself when @p is NULL.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

/* A PIMv2 hello on a port indicates a multicast router behind it. */
static void br_multicast_pim(struct net_bridge_mcast *brmctx,
			     struct net_bridge_mcast_port *pmctx,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

/* IGMP Multicast Router Discovery advertisement - mark the ingress as a
 * router port. Returns -ENOMSG if the packet is not an MRD advertisement.
 */
static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);

	return 0;
}

/* Snoop an IPv4 multicast control packet: validate it via
 * ip_mc_check_igmp(), update router/querier state for non-IGMP control
 * traffic (PIM hello, MRD), and dispatch IGMP reports/queries/leaves to
 * the matching handlers. Also sets the skb control block flags used for
 * forwarding decisions.
 */
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* not IGMP - but may still be PIM/MRD control traffic */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 Multicast Router Discovery advertisement - mark router port. */
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

/* IPv6 counterpart of br_multicast_ipv4_rcv(): validate via
 * ipv6_mc_check_mld() and dispatch MLD reports/queries/done messages.
 */
static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

int br_multicast_rcv(struct net_bridge_mcast **brmctx,
struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	/* with per-vlan snooping, redirect to the vlan's multicast contexts */
	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}

/* Own-query timer expiry: send the next (startup) query for the context. */
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (br_multicast_ctx_vlan_disabled(brmctx))
		goto out;

	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, NULL, query);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
				   &brmctx->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
				   &brmctx->ip6_querier);
}
#endif

/* Deferred destruction of mdb/port-group entries queued on mcast_gc_list. */
static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}

/* Initialize a multicast context (the bridge's own, or a vlan's when
 * @vlan != NULL) with the protocol default intervals and its timers.
 */
void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_other_query.delay_time = 0;
	brmctx->ip4_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_other_query.delay_time = 0;
	brmctx->ip6_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}

void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}

/* One-time multicast state setup at bridge device creation. */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}

/* Join the IPv4 all-snoopers group (224.0.0.106) on the bridge device so
 * we receive multicast router discovery messages.
 */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}

static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	/* must exist - we joined the group earlier */
	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}

/* Restart the startup query sequence for one own-query context. */
static void __br_multicast_open_query(struct net_bridge *br,
				      struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}

static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}

/* Bridge brought up: (re)start queriers on the active context(s) - either
 * every enabled vlan context or the bridge's own context.
 */
void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_open(&br->multicast_ctx);
	}
}

static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}

/* Enable/disable multicast processing for a single vlan (master vlan entry
 * or a per-port vlan), flipping BR_VLFLAG_MCAST_ENABLED and starting or
 * stopping the matching context under the multicast lock.
 */
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}

/* Toggle a master vlan and every matching per-port vlan entry. */
static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (!vport)
			continue;
		br_multicast_toggle_one_vlan(vport, on);
	}

	if (br_vlan_is_brentry(vlan))
		br_multicast_toggle_one_vlan(vlan, on);
}

/* Switch between global and per-vlan multicast snooping. Requires vlan
 * filtering to be enabled when turning per-vlan snooping on.
 */
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port(p);
		else
			br_multicast_enable_port(p);
	}

	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}

/* Toggle the global (config) multicast state of a master vlan; returns
 * true if the state actually changed.
 */
bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

	return true;
}

/* Bridge brought down: stop timers on the active multicast context(s). */
void br_multicast_stop(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct
net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_stop(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_stop(&br->multicast_ctx);
	}
}

/* Tear down all multicast state of a bridge being deleted: flush every
 * mdb entry, drain the pending garbage-collection list, deinit the global
 * context and wait for outstanding RCU callbacks and GC work.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	/* take over the GC list so it can be freed outside the lock */
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	/* wait for pending RCU callbacks before the bridge goes away */
	rcu_barrier();
}

/* Set the bridge-level multicast router type. Only DISABLED, PERM and
 * TEMP_QUERY are valid here; anything else returns -EINVAL.
 */
int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&brmctx->br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
		/* no dynamic router timers in these modes */
		del_timer(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(brmctx->br, false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}

/* Send an RTM_DELMDB router notification once the port stopped being a
 * multicast router for both IPv4 and IPv6.
 */
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if (!deleted)
		return;

	/* For backwards compatibility for now, only notify if there is
	 * no multicast router anymore for both IPv4 and IPv6.
	 */
	if (!hlist_unhashed(&pmctx->ip4_rlist))
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (!hlist_unhashed(&pmctx->ip6_rlist))
		return;
#endif

	br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
	br_port_mc_router_state_change(pmctx->port, false);

	/* don't allow timer refresh */
	if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}

/* Set the per-port (or per-port-vlan) multicast router type. Setting the
 * current TEMP value again just refreshes the router timers. @del
 * accumulates whether the port was removed from either router list so a
 * single notification can be sent.
 */
int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
				 unsigned long val)
{
	struct net_bridge_mcast *brmctx;
	unsigned long now = jiffies;
	int err = -EINVAL;
	bool del = false;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	spin_lock_bh(&brmctx->br->multicast_lock);
	if (pmctx->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
			mod_timer(&pmctx->ip4_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
			mod_timer(&pmctx->ip6_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#endif
		}
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
		del |= br_ip4_multicast_rport_del(pmctx);
		del_timer(&pmctx->ip4_mc_router_timer);
		/* the ip6 rport_del call has a stub when IPv6 is off */
		del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		del |= br_ip4_multicast_rport_del(pmctx);
		del |= br_ip6_multicast_rport_del(pmctx);
		br_multicast_rport_del_notify(pmctx, del);
break;
	case MDB_RTR_TYPE_PERM:
		/* permanent router port: no timers, add to router lists now */
		pmctx->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&pmctx->ip4_mc_router_timer);
		br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_ip6_multicast_add_router(brmctx, pmctx);
		break;
	case MDB_RTR_TYPE_TEMP:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
		br_ip4_multicast_mark_router(brmctx, pmctx);
		br_ip6_multicast_mark_router(brmctx, pmctx);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}

/* Dispatch a vlan router-type change to the bridge-level or port-level
 * setter depending on whether this is a master vlan.
 */
int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router)
{
	int err;

	if (br_vlan_is_master(v))
		err = br_multicast_set_router(&v->br_mcast_ctx, mcast_router);
	else
		err = br_multicast_set_port_router(&v->port_mcast_ctx,
						   mcast_router);

	return err;
}

/* (Re)start the own querier of @brmctx for the address family selected by
 * @query and enable the matching own query on every active port (or port
 * vlan, when @brmctx is a vlan context).
 */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	/* skip contexts that don't match the current snooping mode */
	if (!br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

	__br_multicast_open_query(brmctx->br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
		struct bridge_mcast_own_query *ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
		struct bridge_mcast_own_query *ip6_own_query;
#endif

		if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx))
			continue;

		if (br_multicast_ctx_is_vlan(brmctx)) {
			struct net_bridge_vlan *vlan;

			/* for a vlan context, use the port vlan's queries */
			vlan = br_vlan_find(nbp_vlan_group_rcu(port),
					    brmctx->vlan->vid);
			if (!vlan ||
			    br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx))
				continue;

			ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
#endif
		} else {
			ip4_own_query = &port->multicast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &port->multicast_ctx.ip6_own_query;
#endif
		}

		/* @query identifies which family the caller asked for */
		if (query == &brmctx->ip4_own_query)
			br_multicast_enable(ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(ip6_own_query);
#endif
	}
	rcu_read_unlock();
}

/* Toggle multicast snooping for the whole bridge. Joining/leaving the
 * all-snoopers groups is deferred until after multicast_lock is dropped
 * (see the comment before the change_snoopers block).
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	/* let switchdev drivers veto the change; EOPNOTSUPP is fine */
	err = br_mc_disabled_update(br->dev, val, extack);
	if (err == -EOPNOTSUPP)
		err = 0;
	if (err)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port_ctx(&port->multicast_ctx);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled.
Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}

/* Report whether multicast snooping is enabled on a bridge device. */
bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);

/* Report whether the bridge itself currently acts as a multicast router. */
bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);

/* Enable/disable the own querier of a context. When enabling, set the
 * other-querier delay windows and start the own queries for both address
 * families. Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&brmctx->br->multicast_lock);
	if (brmctx->multicast_querier == val)
		goto unlock;

	/* WRITE_ONCE pairs with lockless readers of multicast_querier */
	WRITE_ONCE(brmctx->multicast_querier, val);
	if (!val)
		goto unlock;

	max_delay = brmctx->multicast_query_response_interval;

	if (!timer_pending(&brmctx->ip4_other_query.timer))
		brmctx->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&brmctx->ip6_other_query.timer))
		brmctx->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}

/* Set the IGMP version used by a context; only 2 and 3 are accepted. */
int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	/* Currently we support only version 2 and 3 */
	switch (val) {
	case 2:
	case 3:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_igmp_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Set the MLD version used by a context; only 1 and 2 are accepted. */
int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
				 unsigned long val)
{
	/* Currently we support version 1 and 2 */
	switch (val) {
	case 1:
	case 2:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_mld_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}
#endif

/* Set the query interval (clock_t units), clamped to the allowed minimum
 * to avoid overly aggressive querying.
 */
void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
	}

	brmctx->multicast_query_interval = intvl_jiffies;
}

/* Set the startup query interval (clock_t units), clamped to its minimum. */
void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
					  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
	}

	brmctx->multicast_startup_query_interval = intvl_jiffies;
}

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		/* skip the port we were asked about */
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			/* GFP_ATOMIC: we're inside an RCU read-side section */
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	/* build a dummy ethernet header carrying only the protocol */
	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);

/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast *brmctx;
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;
	int port_ifidx;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;
	brmctx = &br->multicast_ctx;

	switch (proto) {
	case ETH_P_IP:
		/* querier timer not running, or the querier is on @dev
		 * itself -> not "adjacent"
		 */
		port_ifidx = brmctx->ip4_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip4_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		port_ifidx = brmctx->ip6_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip6_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

/**
 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
 * @dev: The bridge port adjacent to which to check for a multicast router
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a multicast router is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	port = br_port_get_check_rcu(dev);
	if (!port)
		goto unlock;

	brmctx = &port->br->multicast_ctx;
	switch (proto) {
	case ETH_P_IP:
		/* any router port other than @dev itself counts */
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
					 ip4_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
					 ip6_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#endif
	default:
		/* when compiled without IPv6 support, be conservative and
		 * always assume presence of an IPv6 multicast router
		 */
		ret = true;
	}

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);

/* Account one IGMP/MLD packet of @type in the per-cpu stats for direction
 * @dir (RX/TX). Query packets are further classified by version: an IGMP
 * query longer than struct igmphdr is v3, otherwise the Max Resp Code
 * field distinguishes v1 (zero) from v2; an MLD query longer than
 * struct mld_msg is v2, otherwise v1.
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* transport payload length = total length - IP header */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				/* zero Max Resp Code means an IGMPv1 query */
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* ICMPv6 length = payload + base header - extension headers */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}

/* Count an IGMP/MLD packet against the port's stats when available,
 * otherwise against the bridge's own stats. No-op when stats are disabled
 * or no packet type was classified.
 */
void br_multicast_count(struct net_bridge *br,
			const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}

/* Allocate the bridge's per-cpu multicast statistics. */
int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

/* Free the bridge's per-cpu multicast statistics. */
void
br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}

/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

/* Sum the per-cpu multicast statistics (port stats if @p is set,
 * otherwise the bridge's) into @dest. Each per-cpu snapshot is taken
 * under the u64_stats retry loop so torn 64-bit reads are avoided.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}

/* Initialize both mdb rhashtables; on failure of the second, the first is
 * destroyed again so no partial state is left behind.
 */
int br_mdb_hash_init(struct net_bridge *br)
{
	int err;

	err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
	if (err)
		return err;

	err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
	if (err) {
		rhashtable_destroy(&br->sg_port_tbl);
		return err;
	}

	return 0;
}

/* Destroy both mdb rhashtables. */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}