// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/switchdev.h>

#include "br_private.h"

static struct static_key_false br_switchdev_tx_fwd_offload;

static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
					     const struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	if (br_multicast_igmp_type(skb))
		return false;

	return (p->flags & BR_TX_FWD_OFFLOAD) &&
	       (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
}

bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return BR_INPUT_SKB_CB(skb)->tx_fwd_offload;
}

void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb)
{
	skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb);
}

/* Mark the frame for TX forwarding offload if this egress port supports it */
void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,
					     struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
}

/* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms
 * that the skb has already been forwarded to, to avoid further cloning to
 * other ports in the same hwdom by making nbp_switchdev_allowed_egress()
 * return false.
 */
void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,
					      struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
}

void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
			      struct sk_buff *skb)
{
	if (p->hwdom)
		BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom;
}

bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
				  const struct sk_buff *skb)
{
	struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);

	return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
		(!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
}

/* Flags that can be offloaded to hardware */
#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | BR_PORT_MAB | \
				  BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED | \
				  BR_HAIRPIN_MODE | BR_ISOLATED | BR_MULTICAST_TO_UNICAST)

int br_switchdev_set_port_flag(struct net_bridge_port *p,
			       unsigned long flags,
			       unsigned long mask,
			       struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
	};
	struct switchdev_notifier_port_attr_info info = {
		.attr = &attr,
	};
	int err;

	mask &= BR_PORT_FLAGS_HW_OFFLOAD;
	if (!mask)
		return 0;

	attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
	attr.u.brport_flags.val = flags;
	attr.u.brport_flags.mask = mask;

	/* We run from atomic context here */
	err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
				       &info.info, extack);
	err = notifier_to_errno(err);
	if (err == -EOPNOTSUPP)
		return 0;

	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack,
					"bridge flag offload is not supported");
		return -EOPNOTSUPP;
	}

	attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
	attr.flags = SWITCHDEV_F_DEFER;

	err = switchdev_port_attr_set(p->dev, &attr, extack);
	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack,
					"error setting offload flag on port");
		return err;
	}

	return 0;
}

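/* Fill in a switchdev FDB notifier item from a bridge FDB entry. Entries
 * without a destination port, as well as local entries, are reported against
 * the bridge device itself rather than a port device.
 */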
static void br_switchdev_fdb_populate(struct net_bridge *br,
				      struct switchdev_notifier_fdb_info *item,
				      const struct net_bridge_fdb_entry *fdb,
				      const void *ctx)
{
	const struct net_bridge_port *p = READ_ONCE(fdb->dst);

	item->addr = fdb->key.addr.addr;
	item->vid = fdb->key.vlan_id;
	item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
	item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
	item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
	item->locked = false;
	item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
	item->info.ctx = ctx;
}

void
br_switchdev_fdb_notify(struct net_bridge *br,
			const struct net_bridge_fdb_entry *fdb, int type)
{
	struct switchdev_notifier_fdb_info item;

	if (test_bit(BR_FDB_LOCKED, &fdb->flags))
		return;

	/* Entries with these flags were created using ndm_state == NUD_REACHABLE,
	 * ndm_flags == NTF_MASTER( | NTF_STICKY), ext_flags == 0 by something
	 * equivalent to 'bridge fdb add ... master dynamic (sticky)'.
	 * Drivers don't know how to deal with these, so don't notify them to
	 * avoid confusing them.
	 */
	if (test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
	    !test_bit(BR_FDB_STATIC, &fdb->flags) &&
	    !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		return;

	br_switchdev_fdb_populate(br, &item, fdb, NULL);

	switch (type) {
	case RTM_DELNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	case RTM_NEWNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	}
}

int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
			       bool changed, struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid = vid,
		.changed = changed,
	};

	return switchdev_port_obj_add(dev, &v.obj, extack);
}

int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
	};

	return switchdev_port_obj_del(dev, &v.obj);
}

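/* Assign a hardware domain to a joining port: if another member port has the
 * same parent ID (ppid), share its hwdom, otherwise claim a free one from the
 * bridge's busy_hwdoms bitmap. Allocation starts at 1, since hwdom 0 denotes
 * a port with no offload.
 */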
static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining)
{
	struct net_bridge *br = joining->br;
	struct net_bridge_port *p;
	int hwdom;

	/* joining is yet to be added to the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
			joining->hwdom = p->hwdom;
			return 0;
		}
	}

	hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1);
	if (hwdom >= BR_HWDOM_MAX)
		return -EBUSY;

	set_bit(hwdom, &br->busy_hwdoms);
	joining->hwdom = hwdom;
	return 0;
}

static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving)
{
	struct net_bridge *br = leaving->br;
	struct net_bridge_port *p;

	/* leaving is no longer in the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (p->hwdom == leaving->hwdom)
			return;
	}

	clear_bit(leaving->hwdom, &br->busy_hwdoms);
}

static int nbp_switchdev_add(struct net_bridge_port *p,
			     struct netdev_phys_item_id ppid,
			     bool tx_fwd_offload,
			     struct netlink_ext_ack *extack)
{
	int err;

	if (p->offload_count) {
		/* Prevent unsupported configurations such as a bridge port
		 * which is a bonding interface whose member ports come from
		 * different hardware switches.
		 */
		if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Same bridge port cannot be offloaded by two physical switches");
			return -EBUSY;
		}

		/* Tolerate drivers that call switchdev_bridge_port_offload()
		 * more than once for the same bridge port, such as when the
		 * bridge port is an offloaded bonding/team interface.
		 */
		p->offload_count++;

		return 0;
	}

	p->ppid = ppid;
	p->offload_count = 1;

	err = nbp_switchdev_hwdom_set(p);
	if (err)
		return err;

	if (tx_fwd_offload) {
		p->flags |= BR_TX_FWD_OFFLOAD;
		static_branch_inc(&br_switchdev_tx_fwd_offload);
	}

	return 0;
}

static void nbp_switchdev_del(struct net_bridge_port *p)
{
	if (WARN_ON(!p->offload_count))
		return;

	p->offload_count--;

	if (p->offload_count)
		return;

	if (p->hwdom)
		nbp_switchdev_hwdom_put(p);

	if (p->flags & BR_TX_FWD_OFFLOAD) {
		p->flags &= ~BR_TX_FWD_OFFLOAD;
		static_branch_dec(&br_switchdev_tx_fwd_offload);
	}
}

static int
br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
			    const struct net_bridge_fdb_entry *fdb,
			    unsigned long action, const void *ctx)
{
	struct switchdev_notifier_fdb_info item;
	int err;

	br_switchdev_fdb_populate(br, &item, fdb, ctx);

	err = nb->notifier_call(nb, action, &item);
	return notifier_to_errno(err);
}

static int
br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
			bool adding, struct notifier_block *nb)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge *br;
	unsigned long action;
	int err = 0;

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (adding)
		action = SWITCHDEV_FDB_ADD_TO_DEVICE;
	else
		action = SWITCHDEV_FDB_DEL_TO_DEVICE;

	rcu_read_lock();

	hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
		err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
		if (err)
			break;
	}

	rcu_read_unlock();

	return err;
}

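/* Replay the bridge's VLAN-to-MSTI mappings towards the given notifier
 * block, so that a port which starts to be offloaded learns about mappings
 * that were configured before it joined.
 */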
static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
					 const void *ctx,
					 struct notifier_block *nb,
					 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_attr_info attr_info = {
		.info = {
			.dev = br_dev,
			.extack = extack,
			.ctx = ctx,
		},
	};
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_vlan_group *vg;
	struct switchdev_attr attr;
	struct net_bridge_vlan *v;
	int err;

	attr_info.attr = &attr;
	attr.orig_dev = br_dev;

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (v->msti) {
			attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI;
			attr.u.vlan_msti.vid = v->vid;
			attr.u.vlan_msti.msti = v->msti;

			err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET,
						&attr_info);
			err = notifier_to_errno(err);
			if (err)
				return err;
		}
	}

	return 0;
}

static int
br_switchdev_vlan_replay_one(struct notifier_block *nb,
			     struct net_device *dev,
			     struct switchdev_obj_port_vlan *vlan,
			     const void *ctx, unsigned long action,
			     struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &vlan->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
					  struct net_device *dev,
					  struct net_bridge_vlan_group *vg,
					  const void *ctx, unsigned long action,
					  struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v;
	int err = 0;
	u16 pvid;

	if (!vg)
		return 0;

	pvid = br_get_pvid(vg);

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		struct switchdev_obj_port_vlan vlan = {
			.obj.orig_dev = dev,
			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
			.flags = br_vlan_flags(v, pvid),
			.vid = v->vid,
		};

		if (!br_vlan_should_use(v))
			continue;

		err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
						   action, extack);
		if (err)
			return err;
	}

	return 0;
}

static int br_switchdev_vlan_replay(struct net_device *br_dev,
				    const void *ctx, bool adding,
				    struct notifier_block *nb,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_port *p;
	unsigned long action;
	int err;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
					     ctx, action, extack);
	if (err)
		return err;

	list_for_each_entry(p, &br->port_list, list) {
		struct net_device *dev = p->dev;

		err = br_switchdev_vlan_replay_group(nb, dev,
						     nbp_vlan_group(p),
						     ctx, action, extack);
		if (err)
			return err;
	}

	if (adding) {
		err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack);
		if (err)
			return err;
	}

	return 0;
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
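/* Context for the deferred MDB add completion callback, identifying the port
 * group whose offload flags must be updated once the driver has processed
 * the object.
 */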
struct br_switchdev_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_switchdev_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;
	u8 old_flags;

	if (err == -EOPNOTSUPP)
		goto out_free;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;

		old_flags = p->flags;
		br_multicast_set_pg_offload_flags(p, !err);
		if (br_mdb_should_notify(br, old_flags ^ p->flags))
			br_mdb_flag_change_notify(br->dev, mp, p);
	}
out:
	spin_unlock_bh(&br->multicast_lock);
out_free:
	kfree(priv);
}

static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}

static void br_switchdev_host_mdb_one(struct net_device *dev,
				      struct net_device *lower_dev,
				      struct net_bridge_mdb_entry *mp,
				      int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
			.orig_dev = dev,
		},
	};

	br_switchdev_mdb_populate(&mdb, mp);

	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_switchdev_host_mdb(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
}

static int
br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
			    const struct switchdev_obj_port_mdb *mdb,
			    unsigned long action, const void *ctx,
			    struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &mdb->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

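/* Snapshot one MDB entry into a dynamically allocated switchdev object and
 * queue it on mdb_list for later replay, unless an equivalent addition is
 * already pending in the switchdev deferred queue.
 */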
static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
				      struct net_device *dev,
				      unsigned long action,
				      enum switchdev_obj_id id,
				      const struct net_bridge_mdb_entry *mp,
				      struct net_device *orig_dev)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = id,
			.orig_dev = orig_dev,
		},
	};
	struct switchdev_obj_port_mdb *pmdb;

	br_switchdev_mdb_populate(&mdb, mp);

	if (action == SWITCHDEV_PORT_OBJ_ADD &&
	    switchdev_port_obj_act_is_deferred(dev, action, &mdb.obj)) {
		/* This event is already in the deferred queue of
		 * events, so this replay must be elided, lest the
		 * driver receive duplicate events for it. This can
		 * only happen when replaying additions, since
		 * modifications are always immediately visible in
		 * br->mdb_list, whereas actual event delivery may be
		 * delayed.
		 */
		return 0;
	}

	pmdb = kmemdup(&mdb, sizeof(mdb), GFP_ATOMIC);
	if (!pmdb)
		return -ENOMEM;

	list_add_tail(&pmdb->obj.list, mdb_list);
	return 0;
}

void br_switchdev_mdb_notify(struct net_device *dev,
			     struct net_bridge_mdb_entry *mp,
			     struct net_bridge_port_group *pg,
			     int type)
{
	struct br_switchdev_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};

	if (!pg)
		return br_switchdev_host_mdb(dev, mp, type);

	br_switchdev_mdb_populate(&mdb, mp);

	mdb.obj.orig_dev = pg->key.port->dev;
	switch (type) {
	case RTM_NEWMDB:
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (!complete_info)
			break;
		complete_info->port = pg->key.port;
		complete_info->ip = mp->addr;
		mdb.obj.complete_priv = complete_info;
		mdb.obj.complete = br_switchdev_mdb_complete;
		if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
			kfree(complete_info);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
		break;
	}
}
#endif

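/* Replay the host-joined and per-port MDB entries that are relevant to this
 * bridge port towards the blocking notifier block, when the port starts or
 * stops being offloaded.
 */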
static int
br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
			const void *ctx, bool adding, struct notifier_block *nb,
			struct netlink_ext_ack *extack)
{
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	/* br_switchdev_mdb_queue_one() will take care to not queue a
	 * replay of an event that is already pending in the switchdev
	 * deferred queue. In order to safely determine that, there
	 * must be no new deferred MDB notifications enqueued for the
	 * duration of the MDB scan. Therefore, grab the write-side
	 * lock to avoid racing with any concurrent IGMP/MLD snooping.
	 */
	spin_lock_bh(&br->multicast_lock);

	hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
							 SWITCHDEV_OBJ_ID_HOST_MDB,
							 mp, br_dev);
			if (err) {
				spin_unlock_bh(&br->multicast_lock);
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->key.port->dev != dev)
				continue;

			err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
							 SWITCHDEV_OBJ_ID_PORT_MDB,
							 mp, dev);
			if (err) {
				spin_unlock_bh(&br->multicast_lock);
				goto out_free_mdb;
			}
		}
	}

	spin_unlock_bh(&br->multicast_lock);

	list_for_each_entry(obj, &mdb_list, list) {
		err = br_switchdev_mdb_replay_one(nb, dev,
						  SWITCHDEV_OBJ_PORT_MDB(obj),
						  action, ctx, extack);
		if (err == -EOPNOTSUPP)
			err = 0;
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	if (err)
		return err;
#endif

	return 0;
}

static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
				   struct notifier_block *atomic_nb,
				   struct notifier_block *blocking_nb,
				   struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;
	int err;

	err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
				      extack);
	if (err) {
		/* -EOPNOTSUPP not propagated from MDB replay. */
		return err;
	}

	err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
				      const void *ctx,
				      struct notifier_block *atomic_nb,
				      struct notifier_block *blocking_nb)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;

	br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);

	br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);

	br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);

	/* Make sure that the device leaving this bridge has seen all
	 * relevant events before it is disassociated. In the normal
	 * case, when the device is directly attached to the bridge,
	 * this is covered by del_nbp(). If the association was indirect
	 * however, e.g. via a team or bond, and the device is leaving
	 * that intermediate device, then the bridge port remains in
	 * place.
	 */
	switchdev_deferred_process();
}

/* Let the bridge know that this port is offloaded, so that it can assign a
 * switchdev hardware domain to it.
 */
int br_switchdev_port_offload(struct net_bridge_port *p,
			      struct net_device *dev, const void *ctx,
			      struct notifier_block *atomic_nb,
			      struct notifier_block *blocking_nb,
			      bool tx_fwd_offload,
			      struct netlink_ext_ack *extack)
{
	struct netdev_phys_item_id ppid;
	int err;

	err = netif_get_port_parent_id(dev, &ppid, false);
	if (err)
		return err;

	err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack);
	if (err)
		return err;

	err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
	if (err)
		goto out_switchdev_del;

	return 0;

out_switchdev_del:
	nbp_switchdev_del(p);

	return err;
}

void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb)
{
	nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb);

	nbp_switchdev_del(p);
}

int br_switchdev_port_replay(struct net_bridge_port *p,
			     struct net_device *dev, const void *ctx,
			     struct notifier_block *atomic_nb,
			     struct notifier_block *blocking_nb,
			     struct netlink_ext_ack *extack)
{
	return nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
}