// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/switchdev.h>

#include "br_private.h"

static struct static_key_false br_switchdev_tx_fwd_offload;

static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
					     const struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return (p->flags & BR_TX_FWD_OFFLOAD) &&
	       (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
}

bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return BR_INPUT_SKB_CB(skb)->tx_fwd_offload;
}

void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb)
{
	skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb);
}

/* Mark the frame for TX forwarding offload if this egress port supports it */
void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,
					     struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
}

/* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms
 * that the skb has been already forwarded to, to avoid further cloning to
 * other ports in the same hwdom by making nbp_switchdev_allowed_egress()
 * return false.
 */
void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,
					      struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
}

void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
			      struct sk_buff *skb)
{
	if (p->hwdom)
		BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom;
}

bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
				  const struct sk_buff *skb)
{
	struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);

	return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
		(!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
}

/* Flags that can be offloaded to hardware */
#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | BR_PORT_MAB | \
				  BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED | \
				  BR_HAIRPIN_MODE | BR_ISOLATED | BR_MULTICAST_TO_UNICAST)

int br_switchdev_set_port_flag(struct net_bridge_port *p,
			       unsigned long flags,
			       unsigned long mask,
			       struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
	};
	struct switchdev_notifier_port_attr_info info = {
		.attr = &attr,
	};
	int err;

	mask &= BR_PORT_FLAGS_HW_OFFLOAD;
	if (!mask)
		return 0;

	attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
	attr.u.brport_flags.val = flags;
	attr.u.brport_flags.mask = mask;

	/* We run from atomic context here */
	err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
				       &info.info, extack);
	err = notifier_to_errno(err);
	if (err == -EOPNOTSUPP)
		return 0;

	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack,
					"bridge flag offload is not supported");
		return -EOPNOTSUPP;
	}

	attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
	attr.flags = SWITCHDEV_F_DEFER;

	err = switchdev_port_attr_set(p->dev, &attr, extack);
	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack,
					"error setting offload flag on port");
		return err;
	}

	return 0;
}
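
/* Translate a bridge FDB entry into the switchdev notifier item handed to
 * drivers. Entries without a destination port, and local (host) entries,
 * are reported against the bridge device itself.
 */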
static void br_switchdev_fdb_populate(struct net_bridge *br,
				      struct switchdev_notifier_fdb_info *item,
				      const struct net_bridge_fdb_entry *fdb,
				      const void *ctx)
{
	const struct net_bridge_port *p = READ_ONCE(fdb->dst);

	item->addr = fdb->key.addr.addr;
	item->vid = fdb->key.vlan_id;
	item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
	item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
	item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
	item->locked = false;
	item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
	item->info.ctx = ctx;
}

void
br_switchdev_fdb_notify(struct net_bridge *br,
			const struct net_bridge_fdb_entry *fdb, int type)
{
	struct switchdev_notifier_fdb_info item;

	if (test_bit(BR_FDB_LOCKED, &fdb->flags))
		return;

	br_switchdev_fdb_populate(br, &item, fdb, NULL);

	switch (type) {
	case RTM_DELNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	case RTM_NEWNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	}
}

int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
			       bool changed, struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid = vid,
		.changed = changed,
	};

	return switchdev_port_obj_add(dev, &v.obj, extack);
}

int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
	};

	return switchdev_port_obj_del(dev, &v.obj);
}
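
/* Place the joining port in the hardware domain of an existing port that
 * shares its parent switch ID, or allocate a new domain from the bridge's
 * busy_hwdoms bitmap. Hardware domain 0 is left for non-offloaded ports.
 */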
static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining)
{
	struct net_bridge *br = joining->br;
	struct net_bridge_port *p;
	int hwdom;

	/* joining is yet to be added to the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
			joining->hwdom = p->hwdom;
			return 0;
		}
	}

	hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1);
	if (hwdom >= BR_HWDOM_MAX)
		return -EBUSY;

	set_bit(hwdom, &br->busy_hwdoms);
	joining->hwdom = hwdom;
	return 0;
}

static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving)
{
	struct net_bridge *br = leaving->br;
	struct net_bridge_port *p;

	/* leaving is no longer in the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (p->hwdom == leaving->hwdom)
			return;
	}

	clear_bit(leaving->hwdom, &br->busy_hwdoms);
}

static int nbp_switchdev_add(struct net_bridge_port *p,
			     struct netdev_phys_item_id ppid,
			     bool tx_fwd_offload,
			     struct netlink_ext_ack *extack)
{
	int err;

	if (p->offload_count) {
		/* Prevent unsupported configurations such as a bridge port
		 * which is a bonding interface, and the member ports are from
		 * different hardware switches.
		 */
		if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Same bridge port cannot be offloaded by two physical switches");
			return -EBUSY;
		}

		/* Tolerate drivers that call switchdev_bridge_port_offload()
		 * more than once for the same bridge port, such as when the
		 * bridge port is an offloaded bonding/team interface.
		 */
		p->offload_count++;

		return 0;
	}

	p->ppid = ppid;
	p->offload_count = 1;

	err = nbp_switchdev_hwdom_set(p);
	if (err)
		return err;

	if (tx_fwd_offload) {
		p->flags |= BR_TX_FWD_OFFLOAD;
		static_branch_inc(&br_switchdev_tx_fwd_offload);
	}

	return 0;
}

static void nbp_switchdev_del(struct net_bridge_port *p)
{
	if (WARN_ON(!p->offload_count))
		return;

	p->offload_count--;

	if (p->offload_count)
		return;

	if (p->hwdom)
		nbp_switchdev_hwdom_put(p);

	if (p->flags & BR_TX_FWD_OFFLOAD) {
		p->flags &= ~BR_TX_FWD_OFFLOAD;
		static_branch_dec(&br_switchdev_tx_fwd_offload);
	}
}

static int
br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
			    const struct net_bridge_fdb_entry *fdb,
			    unsigned long action, const void *ctx)
{
	struct switchdev_notifier_fdb_info item;
	int err;

	br_switchdev_fdb_populate(br, &item, fdb, ctx);

	err = nb->notifier_call(nb, action, &item);
	return notifier_to_errno(err);
}

static int
br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
			bool adding, struct notifier_block *nb)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge *br;
	unsigned long action;
	int err = 0;

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (adding)
		action = SWITCHDEV_FDB_ADD_TO_DEVICE;
	else
		action = SWITCHDEV_FDB_DEL_TO_DEVICE;

	rcu_read_lock();

	hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
		err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
		if (err)
			break;
	}

	rcu_read_unlock();

	return err;
}

static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
					 const void *ctx,
					 struct notifier_block *nb,
					 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_attr_info attr_info = {
		.info = {
			.dev = br_dev,
			.extack = extack,
			.ctx = ctx,
		},
	};
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_vlan_group *vg;
	struct switchdev_attr attr;
	struct net_bridge_vlan *v;
	int err;

	attr_info.attr = &attr;
	attr.orig_dev = br_dev;

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (v->msti) {
			attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI;
			attr.u.vlan_msti.vid = v->vid;
			attr.u.vlan_msti.msti = v->msti;

			err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET,
						&attr_info);
			err = notifier_to_errno(err);
			if (err)
				return err;
		}
	}

	return 0;
}
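
/* Replay one VLAN object towards the notifier block of the driver that is
 * starting or stopping offload of a bridge port.
 */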
static int
br_switchdev_vlan_replay_one(struct notifier_block *nb,
			     struct net_device *dev,
			     struct switchdev_obj_port_vlan *vlan,
			     const void *ctx, unsigned long action,
			     struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &vlan->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
					  struct net_device *dev,
					  struct net_bridge_vlan_group *vg,
					  const void *ctx, unsigned long action,
					  struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v;
	int err = 0;
	u16 pvid;

	if (!vg)
		return 0;

	pvid = br_get_pvid(vg);

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		struct switchdev_obj_port_vlan vlan = {
			.obj.orig_dev = dev,
			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
			.flags = br_vlan_flags(v, pvid),
			.vid = v->vid,
		};

		if (!br_vlan_should_use(v))
			continue;

		err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
						   action, extack);
		if (err)
			return err;
	}

	return 0;
}

static int br_switchdev_vlan_replay(struct net_device *br_dev,
				    const void *ctx, bool adding,
				    struct notifier_block *nb,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_port *p;
	unsigned long action;
	int err;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
					     ctx, action, extack);
	if (err)
		return err;

	list_for_each_entry(p, &br->port_list, list) {
		struct net_device *dev = p->dev;

		err = br_switchdev_vlan_replay_group(nb, dev,
						     nbp_vlan_group(p),
						     ctx, action, extack);
		if (err)
			return err;
	}

	if (adding) {
		err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack);
		if (err)
			return err;
	}

	return 0;
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct br_switchdev_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_switchdev_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}
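
/* Notify one lower device of the bridge about a host MDB entry, i.e. a
 * multicast group joined by the bridge device itself.
 */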
static void br_switchdev_host_mdb_one(struct net_device *dev,
				      struct net_device *lower_dev,
				      struct net_bridge_mdb_entry *mp,
				      int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
			.orig_dev = dev,
		},
	};

	br_switchdev_mdb_populate(&mdb, mp);

	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_switchdev_host_mdb(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
}

static int
br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
			    const struct switchdev_obj_port_mdb *mdb,
			    unsigned long action, const void *ctx,
			    struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &mdb->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
				      enum switchdev_obj_id id,
				      const struct net_bridge_mdb_entry *mp,
				      struct net_device *orig_dev)
{
	struct switchdev_obj_port_mdb *mdb;

	mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->obj.id = id;
	mdb->obj.orig_dev = orig_dev;
	br_switchdev_mdb_populate(mdb, mp);
	list_add_tail(&mdb->obj.list, mdb_list);

	return 0;
}
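
/* Notify switchdev of an MDB entry being added or deleted. Entries with a
 * port group target that port's device; entries without one are host entries
 * and are replicated to all lower devices of the bridge.
 */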
void br_switchdev_mdb_notify(struct net_device *dev,
			     struct net_bridge_mdb_entry *mp,
			     struct net_bridge_port_group *pg,
			     int type)
{
	struct br_switchdev_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};

	if (!pg)
		return br_switchdev_host_mdb(dev, mp, type);

	br_switchdev_mdb_populate(&mdb, mp);

	mdb.obj.orig_dev = pg->key.port->dev;
	switch (type) {
	case RTM_NEWMDB:
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (!complete_info)
			break;
		complete_info->port = pg->key.port;
		complete_info->ip = mp->addr;
		mdb.obj.complete_priv = complete_info;
		mdb.obj.complete = br_switchdev_mdb_complete;
		if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
			kfree(complete_info);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
		break;
	}
}
#endif

static int
br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
			const void *ctx, bool adding, struct notifier_block *nb,
			struct netlink_ext_ack *extack)
{
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	/* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
	 * because the write-side protection is br->multicast_lock. But we
	 * need to emulate the [ blocking ] calling context of a regular
	 * switchdev event, so since both br->multicast_lock and RCU read side
	 * critical sections are atomic, we have no choice but to pick the RCU
	 * read side lock, queue up all our events, leave the critical section
	 * and notify switchdev from blocking context.
	 */
	rcu_read_lock();

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			err = br_switchdev_mdb_queue_one(&mdb_list,
							 SWITCHDEV_OBJ_ID_HOST_MDB,
							 mp, br_dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (p->key.port->dev != dev)
				continue;

			err = br_switchdev_mdb_queue_one(&mdb_list,
							 SWITCHDEV_OBJ_ID_PORT_MDB,
							 mp, dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}
	}

	rcu_read_unlock();

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	list_for_each_entry(obj, &mdb_list, list) {
		err = br_switchdev_mdb_replay_one(nb, dev,
						  SWITCHDEV_OBJ_PORT_MDB(obj),
						  action, ctx, extack);
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	if (err)
		return err;
#endif

	return 0;
}
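
/* Replay the bridge's VLANs, MDB entries and FDB entries towards the driver
 * that starts offloading this bridge port, so that its hardware state matches
 * the current software state of the bridge.
 */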
static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
				   struct notifier_block *atomic_nb,
				   struct notifier_block *blocking_nb,
				   struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;
	int err;

	err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
				      extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
				      const void *ctx,
				      struct notifier_block *atomic_nb,
				      struct notifier_block *blocking_nb)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;

	br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);

	br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);

	br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
}

/* Let the bridge know that this port is offloaded, so that it can assign a
 * switchdev hardware domain to it.
 */
int br_switchdev_port_offload(struct net_bridge_port *p,
			      struct net_device *dev, const void *ctx,
			      struct notifier_block *atomic_nb,
			      struct notifier_block *blocking_nb,
			      bool tx_fwd_offload,
			      struct netlink_ext_ack *extack)
{
	struct netdev_phys_item_id ppid;
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err)
		return err;

	err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack);
	if (err)
		return err;

	err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
	if (err)
		goto out_switchdev_del;

	return 0;

out_switchdev_del:
	nbp_switchdev_del(p);

	return err;
}

void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb)
{
	nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb);

	nbp_switchdev_del(p);
}
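
/* Usage sketch (not part of this file): drivers normally reach
 * br_switchdev_port_offload()/br_switchdev_port_unoffload() through the
 * switchdev_bridge_port_offload()/switchdev_bridge_port_unoffload() wrappers
 * when one of their netdevs joins or leaves a bridge, e.g. from a
 * NETDEV_CHANGEUPPER handler. The driver, structure and notifier names below
 * are hypothetical; only the wrapper calls come from the switchdev API this
 * file backs, and the exact argument choices vary per driver.
 *
 *	static int foo_port_bridge_join(struct foo_port *port,
 *					struct net_device *br_dev,
 *					struct netlink_ext_ack *extack)
 *	{
 *		return switchdev_bridge_port_offload(port->netdev, port->netdev,
 *						     port, &foo_switchdev_nb,
 *						     &foo_switchdev_blocking_nb,
 *						     false, extack);
 *	}
 *
 *	static void foo_port_bridge_leave(struct foo_port *port)
 *	{
 *		switchdev_bridge_port_unoffload(port->netdev, port,
 *						&foo_switchdev_nb,
 *						&foo_switchdev_blocking_nb);
 *	}
 */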