// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/switchdev.h>

#include "br_private.h"

static struct static_key_false br_switchdev_tx_fwd_offload;

static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
					     const struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return (p->flags & BR_TX_FWD_OFFLOAD) &&
	       (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
}

bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return BR_INPUT_SKB_CB(skb)->tx_fwd_offload;
}

void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb)
{
	skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb);
}

/* Mark the frame for TX forwarding offload if this egress port supports it */
void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,
					     struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
}

/* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms
 * that the skb has already been forwarded to, to avoid further cloning to
 * other ports in the same hwdom by making nbp_switchdev_allowed_egress()
 * return false.
 */
void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,
					      struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
}

void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
			      struct sk_buff *skb)
{
	if (p->hwdom)
		BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom;
}

bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
				  const struct sk_buff *skb)
{
	struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);

	return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
		(!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
}

/* Flags that can be offloaded to hardware */
#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | BR_PORT_MAB | \
				  BR_MCAST_FLOOD | BR_BCAST_FLOOD | \
				  BR_PORT_LOCKED | BR_HAIRPIN_MODE | \
				  BR_ISOLATED | BR_MULTICAST_TO_UNICAST)

int br_switchdev_set_port_flag(struct net_bridge_port *p,
			       unsigned long flags,
			       unsigned long mask,
			       struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
	};
	struct switchdev_notifier_port_attr_info info = {
		.attr = &attr,
	};
	int err;

	mask &= BR_PORT_FLAGS_HW_OFFLOAD;
	if (!mask)
		return 0;

	attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
	attr.u.brport_flags.val = flags;
	attr.u.brport_flags.mask = mask;

	/* We run from atomic context here */
	err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
				       &info.info, extack);
	err = notifier_to_errno(err);
	if (err == -EOPNOTSUPP)
		return 0;

	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack,
					"bridge flag offload is not supported");
		return -EOPNOTSUPP;
	}

	attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
	attr.flags = SWITCHDEV_F_DEFER;

	err = switchdev_port_attr_set(p->dev, &attr, extack);
	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack,
					"error setting offload flag on port");
		return err;
	}

	return 0;
}
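
/* Fill in a switchdev FDB notifier item from a bridge FDB entry. Entries
 * with no destination port, as well as local (host) entries, are attributed
 * to the bridge device itself rather than to a port device.
 */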
static void br_switchdev_fdb_populate(struct net_bridge *br,
				      struct switchdev_notifier_fdb_info *item,
				      const struct net_bridge_fdb_entry *fdb,
				      const void *ctx)
{
	const struct net_bridge_port *p = READ_ONCE(fdb->dst);

	item->addr = fdb->key.addr.addr;
	item->vid = fdb->key.vlan_id;
	item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
	item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
	item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
	item->locked = false;
	item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
	item->info.ctx = ctx;
}

void
br_switchdev_fdb_notify(struct net_bridge *br,
			const struct net_bridge_fdb_entry *fdb, int type)
{
	struct switchdev_notifier_fdb_info item;

	if (test_bit(BR_FDB_LOCKED, &fdb->flags))
		return;

	/* Entries with these flags were created using ndm_state == NUD_REACHABLE,
	 * ndm_flags == NTF_MASTER( | NTF_STICKY), ext_flags == 0 by something
	 * equivalent to 'bridge fdb add ... master dynamic (sticky)'.
	 * Drivers don't know how to deal with these, so don't notify them to
	 * avoid confusing them.
	 */
	if (test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
	    !test_bit(BR_FDB_STATIC, &fdb->flags) &&
	    !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		return;

	br_switchdev_fdb_populate(br, &item, fdb, NULL);

	switch (type) {
	case RTM_DELNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	case RTM_NEWNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	}
}

int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
			       bool changed, struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid = vid,
		.changed = changed,
	};

	return switchdev_port_obj_add(dev, &v.obj, extack);
}

int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
	};

	return switchdev_port_obj_del(dev, &v.obj);
}

static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining)
{
	struct net_bridge *br = joining->br;
	struct net_bridge_port *p;
	int hwdom;

	/* joining is yet to be added to the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
			joining->hwdom = p->hwdom;
			return 0;
		}
	}

	hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1);
	if (hwdom >= BR_HWDOM_MAX)
		return -EBUSY;

	set_bit(hwdom, &br->busy_hwdoms);
	joining->hwdom = hwdom;
	return 0;
}

static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving)
{
	struct net_bridge *br = leaving->br;
	struct net_bridge_port *p;

	/* leaving is no longer in the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (p->hwdom == leaving->hwdom)
			return;
	}

	clear_bit(leaving->hwdom, &br->busy_hwdoms);
}
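
/* Bind the joining bridge port to the parent switch identified by @ppid and
 * assign it a hardware forwarding domain. The binding is reference-counted
 * through p->offload_count, so stacked uppers (e.g. an offloaded bonding or
 * team interface) may offload the same bridge port more than once.
 */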
static int nbp_switchdev_add(struct net_bridge_port *p,
			     struct netdev_phys_item_id ppid,
			     bool tx_fwd_offload,
			     struct netlink_ext_ack *extack)
{
	int err;

	if (p->offload_count) {
		/* Prevent unsupported configurations, such as a bridge port
		 * that is a bonding interface whose members are ports of
		 * different hardware switches.
		 */
		if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Same bridge port cannot be offloaded by two physical switches");
			return -EBUSY;
		}

		/* Tolerate drivers that call switchdev_bridge_port_offload()
		 * more than once for the same bridge port, such as when the
		 * bridge port is an offloaded bonding/team interface.
		 */
		p->offload_count++;

		return 0;
	}

	p->ppid = ppid;
	p->offload_count = 1;

	err = nbp_switchdev_hwdom_set(p);
	if (err)
		return err;

	if (tx_fwd_offload) {
		p->flags |= BR_TX_FWD_OFFLOAD;
		static_branch_inc(&br_switchdev_tx_fwd_offload);
	}

	return 0;
}

static void nbp_switchdev_del(struct net_bridge_port *p)
{
	if (WARN_ON(!p->offload_count))
		return;

	p->offload_count--;

	if (p->offload_count)
		return;

	if (p->hwdom)
		nbp_switchdev_hwdom_put(p);

	if (p->flags & BR_TX_FWD_OFFLOAD) {
		p->flags &= ~BR_TX_FWD_OFFLOAD;
		static_branch_dec(&br_switchdev_tx_fwd_offload);
	}
}

static int
br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
			    const struct net_bridge_fdb_entry *fdb,
			    unsigned long action, const void *ctx)
{
	struct switchdev_notifier_fdb_info item;
	int err;

	br_switchdev_fdb_populate(br, &item, fdb, ctx);

	err = nb->notifier_call(nb, action, &item);
	return notifier_to_errno(err);
}

static int
br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
			bool adding, struct notifier_block *nb)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge *br;
	unsigned long action;
	int err = 0;

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (adding)
		action = SWITCHDEV_FDB_ADD_TO_DEVICE;
	else
		action = SWITCHDEV_FDB_DEL_TO_DEVICE;

	rcu_read_lock();

	hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
		err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
		if (err)
			break;
	}

	rcu_read_unlock();

	return err;
}
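
/* Replay VLAN-to-MSTI mappings as bridge attribute notifications towards
 * @nb, so that a newly offloaded port learns about MST instance mappings
 * that were configured before it joined the bridge.
 */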
static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
					 const void *ctx,
					 struct notifier_block *nb,
					 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_attr_info attr_info = {
		.info = {
			.dev = br_dev,
			.extack = extack,
			.ctx = ctx,
		},
	};
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_vlan_group *vg;
	struct switchdev_attr attr;
	struct net_bridge_vlan *v;
	int err;

	attr_info.attr = &attr;
	attr.orig_dev = br_dev;

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (v->msti) {
			attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI;
			attr.u.vlan_msti.vid = v->vid;
			attr.u.vlan_msti.msti = v->msti;

			err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET,
						&attr_info);
			err = notifier_to_errno(err);
			if (err)
				return err;
		}
	}

	return 0;
}

static int
br_switchdev_vlan_replay_one(struct notifier_block *nb,
			     struct net_device *dev,
			     struct switchdev_obj_port_vlan *vlan,
			     const void *ctx, unsigned long action,
			     struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &vlan->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
					  struct net_device *dev,
					  struct net_bridge_vlan_group *vg,
					  const void *ctx, unsigned long action,
					  struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v;
	int err = 0;
	u16 pvid;

	if (!vg)
		return 0;

	pvid = br_get_pvid(vg);

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		struct switchdev_obj_port_vlan vlan = {
			.obj.orig_dev = dev,
			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
			.flags = br_vlan_flags(v, pvid),
			.vid = v->vid,
		};

		if (!br_vlan_should_use(v))
			continue;

		err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
						   action, extack);
		if (err)
			return err;
	}

	return 0;
}

static int br_switchdev_vlan_replay(struct net_device *br_dev,
				    const void *ctx, bool adding,
				    struct notifier_block *nb,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_port *p;
	unsigned long action;
	int err;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
					     ctx, action, extack);
	if (err)
		return err;

	list_for_each_entry(p, &br->port_list, list) {
		struct net_device *dev = p->dev;

		err = br_switchdev_vlan_replay_group(nb, dev,
						     nbp_vlan_group(p),
						     ctx, action, extack);
		if (err)
			return err;
	}

	if (adding) {
		err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack);
		if (err)
			return err;
	}

	return 0;
}
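
/* Multicast group (MDB) offload. Additions towards a port device use a
 * deferred completion handler: once the driver has programmed the entry,
 * br_switchdev_mdb_complete() marks the port group with
 * MDB_PG_FLAGS_OFFLOAD under br->multicast_lock, making the offload state
 * visible in MDB dumps.
 */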
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct br_switchdev_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_switchdev_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}

static void br_switchdev_host_mdb_one(struct net_device *dev,
				      struct net_device *lower_dev,
				      struct net_bridge_mdb_entry *mp,
				      int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
			.orig_dev = dev,
		},
	};

	br_switchdev_mdb_populate(&mdb, mp);

	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_switchdev_host_mdb(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
}

static int
br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
			    const struct switchdev_obj_port_mdb *mdb,
			    unsigned long action, const void *ctx,
			    struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &mdb->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
				      enum switchdev_obj_id id,
				      const struct net_bridge_mdb_entry *mp,
				      struct net_device *orig_dev)
{
	struct switchdev_obj_port_mdb *mdb;

	mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->obj.id = id;
	mdb->obj.orig_dev = orig_dev;
	br_switchdev_mdb_populate(mdb, mp);
	list_add_tail(&mdb->obj.list, mdb_list);

	return 0;
}

void br_switchdev_mdb_notify(struct net_device *dev,
			     struct net_bridge_mdb_entry *mp,
			     struct net_bridge_port_group *pg,
			     int type)
{
	struct br_switchdev_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};

	if (!pg)
		return br_switchdev_host_mdb(dev, mp, type);

	br_switchdev_mdb_populate(&mdb, mp);

	mdb.obj.orig_dev = pg->key.port->dev;
	switch (type) {
	case RTM_NEWMDB:
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (!complete_info)
			break;
		complete_info->port = pg->key.port;
		complete_info->ip = mp->addr;
		mdb.obj.complete_priv = complete_info;
		mdb.obj.complete = br_switchdev_mdb_complete;
		if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
			kfree(complete_info);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
		break;
	}
}
#endif
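
/* Replay the current MDB state (host-joined groups, plus port groups that
 * point to @dev) through @nb, emulating the blocking calling context of a
 * regular deferred switchdev notification.
 */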
static int
br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
			const void *ctx, bool adding, struct notifier_block *nb,
			struct netlink_ext_ack *extack)
{
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	/* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
	 * because the write-side protection is br->multicast_lock. But we
	 * need to emulate the [ blocking ] calling context of a regular
	 * switchdev event, so since both br->multicast_lock and RCU read side
	 * critical sections are atomic, we have no choice but to pick the RCU
	 * read side lock, queue up all our events, leave the critical section
	 * and notify switchdev from blocking context.
	 */
	rcu_read_lock();

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			err = br_switchdev_mdb_queue_one(&mdb_list,
							 SWITCHDEV_OBJ_ID_HOST_MDB,
							 mp, br_dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (p->key.port->dev != dev)
				continue;

			err = br_switchdev_mdb_queue_one(&mdb_list,
							 SWITCHDEV_OBJ_ID_PORT_MDB,
							 mp, dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}
	}

	rcu_read_unlock();

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	list_for_each_entry(obj, &mdb_list, list) {
		err = br_switchdev_mdb_replay_one(nb, dev,
						  SWITCHDEV_OBJ_PORT_MDB(obj),
						  action, ctx, extack);
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	if (err)
		return err;
#endif

	return 0;
}

static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
				   struct notifier_block *atomic_nb,
				   struct notifier_block *blocking_nb,
				   struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;
	int err;

	err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
				      extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
				      const void *ctx,
				      struct notifier_block *atomic_nb,
				      struct notifier_block *blocking_nb)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;

	br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);

	br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);

	br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
}
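
/* Driver-side sketch (hypothetical driver; the notifier block names below
 * are assumptions, not part of this file): on NETDEV_CHANGEUPPER, when one
 * of its ports joins a bridge, a driver typically calls
 *
 *	err = switchdev_bridge_port_offload(brport_dev, dev, priv,
 *					    &drv_switchdev_nb,
 *					    &drv_switchdev_blocking_nb,
 *					    tx_fwd_offload, extack);
 *
 * which raises a SWITCHDEV_BRPORT_OFFLOAD notification that the bridge
 * answers by calling br_switchdev_port_offload() below, replaying VLANs,
 * MDB and FDB entries so the driver catches up with pre-existing state.
 */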
/* Let the bridge know that this port is offloaded, so that it can assign a
 * switchdev hardware domain to it.
 */
int br_switchdev_port_offload(struct net_bridge_port *p,
			      struct net_device *dev, const void *ctx,
			      struct notifier_block *atomic_nb,
			      struct notifier_block *blocking_nb,
			      bool tx_fwd_offload,
			      struct netlink_ext_ack *extack)
{
	struct netdev_phys_item_id ppid;
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err)
		return err;

	err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack);
	if (err)
		return err;

	err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
	if (err)
		goto out_switchdev_del;

	return 0;

out_switchdev_del:
	nbp_switchdev_del(p);

	return err;
}

void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb)
{
	nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb);

	nbp_switchdev_del(p);
}
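
/* The driver-side counterpart on unlinking (same assumed names as in the
 * sketch above) is switchdev_bridge_port_unoffload(brport_dev, priv,
 * &drv_switchdev_nb, &drv_switchdev_blocking_nb), which reaches
 * br_switchdev_port_unoffload() via SWITCHDEV_BRPORT_UNOFFLOAD: replayed
 * objects are deleted and the port's hardware domain reference is dropped.
 */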