// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch_br.h"
#include "ice_repr.h"
#include "ice_switch.h"
#include "ice_vlan.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_trace.h"

#define ICE_ESW_BRIDGE_UPDATE_INTERVAL msecs_to_jiffies(1000)

static const struct rhashtable_params ice_fdb_ht_params = {
	.key_offset = offsetof(struct ice_esw_br_fdb_entry, data),
	.key_len = sizeof(struct ice_esw_br_fdb_data),
	.head_offset = offsetof(struct ice_esw_br_fdb_entry, ht_node),
	.automatic_shrinking = true,
};

static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev)
{
	/* Accept only PF netdev, PRs and LAG */
	return ice_is_port_repr_netdev(dev) || netif_is_ice(dev) ||
		netif_is_lag_master(dev);
}

static struct net_device *
ice_eswitch_br_get_uplink_from_lag(struct net_device *lag_dev)
{
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (netif_is_ice(lower))
			return lower;
	}

	return NULL;
}

static struct ice_esw_br_port *
ice_eswitch_br_netdev_to_port(struct net_device *dev)
{
	if (ice_is_port_repr_netdev(dev)) {
		struct ice_repr *repr = ice_netdev_to_repr(dev);

		return repr->br_port;
	} else if (netif_is_ice(dev) || netif_is_lag_master(dev)) {
		struct net_device *ice_dev;
		struct ice_pf *pf;

		if (netif_is_lag_master(dev))
			ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
		else
			ice_dev = dev;

		if (!ice_dev)
			return NULL;

		pf = ice_netdev_to_pf(ice_dev);

		return pf->br_port;
	}

	return NULL;
}

static void
ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info,
				  u8 pf_id, u16 vf_vsi_idx)
{
	rule_info->sw_act.vsi_handle = vf_vsi_idx;
	rule_info->sw_act.flag |= ICE_FLTR_RX;
	rule_info->sw_act.src = pf_id;
	rule_info->priority = 2;
}

static void
ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info,
				 u16 pf_vsi_idx)
{
	rule_info->sw_act.vsi_handle = pf_vsi_idx;
	rule_info->sw_act.flag |= ICE_FLTR_TX;
	rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
	rule_info->flags_info.act_valid = true;
	rule_info->priority = 2;
}

static int
ice_eswitch_br_rule_delete(struct ice_hw *hw, struct ice_rule_query_data *rule)
{
	int err;

	if (!rule)
		return -EINVAL;

	err = ice_rem_adv_rule_by_id(hw, rule);
	kfree(rule);

	return err;
}

static u16
ice_eswitch_br_get_lkups_cnt(u16 vid)
{
	return ice_eswitch_br_is_vid_valid(vid) ? 2 : 1;
}
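/* The lookup list used by the advanced rules below has a fixed layout:
 * list[0] always matches on the MAC address, and list[1] (allocated only
 * when ice_eswitch_br_is_vid_valid() accepts the VID) matches on the
 * outer VLAN tag. ice_eswitch_br_get_lkups_cnt() returns the list size
 * accordingly.
 */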
static void
ice_eswitch_br_add_vlan_lkup(struct ice_adv_lkup_elem *list, u16 vid)
{
	if (ice_eswitch_br_is_vid_valid(vid)) {
		list[1].type = ICE_VLAN_OFOS;
		list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
		list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
	}
}

static struct ice_rule_query_data *
ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
			       const unsigned char *mac, u16 vid)
{
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data *rule;
	struct ice_adv_lkup_elem *list;
	u16 lkups_cnt;
	int err;

	lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return ERR_PTR(-ENOMEM);

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list) {
		err = -ENOMEM;
		goto err_list_alloc;
	}

	switch (port_type) {
	case ICE_ESWITCH_BR_UPLINK_PORT:
		ice_eswitch_br_egress_rule_setup(&rule_info, vsi_idx);
		break;
	case ICE_ESWITCH_BR_VF_REPR_PORT:
		ice_eswitch_br_ingress_rule_setup(&rule_info, hw->pf_id,
						  vsi_idx);
		break;
	default:
		err = -EINVAL;
		goto err_add_rule;
	}

	list[0].type = ICE_MAC_OFOS;
	ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac);
	eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr);

	ice_eswitch_br_add_vlan_lkup(list, vid);

	rule_info.need_pass_l2 = true;

	rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
	if (err)
		goto err_add_rule;

	kfree(list);

	return rule;

err_add_rule:
	kfree(list);
err_list_alloc:
	kfree(rule);

	return ERR_PTR(err);
}

static struct ice_rule_query_data *
ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
				 const unsigned char *mac, u16 vid)
{
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data *rule;
	struct ice_adv_lkup_elem *list;
	int err = -ENOMEM;
	u16 lkups_cnt;

	lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		goto err_exit;

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		goto err_list_alloc;

	list[0].type = ICE_MAC_OFOS;
	ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
	eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);

	ice_eswitch_br_add_vlan_lkup(list, vid);

	rule_info.allow_pass_l2 = true;
	rule_info.sw_act.vsi_handle = vsi_idx;
	rule_info.sw_act.fltr_act = ICE_NOP;
	rule_info.priority = 2;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
	if (err)
		goto err_add_rule;

	kfree(list);

	return rule;

err_add_rule:
	kfree(list);
err_list_alloc:
	kfree(rule);
err_exit:
	return ERR_PTR(err);
}
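/* Each offloaded FDB entry is backed by a pair of HW rules: a "forward"
 * rule that matches the destination MAC (and optionally the VLAN) and
 * redirects the packet to the entry's VSI, and a "guard" rule that
 * matches the source MAC with an ICE_NOP action and allow_pass_l2 set.
 * The pair is created and destroyed together as one ice_esw_br_flow.
 */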
static struct ice_esw_br_flow *
ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
			   int port_type, const unsigned char *mac, u16 vid)
{
	struct ice_rule_query_data *fwd_rule, *guard_rule;
	struct ice_esw_br_flow *flow;
	int err;

	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac,
						  vid);
	err = PTR_ERR_OR_ZERO(fwd_rule);
	if (err) {
		dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n",
			port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
			err);
		goto err_fwd_rule;
	}

	guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid);
	err = PTR_ERR_OR_ZERO(guard_rule);
	if (err) {
		dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n",
			port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
			err);
		goto err_guard_rule;
	}

	flow->fwd_rule = fwd_rule;
	flow->guard_rule = guard_rule;

	return flow;

err_guard_rule:
	ice_eswitch_br_rule_delete(hw, fwd_rule);
err_fwd_rule:
	kfree(flow);

	return ERR_PTR(err);
}

static struct ice_esw_br_fdb_entry *
ice_eswitch_br_fdb_find(struct ice_esw_br *bridge, const unsigned char *mac,
			u16 vid)
{
	struct ice_esw_br_fdb_data data = {
		.vid = vid,
	};

	ether_addr_copy(data.addr, mac);
	return rhashtable_lookup_fast(&bridge->fdb_ht, &data,
				      ice_fdb_ht_params);
}

static void
ice_eswitch_br_flow_delete(struct ice_pf *pf, struct ice_esw_br_flow *flow)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_eswitch_br_rule_delete(&pf->hw, flow->fwd_rule);
	if (err)
		dev_err(dev, "Failed to delete FDB forward rule, err: %d\n",
			err);

	err = ice_eswitch_br_rule_delete(&pf->hw, flow->guard_rule);
	if (err)
		dev_err(dev, "Failed to delete FDB guard rule, err: %d\n",
			err);

	kfree(flow);
}

static struct ice_esw_br_vlan *
ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
{
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_esw_br_port *port;
	struct ice_esw_br_vlan *vlan;

	port = xa_load(&bridge->ports, vsi_idx);
	if (!port) {
		dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx);
		return ERR_PTR(-EINVAL);
	}

	vlan = xa_load(&port->vlans, vid);
	if (!vlan) {
		dev_info(dev, "Bridge port vlan metadata lookup failed (vsi=%u)\n",
			 vsi_idx);
		return ERR_PTR(-EINVAL);
	}

	return vlan;
}

static void
ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge,
				struct ice_esw_br_fdb_entry *fdb_entry)
{
	struct ice_pf *pf = bridge->br_offloads->pf;

	rhashtable_remove_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
			       ice_fdb_ht_params);
	list_del(&fdb_entry->list);

	ice_eswitch_br_flow_delete(pf, fdb_entry->flow);

	kfree(fdb_entry);
}

static void
ice_eswitch_br_fdb_offload_notify(struct net_device *dev,
				  const unsigned char *mac, u16 vid,
				  unsigned long val)
{
	struct switchdev_notifier_fdb_info fdb_info = {
		.addr = mac,
		.vid = vid,
		.offloaded = true,
	};

	call_switchdev_notifiers(val, dev, &fdb_info.info, NULL);
}
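/* Entries learned by the driver are announced back to the bridge with
 * SWITCHDEV_FDB_DEL_TO_BRIDGE when they go away, so the bridge can drop
 * its software copy; entries added by the user belong to the bridge, so
 * only the HW state is torn down for them.
 */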
static void
ice_eswitch_br_fdb_entry_notify_and_cleanup(struct ice_esw_br *bridge,
					    struct ice_esw_br_fdb_entry *entry)
{
	if (!(entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER))
		ice_eswitch_br_fdb_offload_notify(entry->dev, entry->data.addr,
						  entry->data.vid,
						  SWITCHDEV_FDB_DEL_TO_BRIDGE);
	ice_eswitch_br_fdb_entry_delete(bridge, entry);
}

static void
ice_eswitch_br_fdb_entry_find_and_delete(struct ice_esw_br *bridge,
					 const unsigned char *mac, u16 vid)
{
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct ice_esw_br_fdb_entry *fdb_entry;
	struct device *dev = ice_pf_to_dev(pf);

	fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
	if (!fdb_entry) {
		dev_err(dev, "FDB entry with mac: %pM and vid: %u not found\n",
			mac, vid);
		return;
	}

	trace_ice_eswitch_br_fdb_entry_find_and_delete(fdb_entry);
	ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
}

static void
ice_eswitch_br_fdb_entry_create(struct net_device *netdev,
				struct ice_esw_br_port *br_port,
				bool added_by_user,
				const unsigned char *mac, u16 vid)
{
	struct ice_esw_br *bridge = br_port->bridge;
	struct ice_pf *pf = bridge->br_offloads->pf;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_esw_br_fdb_entry *fdb_entry;
	struct ice_esw_br_flow *flow;
	struct ice_esw_br_vlan *vlan;
	struct ice_hw *hw = &pf->hw;
	unsigned long event;
	int err;

	/* untagged filtering is not yet supported */
	if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid)
		return;

	if ((bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING)) {
		vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx,
						   vid);
		if (IS_ERR(vlan)) {
			dev_err(dev, "Failed to look up VLAN, err: %ld\n",
				PTR_ERR(vlan));
			return;
		}
	}

	fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
	if (fdb_entry)
		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);

	fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
	if (!fdb_entry) {
		err = -ENOMEM;
		goto err_exit;
	}

	flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx,
					  br_port->type, mac, vid);
	if (IS_ERR(flow)) {
		err = PTR_ERR(flow);
		goto err_add_flow;
	}

	ether_addr_copy(fdb_entry->data.addr, mac);
	fdb_entry->data.vid = vid;
	fdb_entry->br_port = br_port;
	fdb_entry->flow = flow;
	fdb_entry->dev = netdev;
	fdb_entry->last_use = jiffies;
	event = SWITCHDEV_FDB_ADD_TO_BRIDGE;

	if (added_by_user) {
		fdb_entry->flags |= ICE_ESWITCH_BR_FDB_ADDED_BY_USER;
		event = SWITCHDEV_FDB_OFFLOADED;
	}

	err = rhashtable_insert_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
				     ice_fdb_ht_params);
	if (err)
		goto err_fdb_insert;

	list_add(&fdb_entry->list, &bridge->fdb_list);
	trace_ice_eswitch_br_fdb_entry_create(fdb_entry);

	ice_eswitch_br_fdb_offload_notify(netdev, mac, vid, event);

	return;

err_fdb_insert:
	ice_eswitch_br_flow_delete(pf, flow);
err_add_flow:
	kfree(fdb_entry);
err_exit:
	dev_err(dev, "Failed to create fdb entry, err: %d\n", err);
}

static void
ice_eswitch_br_fdb_work_dealloc(struct ice_esw_br_fdb_work *fdb_work)
{
	kfree(fdb_work->fdb_info.addr);
	kfree(fdb_work);
}
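/* FDB switchdev notifications arrive in atomic context, so the event is
 * copied into a work item (allocated with GFP_ATOMIC in
 * ice_eswitch_br_fdb_work_alloc()) and handled later on the bridge
 * workqueue, where rtnl can be taken and HW rules can be programmed.
 */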
static void
ice_eswitch_br_fdb_event_work(struct work_struct *work)
{
	struct ice_esw_br_fdb_work *fdb_work = ice_work_to_fdb_work(work);
	bool added_by_user = fdb_work->fdb_info.added_by_user;
	const unsigned char *mac = fdb_work->fdb_info.addr;
	u16 vid = fdb_work->fdb_info.vid;
	struct ice_esw_br_port *br_port;

	rtnl_lock();

	br_port = ice_eswitch_br_netdev_to_port(fdb_work->dev);
	if (!br_port)
		goto err_exit;

	switch (fdb_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		ice_eswitch_br_fdb_entry_create(fdb_work->dev, br_port,
						added_by_user, mac, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		ice_eswitch_br_fdb_entry_find_and_delete(br_port->bridge,
							 mac, vid);
		break;
	default:
		goto err_exit;
	}

err_exit:
	rtnl_unlock();
	dev_put(fdb_work->dev);
	ice_eswitch_br_fdb_work_dealloc(fdb_work);
}

static struct ice_esw_br_fdb_work *
ice_eswitch_br_fdb_work_alloc(struct switchdev_notifier_fdb_info *fdb_info,
			      struct net_device *dev,
			      unsigned long event)
{
	struct ice_esw_br_fdb_work *work;
	unsigned char *mac;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&work->work, ice_eswitch_br_fdb_event_work);
	memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));

	mac = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!mac) {
		kfree(work);
		return ERR_PTR(-ENOMEM);
	}

	ether_addr_copy(mac, fdb_info->addr);
	work->fdb_info.addr = mac;
	work->event = event;
	work->dev = dev;

	return work;
}

static int
ice_eswitch_br_switchdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct ice_esw_br_offloads *br_offloads;
	struct ice_esw_br_fdb_work *work;
	struct netlink_ext_ack *extack;
	struct net_device *upper;

	br_offloads = ice_nb_to_br_offloads(nb, switchdev_nb);
	extack = switchdev_notifier_info_to_extack(ptr);

	upper = netdev_master_upper_dev_get_rcu(dev);
	if (!upper)
		return NOTIFY_DONE;

	if (!netif_is_bridge_master(upper))
		return NOTIFY_DONE;

	if (!ice_eswitch_br_is_dev_valid(dev))
		return NOTIFY_DONE;

	if (!ice_eswitch_br_netdev_to_port(dev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = container_of(info, typeof(*fdb_info), info);

		work = ice_eswitch_br_fdb_work_alloc(fdb_info, dev, event);
		if (IS_ERR(work)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to init switchdev fdb work");
			return notifier_from_errno(PTR_ERR(work));
		}
		dev_hold(dev);

		queue_work(br_offloads->wq, &work->work);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
{
	struct ice_esw_br_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
}

static void
ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable)
{
	if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING))
		return;

	ice_eswitch_br_fdb_flush(bridge);
	if (enable)
		bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING;
	else
		bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING;
}

static void
ice_eswitch_br_clear_pvid(struct ice_esw_br_port *port)
{
	struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, port->pvid, 0);
	struct ice_vsi_vlan_ops *vlan_ops;

	vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);

	vlan_ops->del_vlan(port->vsi, &port_vlan);
	vlan_ops->clear_port_vlan(port->vsi);

	ice_vf_vsi_disable_port_vlan(port->vsi);

	port->pvid = 0;
}
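/* Removing a VLAN invalidates any FDB entries learned in it, so the
 * matching entries are deleted first; if the VLAN was also the port's
 * pvid, the HW port VLAN configuration is cleared as well.
 */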
static void
ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port,
			    struct ice_esw_br_vlan *vlan)
{
	struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
	struct ice_esw_br *bridge = port->bridge;

	trace_ice_eswitch_br_vlan_cleanup(vlan);

	list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
		if (vlan->vid == fdb_entry->data.vid)
			ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
	}

	xa_erase(&port->vlans, vlan->vid);
	if (port->pvid == vlan->vid)
		ice_eswitch_br_clear_pvid(port);
	kfree(vlan);
}

static void ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port)
{
	struct ice_esw_br_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		ice_eswitch_br_vlan_cleanup(port, vlan);
}

static int
ice_eswitch_br_set_pvid(struct ice_esw_br_port *port,
			struct ice_esw_br_vlan *vlan)
{
	struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, vlan->vid, 0);
	struct device *dev = ice_pf_to_dev(port->vsi->back);
	struct ice_vsi_vlan_ops *vlan_ops;
	int err;

	if (port->pvid == vlan->vid || vlan->vid == 1)
		return 0;

	/* Setting port vlan on uplink isn't supported by hw */
	if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
		return -EOPNOTSUPP;

	if (port->pvid) {
		dev_info(dev,
			 "Port VLAN (vsi=%u, vid=%u) already exists on the port, remove it before adding new one\n",
			 port->vsi_idx, port->pvid);
		return -EEXIST;
	}

	ice_vf_vsi_enable_port_vlan(port->vsi);

	vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
	err = vlan_ops->set_port_vlan(port->vsi, &port_vlan);
	if (err)
		return err;

	err = vlan_ops->add_vlan(port->vsi, &port_vlan);
	if (err)
		return err;

	ice_eswitch_br_port_vlans_flush(port);
	port->pvid = vlan->vid;

	return 0;
}

static struct ice_esw_br_vlan *
ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port)
{
	struct device *dev = ice_pf_to_dev(port->vsi->back);
	struct ice_esw_br_vlan *vlan;
	int err;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return ERR_PTR(-ENOMEM);

	vlan->vid = vid;
	vlan->flags = flags;
	if ((flags & BRIDGE_VLAN_INFO_PVID) &&
	    (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		err = ice_eswitch_br_set_pvid(port, vlan);
		if (err)
			goto err_set_pvid;
	} else if ((flags & BRIDGE_VLAN_INFO_PVID) ||
		   (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		dev_info(dev, "VLAN push and pop are supported only simultaneously\n");
		err = -EOPNOTSUPP;
		goto err_set_pvid;
	}

	err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL);
	if (err)
		goto err_insert;

	trace_ice_eswitch_br_vlan_create(vlan);

	return vlan;

err_insert:
	if (port->pvid)
		ice_eswitch_br_clear_pvid(port);
err_set_pvid:
	kfree(vlan);
	return ERR_PTR(err);
}
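/* A port is either in port VLAN mode (a single pvid VLAN carrying both
 * the PVID and UNTAGGED flags) or in trunk mode (any number of tagged
 * VLANs); the two are mutually exclusive, which is why a configured pvid
 * blocks adding trunk VLANs below.
 */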
static int
ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid,
			     u16 flags, struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *port;
	struct ice_esw_br_vlan *vlan;

	port = xa_load(&bridge->ports, vsi_idx);
	if (!port)
		return -EINVAL;

	if (port->pvid) {
		dev_info(ice_pf_to_dev(port->vsi->back),
			 "Port VLAN (vsi=%u, vid=%d) exists on the port, remove it to add trunk VLANs\n",
			 port->vsi_idx, port->pvid);
		return -EEXIST;
	}

	vlan = xa_load(&port->vlans, vid);
	if (vlan) {
		if (vlan->flags == flags)
			return 0;

		ice_eswitch_br_vlan_cleanup(port, vlan);
	}

	vlan = ice_eswitch_br_vlan_create(vid, flags, port);
	if (IS_ERR(vlan)) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u",
				       vid, vsi_idx);
		return PTR_ERR(vlan);
	}

	return 0;
}

static void
ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
{
	struct ice_esw_br_port *port;
	struct ice_esw_br_vlan *vlan;

	port = xa_load(&bridge->ports, vsi_idx);
	if (!port)
		return;

	vlan = xa_load(&port->vlans, vid);
	if (!vlan)
		return;

	ice_eswitch_br_vlan_cleanup(port, vlan);
}

static int
ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx,
			    const struct switchdev_obj *obj,
			    struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (!br_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = ice_eswitch_br_port_vlan_add(br_port->bridge,
						   br_port->vsi_idx, vlan->vid,
						   vlan->flags, extack);
		return err;
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx,
			    const struct switchdev_obj *obj)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
	struct switchdev_obj_port_vlan *vlan;

	if (!br_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx,
					     vlan->vid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx,
				 const struct switchdev_attr *attr,
				 struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);

	if (!br_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ice_eswitch_br_vlan_filtering_set(br_port->bridge,
						  attr->u.vlan_filtering);
		return 0;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		br_port->bridge->ageing_time =
			clock_t_to_jiffies(attr->u.ageing_time);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event,
			      void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    ice_eswitch_br_is_dev_valid,
						    ice_eswitch_br_port_obj_add);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    ice_eswitch_br_is_dev_valid,
						    ice_eswitch_br_port_obj_del);
		break;
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     ice_eswitch_br_is_dev_valid,
						     ice_eswitch_br_port_obj_attr_set);
		break;
	default:
		err = 0;
	}

	return notifier_from_errno(err);
}
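/* Port teardown order matters: FDB entries pointing at the port are
 * deleted first, then the PF/representor back-pointer is cleared, and
 * only then is the port dropped from the bridge xarray and its VLAN
 * state flushed.
 */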
static void
ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
			   struct ice_esw_br_port *br_port)
{
	struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
	struct ice_vsi *vsi = br_port->vsi;

	list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
		if (br_port == fdb_entry->br_port)
			ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
	}

	if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) {
		vsi->back->br_port = NULL;
	} else {
		struct ice_repr *repr =
			ice_repr_get(vsi->back, br_port->repr_id);

		if (repr)
			repr->br_port = NULL;
	}

	xa_erase(&bridge->ports, br_port->vsi_idx);
	ice_eswitch_br_port_vlans_flush(br_port);
	kfree(br_port);
}

static struct ice_esw_br_port *
ice_eswitch_br_port_init(struct ice_esw_br *bridge)
{
	struct ice_esw_br_port *br_port;

	br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
	if (!br_port)
		return ERR_PTR(-ENOMEM);

	xa_init(&br_port->vlans);

	br_port->bridge = bridge;

	return br_port;
}

static int
ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
				 struct ice_repr *repr)
{
	struct ice_esw_br_port *br_port;
	int err;

	br_port = ice_eswitch_br_port_init(bridge);
	if (IS_ERR(br_port))
		return PTR_ERR(br_port);

	br_port->vsi = repr->src_vsi;
	br_port->vsi_idx = br_port->vsi->idx;
	br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
	br_port->repr_id = repr->id;
	repr->br_port = br_port;

	err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
	if (err) {
		ice_eswitch_br_port_deinit(bridge, br_port);
		return err;
	}

	return 0;
}

static int
ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->eswitch.uplink_vsi;
	struct ice_esw_br_port *br_port;
	int err;

	br_port = ice_eswitch_br_port_init(bridge);
	if (IS_ERR(br_port))
		return PTR_ERR(br_port);

	br_port->vsi = vsi;
	br_port->vsi_idx = br_port->vsi->idx;
	br_port->type = ICE_ESWITCH_BR_UPLINK_PORT;
	pf->br_port = br_port;

	err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
	if (err) {
		ice_eswitch_br_port_deinit(bridge, br_port);
		return err;
	}

	return 0;
}

static void
ice_eswitch_br_ports_flush(struct ice_esw_br *bridge)
{
	struct ice_esw_br_port *port;
	unsigned long i;

	xa_for_each(&bridge->ports, i, port)
		ice_eswitch_br_port_deinit(bridge, port);
}
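/* A single bridge instance is tracked per eswitch (br_offloads->bridge).
 * It is created lazily when the first port is linked and torn down again
 * once the last port leaves, see ice_eswitch_br_get() and
 * ice_eswitch_br_verify_deinit().
 */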
static void
ice_eswitch_br_deinit(struct ice_esw_br_offloads *br_offloads,
		      struct ice_esw_br *bridge)
{
	if (!bridge)
		return;

	/* Cleanup all the ports that were added asynchronously
	 * through NETDEV_CHANGEUPPER event.
	 */
	ice_eswitch_br_ports_flush(bridge);
	WARN_ON(!xa_empty(&bridge->ports));
	xa_destroy(&bridge->ports);
	rhashtable_destroy(&bridge->fdb_ht);

	br_offloads->bridge = NULL;
	kfree(bridge);
}

static struct ice_esw_br *
ice_eswitch_br_init(struct ice_esw_br_offloads *br_offloads, int ifindex)
{
	struct ice_esw_br *bridge;
	int err;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return ERR_PTR(-ENOMEM);

	err = rhashtable_init(&bridge->fdb_ht, &ice_fdb_ht_params);
	if (err) {
		kfree(bridge);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&bridge->fdb_list);
	bridge->br_offloads = br_offloads;
	bridge->ifindex = ifindex;
	bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
	xa_init(&bridge->ports);
	br_offloads->bridge = bridge;

	return bridge;
}

static struct ice_esw_br *
ice_eswitch_br_get(struct ice_esw_br_offloads *br_offloads, int ifindex,
		   struct netlink_ext_ack *extack)
{
	struct ice_esw_br *bridge = br_offloads->bridge;

	if (bridge) {
		if (bridge->ifindex != ifindex) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one bridge is supported per eswitch");
			return ERR_PTR(-EOPNOTSUPP);
		}
		return bridge;
	}

	/* Create the bridge if it doesn't exist yet */
	bridge = ice_eswitch_br_init(br_offloads, ifindex);
	if (IS_ERR(bridge))
		NL_SET_ERR_MSG_MOD(extack, "Failed to init the bridge");

	return bridge;
}

static void
ice_eswitch_br_verify_deinit(struct ice_esw_br_offloads *br_offloads,
			     struct ice_esw_br *bridge)
{
	/* Remove the bridge if it exists and there are no ports left */
	if (!bridge || !xa_empty(&bridge->ports))
		return;

	ice_eswitch_br_deinit(br_offloads, bridge);
}

static int
ice_eswitch_br_port_unlink(struct ice_esw_br_offloads *br_offloads,
			   struct net_device *dev, int ifindex,
			   struct netlink_ext_ack *extack)
{
	struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(dev);
	struct ice_esw_br *bridge;

	if (!br_port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port representor is not attached to any bridge");
		return -EINVAL;
	}

	if (br_port->bridge->ifindex != ifindex) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port representor is attached to another bridge");
		return -EINVAL;
	}

	bridge = br_port->bridge;

	trace_ice_eswitch_br_port_unlink(br_port);
	ice_eswitch_br_port_deinit(br_port->bridge, br_port);
	ice_eswitch_br_verify_deinit(br_offloads, bridge);

	return 0;
}
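/* Linking is driven by NETDEV_CHANGEUPPER: a port representor joins the
 * bridge as a VF port, while a PF netdev (or the ice uplink found
 * underneath a LAG master) joins as the uplink port.
 */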
static int
ice_eswitch_br_port_link(struct ice_esw_br_offloads *br_offloads,
			 struct net_device *dev, int ifindex,
			 struct netlink_ext_ack *extack)
{
	struct ice_esw_br *bridge;
	int err;

	if (ice_eswitch_br_netdev_to_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port is already attached to the bridge");
		return -EINVAL;
	}

	bridge = ice_eswitch_br_get(br_offloads, ifindex, extack);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	if (ice_is_port_repr_netdev(dev)) {
		struct ice_repr *repr = ice_netdev_to_repr(dev);

		err = ice_eswitch_br_vf_repr_port_init(bridge, repr);
		trace_ice_eswitch_br_port_link(repr->br_port);
	} else {
		struct net_device *ice_dev;
		struct ice_pf *pf;

		if (netif_is_lag_master(dev))
			ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
		else
			ice_dev = dev;

		if (!ice_dev)
			return 0;

		pf = ice_netdev_to_pf(ice_dev);

		err = ice_eswitch_br_uplink_port_init(bridge, pf);
		trace_ice_eswitch_br_port_link(pf->br_port);
	}
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to init bridge port");
		goto err_port_init;
	}

	return 0;

err_port_init:
	ice_eswitch_br_verify_deinit(br_offloads, bridge);
	return err;
}

static int
ice_eswitch_br_port_changeupper(struct notifier_block *nb, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct ice_esw_br_offloads *br_offloads;
	struct netlink_ext_ack *extack;
	struct net_device *upper;

	br_offloads = ice_nb_to_br_offloads(nb, netdev_nb);

	if (!ice_eswitch_br_is_dev_valid(dev))
		return 0;

	upper = info->upper_dev;
	if (!netif_is_bridge_master(upper))
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (info->linking)
		return ice_eswitch_br_port_link(br_offloads, dev,
						upper->ifindex, extack);
	else
		return ice_eswitch_br_port_unlink(br_offloads, dev,
						  upper->ifindex, extack);
}

static int
ice_eswitch_br_port_event(struct notifier_block *nb,
			  unsigned long event, void *ptr)
{
	int err = 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = ice_eswitch_br_port_changeupper(nb, ptr);
		break;
	}

	return notifier_from_errno(err);
}

static void
ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads = pf->eswitch.br_offloads;

	ASSERT_RTNL();

	if (!br_offloads)
		return;

	ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);

	pf->eswitch.br_offloads = NULL;
	kfree(br_offloads);
}

static struct ice_esw_br_offloads *
ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;

	ASSERT_RTNL();

	if (pf->eswitch.br_offloads)
		return ERR_PTR(-EEXIST);

	br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
	if (!br_offloads)
		return ERR_PTR(-ENOMEM);

	pf->eswitch.br_offloads = br_offloads;
	br_offloads->pf = pf;

	return br_offloads;
}

void
ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;

	br_offloads = pf->eswitch.br_offloads;
	if (!br_offloads)
		return;

	cancel_delayed_work_sync(&br_offloads->update_work);
	unregister_netdevice_notifier(&br_offloads->netdev_nb);
	unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
	unregister_switchdev_notifier(&br_offloads->switchdev_nb);
	destroy_workqueue(br_offloads->wq);
	/* The notifier blocks were just unregistered, so no new events
	 * will arrive, but some events might still be in progress.
	 * Hold the rtnl lock and wait for them to finish.
	 */
	rtnl_lock();
	ice_eswitch_br_offloads_dealloc(pf);
	rtnl_unlock();
}
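/* Ageing: every ICE_ESW_BRIDGE_UPDATE_INTERVAL the delayed work below
 * walks the FDB list and expires learned entries whose last_use is older
 * than the bridge ageing time; entries added by the user never age out.
 */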
static void ice_eswitch_br_update(struct ice_esw_br_offloads *br_offloads)
{
	struct ice_esw_br *bridge = br_offloads->bridge;
	struct ice_esw_br_fdb_entry *entry, *tmp;

	if (!bridge)
		return;

	rtnl_lock();
	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
		if (entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER)
			continue;

		if (time_is_after_eq_jiffies(entry->last_use +
					     bridge->ageing_time))
			continue;

		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
	}
	rtnl_unlock();
}

static void ice_eswitch_br_update_work(struct work_struct *work)
{
	struct ice_esw_br_offloads *br_offloads;

	br_offloads = ice_work_to_br_offloads(work);

	ice_eswitch_br_update(br_offloads);

	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   ICE_ESW_BRIDGE_UPDATE_INTERVAL);
}

int
ice_eswitch_br_offloads_init(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	rtnl_lock();
	br_offloads = ice_eswitch_br_offloads_alloc(pf);
	rtnl_unlock();
	if (IS_ERR(br_offloads)) {
		dev_err(dev, "Failed to init eswitch bridge\n");
		return PTR_ERR(br_offloads);
	}

	br_offloads->wq = alloc_ordered_workqueue("ice_bridge_wq", 0);
	if (!br_offloads->wq) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate bridge workqueue\n");
		goto err_alloc_wq;
	}

	br_offloads->switchdev_nb.notifier_call =
		ice_eswitch_br_switchdev_event;
	err = register_switchdev_notifier(&br_offloads->switchdev_nb);
	if (err) {
		dev_err(dev,
			"Failed to register switchdev notifier\n");
		goto err_reg_switchdev_nb;
	}

	br_offloads->switchdev_blk.notifier_call =
		ice_eswitch_br_event_blocking;
	err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
	if (err) {
		dev_err(dev,
			"Failed to register bridge blocking switchdev notifier\n");
		goto err_reg_switchdev_blk;
	}

	br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event;
	err = register_netdevice_notifier(&br_offloads->netdev_nb);
	if (err) {
		dev_err(dev,
			"Failed to register bridge port event notifier\n");
		goto err_reg_netdev_nb;
	}

	INIT_DELAYED_WORK(&br_offloads->update_work,
			  ice_eswitch_br_update_work);
	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   ICE_ESW_BRIDGE_UPDATE_INTERVAL);

	return 0;

err_reg_netdev_nb:
	unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
err_reg_switchdev_blk:
	unregister_switchdev_notifier(&br_offloads->switchdev_nb);
err_reg_switchdev_nb:
	destroy_workqueue(br_offloads->wq);
err_alloc_wq:
	rtnl_lock();
	ice_eswitch_br_offloads_dealloc(pf);
	rtnl_unlock();

	return err;
}