// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch_br.h"
#include "ice_repr.h"
#include "ice_switch.h"
#include "ice_vlan.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_trace.h"

#define ICE_ESW_BRIDGE_UPDATE_INTERVAL msecs_to_jiffies(1000)

static const struct rhashtable_params ice_fdb_ht_params = {
        .key_offset = offsetof(struct ice_esw_br_fdb_entry, data),
        .key_len = sizeof(struct ice_esw_br_fdb_data),
        .head_offset = offsetof(struct ice_esw_br_fdb_entry, ht_node),
        .automatic_shrinking = true,
};

static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev)
{
        /* Accept only PF netdev, PRs and LAG */
        return ice_is_port_repr_netdev(dev) || netif_is_ice(dev) ||
                netif_is_lag_master(dev);
}

static struct net_device *
ice_eswitch_br_get_uplink_from_lag(struct net_device *lag_dev)
{
        struct net_device *lower;
        struct list_head *iter;

        netdev_for_each_lower_dev(lag_dev, lower, iter) {
                if (netif_is_ice(lower))
                        return lower;
        }

        return NULL;
}

static struct ice_esw_br_port *
ice_eswitch_br_netdev_to_port(struct net_device *dev)
{
        if (ice_is_port_repr_netdev(dev)) {
                struct ice_repr *repr = ice_netdev_to_repr(dev);

                return repr->br_port;
        } else if (netif_is_ice(dev) || netif_is_lag_master(dev)) {
                struct net_device *ice_dev;
                struct ice_pf *pf;

                if (netif_is_lag_master(dev))
                        ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
                else
                        ice_dev = dev;

                if (!ice_dev)
                        return NULL;

                pf = ice_netdev_to_pf(ice_dev);

                return pf->br_port;
        }

        return NULL;
}

static void
ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info,
                                  u8 pf_id, u16 vf_vsi_idx)
{
        rule_info->sw_act.vsi_handle = vf_vsi_idx;
        rule_info->sw_act.flag |= ICE_FLTR_RX;
        rule_info->sw_act.src = pf_id;
        rule_info->priority = 2;
}

static void
ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info,
                                 u16 pf_vsi_idx)
{
        rule_info->sw_act.vsi_handle = pf_vsi_idx;
        rule_info->sw_act.flag |= ICE_FLTR_TX;
        rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
        rule_info->flags_info.act_valid = true;
        rule_info->priority = 2;
}

static int
ice_eswitch_br_rule_delete(struct ice_hw *hw, struct ice_rule_query_data *rule)
{
        int err;

        if (!rule)
                return -EINVAL;

        err = ice_rem_adv_rule_by_id(hw, rule);
        kfree(rule);

        return err;
}

static u16
ice_eswitch_br_get_lkups_cnt(u16 vid)
{
        return ice_eswitch_br_is_vid_valid(vid) ? 2 : 1;
}
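
/* When the bridge has VLAN filtering enabled, FDB rules match on
 * {MAC, VID}: a second lookup element with an exact-match mask on the
 * outer VLAN tag is appended after the MAC lookup. Otherwise a single
 * MAC lookup is used.
 */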
static void
ice_eswitch_br_add_vlan_lkup(struct ice_adv_lkup_elem *list, u16 vid)
{
        if (ice_eswitch_br_is_vid_valid(vid)) {
                list[1].type = ICE_VLAN_OFOS;
                list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
                list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
        }
}

static struct ice_rule_query_data *
ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
                               const unsigned char *mac, u16 vid)
{
        struct ice_adv_rule_info rule_info = { 0 };
        struct ice_rule_query_data *rule;
        struct ice_adv_lkup_elem *list;
        u16 lkups_cnt;
        int err;

        lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);

        rule = kzalloc(sizeof(*rule), GFP_KERNEL);
        if (!rule)
                return ERR_PTR(-ENOMEM);

        list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
        if (!list) {
                err = -ENOMEM;
                goto err_list_alloc;
        }

        switch (port_type) {
        case ICE_ESWITCH_BR_UPLINK_PORT:
                ice_eswitch_br_egress_rule_setup(&rule_info, vsi_idx);
                break;
        case ICE_ESWITCH_BR_VF_REPR_PORT:
                ice_eswitch_br_ingress_rule_setup(&rule_info, hw->pf_id,
                                                  vsi_idx);
                break;
        default:
                err = -EINVAL;
                goto err_add_rule;
        }

        list[0].type = ICE_MAC_OFOS;
        ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac);
        eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr);

        ice_eswitch_br_add_vlan_lkup(list, vid);

        rule_info.need_pass_l2 = true;

        rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;

        err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
        if (err)
                goto err_add_rule;

        kfree(list);

        return rule;

err_add_rule:
        kfree(list);
err_list_alloc:
        kfree(rule);

        return ERR_PTR(err);
}

static struct ice_rule_query_data *
ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
                                 const unsigned char *mac, u16 vid)
{
        struct ice_adv_rule_info rule_info = { 0 };
        struct ice_rule_query_data *rule;
        struct ice_adv_lkup_elem *list;
        int err = -ENOMEM;
        u16 lkups_cnt;

        lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);

        rule = kzalloc(sizeof(*rule), GFP_KERNEL);
        if (!rule)
                goto err_exit;

        list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
        if (!list)
                goto err_list_alloc;

        list[0].type = ICE_MAC_OFOS;
        ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
        eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);

        ice_eswitch_br_add_vlan_lkup(list, vid);

        rule_info.allow_pass_l2 = true;
        rule_info.sw_act.vsi_handle = vsi_idx;
        rule_info.sw_act.fltr_act = ICE_NOP;
        rule_info.priority = 2;

        err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
        if (err)
                goto err_add_rule;

        kfree(list);

        return rule;

err_add_rule:
        kfree(list);
err_list_alloc:
        kfree(rule);
err_exit:
        return ERR_PTR(err);
}
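
/* Each offloaded FDB entry is backed by two HW rules created as a pair:
 * a forward rule matching the learned address as destination
 * (ICE_FWD_TO_VSI) and a guard rule matching it as source (ICE_NOP with
 * allow_pass_l2). Both rules are torn down together when the entry is
 * removed.
 */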
static struct ice_esw_br_flow *
ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
                           int port_type, const unsigned char *mac, u16 vid)
{
        struct ice_rule_query_data *fwd_rule, *guard_rule;
        struct ice_esw_br_flow *flow;
        int err;

        flow = kzalloc(sizeof(*flow), GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac,
                                                  vid);
        err = PTR_ERR_OR_ZERO(fwd_rule);
        if (err) {
                dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n",
                        port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
                        err);
                goto err_fwd_rule;
        }

        guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid);
        err = PTR_ERR_OR_ZERO(guard_rule);
        if (err) {
                dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n",
                        port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
                        err);
                goto err_guard_rule;
        }

        flow->fwd_rule = fwd_rule;
        flow->guard_rule = guard_rule;

        return flow;

err_guard_rule:
        ice_eswitch_br_rule_delete(hw, fwd_rule);
err_fwd_rule:
        kfree(flow);

        return ERR_PTR(err);
}

static struct ice_esw_br_fdb_entry *
ice_eswitch_br_fdb_find(struct ice_esw_br *bridge, const unsigned char *mac,
                        u16 vid)
{
        struct ice_esw_br_fdb_data data = {
                .vid = vid,
        };

        ether_addr_copy(data.addr, mac);
        return rhashtable_lookup_fast(&bridge->fdb_ht, &data,
                                      ice_fdb_ht_params);
}

static void
ice_eswitch_br_flow_delete(struct ice_pf *pf, struct ice_esw_br_flow *flow)
{
        struct device *dev = ice_pf_to_dev(pf);
        int err;

        err = ice_eswitch_br_rule_delete(&pf->hw, flow->fwd_rule);
        if (err)
                dev_err(dev, "Failed to delete FDB forward rule, err: %d\n",
                        err);

        err = ice_eswitch_br_rule_delete(&pf->hw, flow->guard_rule);
        if (err)
                dev_err(dev, "Failed to delete FDB guard rule, err: %d\n",
                        err);

        kfree(flow);
}

static struct ice_esw_br_vlan *
ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
{
        struct ice_pf *pf = bridge->br_offloads->pf;
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_esw_br_port *port;
        struct ice_esw_br_vlan *vlan;

        port = xa_load(&bridge->ports, vsi_idx);
        if (!port) {
                dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx);
                return ERR_PTR(-EINVAL);
        }

        vlan = xa_load(&port->vlans, vid);
        if (!vlan) {
                dev_info(dev, "Bridge port vlan metadata lookup failed (vsi=%u)\n",
                         vsi_idx);
                return ERR_PTR(-EINVAL);
        }

        return vlan;
}

static void
ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge,
                                struct ice_esw_br_fdb_entry *fdb_entry)
{
        struct ice_pf *pf = bridge->br_offloads->pf;

        rhashtable_remove_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
                               ice_fdb_ht_params);
        list_del(&fdb_entry->list);

        ice_eswitch_br_flow_delete(pf, fdb_entry->flow);

        kfree(fdb_entry);
}

static void
ice_eswitch_br_fdb_offload_notify(struct net_device *dev,
                                  const unsigned char *mac, u16 vid,
                                  unsigned long val)
{
        struct switchdev_notifier_fdb_info fdb_info = {
                .addr = mac,
                .vid = vid,
                .offloaded = true,
        };

        call_switchdev_notifiers(val, dev, &fdb_info.info, NULL);
}

static void
ice_eswitch_br_fdb_entry_notify_and_cleanup(struct ice_esw_br *bridge,
                                            struct ice_esw_br_fdb_entry *entry)
{
        if (!(entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER))
                ice_eswitch_br_fdb_offload_notify(entry->dev, entry->data.addr,
                                                  entry->data.vid,
                                                  SWITCHDEV_FDB_DEL_TO_BRIDGE);
        ice_eswitch_br_fdb_entry_delete(bridge, entry);
}

static void
ice_eswitch_br_fdb_entry_find_and_delete(struct ice_esw_br *bridge,
                                         const unsigned char *mac, u16 vid)
{
        struct ice_pf *pf = bridge->br_offloads->pf;
        struct ice_esw_br_fdb_entry *fdb_entry;
        struct device *dev = ice_pf_to_dev(pf);

        fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
        if (!fdb_entry) {
                dev_err(dev, "FDB entry with mac: %pM and vid: %u not found\n",
                        mac, vid);
                return;
        }

        trace_ice_eswitch_br_fdb_entry_find_and_delete(fdb_entry);
        ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
}
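
/* Create an FDB entry learned on @netdev and program the matching HW
 * rules. An existing entry for the same {MAC, VID} is replaced. Entries
 * learned by the driver are announced back to the bridge with
 * SWITCHDEV_FDB_ADD_TO_BRIDGE, while user-added (static) entries are
 * only marked as SWITCHDEV_FDB_OFFLOADED.
 */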
static void
ice_eswitch_br_fdb_entry_create(struct net_device *netdev,
                                struct ice_esw_br_port *br_port,
                                bool added_by_user,
                                const unsigned char *mac, u16 vid)
{
        struct ice_esw_br *bridge = br_port->bridge;
        struct ice_pf *pf = bridge->br_offloads->pf;
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_esw_br_fdb_entry *fdb_entry;
        struct ice_esw_br_flow *flow;
        struct ice_esw_br_vlan *vlan;
        struct ice_hw *hw = &pf->hw;
        unsigned long event;
        int err;

        /* untagged filtering is not yet supported */
        if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid)
                return;

        if ((bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING)) {
                vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx,
                                                   vid);
                if (IS_ERR(vlan)) {
                        dev_err(dev, "Failed to find VLAN for FDB entry, err: %ld\n",
                                PTR_ERR(vlan));
                        return;
                }
        }

        fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
        if (fdb_entry)
                ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);

        fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
        if (!fdb_entry) {
                err = -ENOMEM;
                goto err_exit;
        }

        flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx,
                                          br_port->type, mac, vid);
        if (IS_ERR(flow)) {
                err = PTR_ERR(flow);
                goto err_add_flow;
        }

        ether_addr_copy(fdb_entry->data.addr, mac);
        fdb_entry->data.vid = vid;
        fdb_entry->br_port = br_port;
        fdb_entry->flow = flow;
        fdb_entry->dev = netdev;
        fdb_entry->last_use = jiffies;
        event = SWITCHDEV_FDB_ADD_TO_BRIDGE;

        if (added_by_user) {
                fdb_entry->flags |= ICE_ESWITCH_BR_FDB_ADDED_BY_USER;
                event = SWITCHDEV_FDB_OFFLOADED;
        }

        err = rhashtable_insert_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
                                     ice_fdb_ht_params);
        if (err)
                goto err_fdb_insert;

        list_add(&fdb_entry->list, &bridge->fdb_list);
        trace_ice_eswitch_br_fdb_entry_create(fdb_entry);

        ice_eswitch_br_fdb_offload_notify(netdev, mac, vid, event);

        return;

err_fdb_insert:
        ice_eswitch_br_flow_delete(pf, flow);
err_add_flow:
        kfree(fdb_entry);
err_exit:
        dev_err(dev, "Failed to create fdb entry, err: %d\n", err);
}

static void
ice_eswitch_br_fdb_work_dealloc(struct ice_esw_br_fdb_work *fdb_work)
{
        kfree(fdb_work->fdb_info.addr);
        kfree(fdb_work);
}

static void
ice_eswitch_br_fdb_event_work(struct work_struct *work)
{
        struct ice_esw_br_fdb_work *fdb_work = ice_work_to_fdb_work(work);
        bool added_by_user = fdb_work->fdb_info.added_by_user;
        const unsigned char *mac = fdb_work->fdb_info.addr;
        u16 vid = fdb_work->fdb_info.vid;
        struct ice_esw_br_port *br_port;

        rtnl_lock();

        br_port = ice_eswitch_br_netdev_to_port(fdb_work->dev);
        if (!br_port)
                goto err_exit;

        switch (fdb_work->event) {
        case SWITCHDEV_FDB_ADD_TO_DEVICE:
                ice_eswitch_br_fdb_entry_create(fdb_work->dev, br_port,
                                                added_by_user, mac, vid);
                break;
        case SWITCHDEV_FDB_DEL_TO_DEVICE:
                ice_eswitch_br_fdb_entry_find_and_delete(br_port->bridge,
                                                         mac, vid);
                break;
        default:
                goto err_exit;
        }

err_exit:
        rtnl_unlock();
        dev_put(fdb_work->dev);
        ice_eswitch_br_fdb_work_dealloc(fdb_work);
}
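
/* FDB notifications arrive in atomic context, so the notifier only
 * snapshots the event here (GFP_ATOMIC, including its own copy of the
 * MAC address, as the notifier-owned data is not valid after return)
 * and defers the actual processing to the ordered workqueue.
 */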
static struct ice_esw_br_fdb_work *
ice_eswitch_br_fdb_work_alloc(struct switchdev_notifier_fdb_info *fdb_info,
                              struct net_device *dev,
                              unsigned long event)
{
        struct ice_esw_br_fdb_work *work;
        unsigned char *mac;

        work = kzalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return ERR_PTR(-ENOMEM);

        INIT_WORK(&work->work, ice_eswitch_br_fdb_event_work);
        memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));

        mac = kzalloc(ETH_ALEN, GFP_ATOMIC);
        if (!mac) {
                kfree(work);
                return ERR_PTR(-ENOMEM);
        }

        ether_addr_copy(mac, fdb_info->addr);
        work->fdb_info.addr = mac;
        work->event = event;
        work->dev = dev;

        return work;
}

static int
ice_eswitch_br_switchdev_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
{
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        struct switchdev_notifier_fdb_info *fdb_info;
        struct switchdev_notifier_info *info = ptr;
        struct ice_esw_br_offloads *br_offloads;
        struct ice_esw_br_fdb_work *work;
        struct netlink_ext_ack *extack;
        struct net_device *upper;

        br_offloads = ice_nb_to_br_offloads(nb, switchdev_nb);
        extack = switchdev_notifier_info_to_extack(ptr);

        upper = netdev_master_upper_dev_get_rcu(dev);
        if (!upper)
                return NOTIFY_DONE;

        if (!netif_is_bridge_master(upper))
                return NOTIFY_DONE;

        if (!ice_eswitch_br_is_dev_valid(dev))
                return NOTIFY_DONE;

        if (!ice_eswitch_br_netdev_to_port(dev))
                return NOTIFY_DONE;

        switch (event) {
        case SWITCHDEV_FDB_ADD_TO_DEVICE:
        case SWITCHDEV_FDB_DEL_TO_DEVICE:
                fdb_info = container_of(info, typeof(*fdb_info), info);

                work = ice_eswitch_br_fdb_work_alloc(fdb_info, dev, event);
                if (IS_ERR(work)) {
                        NL_SET_ERR_MSG_MOD(extack, "Failed to init switchdev fdb work");
                        return notifier_from_errno(PTR_ERR(work));
                }
                dev_hold(dev);

                queue_work(br_offloads->wq, &work->work);
                break;
        default:
                break;
        }
        return NOTIFY_DONE;
}

void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
{
        struct ice_esw_br_fdb_entry *entry, *tmp;

        if (!bridge)
                return;

        list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
                ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
}

static void
ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable)
{
        if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING))
                return;

        ice_eswitch_br_fdb_flush(bridge);
        if (enable)
                bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING;
        else
                bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING;
}

static void
ice_eswitch_br_clear_pvid(struct ice_esw_br_port *port)
{
        struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, port->pvid, 0);
        struct ice_vsi_vlan_ops *vlan_ops;

        vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);

        vlan_ops->del_vlan(port->vsi, &port_vlan);
        vlan_ops->clear_port_vlan(port->vsi);

        ice_vf_vsi_disable_port_vlan(port->vsi);

        port->pvid = 0;
}
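
/* Tearing down a VLAN also invalidates every FDB entry learned on that
 * VID, and clears the port VLAN (PVID) if this VLAN was acting as one.
 */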
static void
ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port,
                            struct ice_esw_br_vlan *vlan)
{
        struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
        struct ice_esw_br *bridge = port->bridge;

        trace_ice_eswitch_br_vlan_cleanup(vlan);

        list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
                if (vlan->vid == fdb_entry->data.vid)
                        ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
        }

        xa_erase(&port->vlans, vlan->vid);
        if (port->pvid == vlan->vid)
                ice_eswitch_br_clear_pvid(port);
        kfree(vlan);
}

static void ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port)
{
        struct ice_esw_br_vlan *vlan;
        unsigned long index;

        xa_for_each(&port->vlans, index, vlan)
                ice_eswitch_br_vlan_cleanup(port, vlan);
}

static int
ice_eswitch_br_set_pvid(struct ice_esw_br_port *port,
                        struct ice_esw_br_vlan *vlan)
{
        struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, vlan->vid, 0);
        struct device *dev = ice_pf_to_dev(port->vsi->back);
        struct ice_vsi_vlan_ops *vlan_ops;
        int err;

        if (port->pvid == vlan->vid || vlan->vid == 1)
                return 0;

        /* Setting port vlan on uplink isn't supported by hw */
        if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
                return -EOPNOTSUPP;

        if (port->pvid) {
                dev_info(dev,
                         "Port VLAN (vsi=%u, vid=%u) already exists on the port, remove it before adding new one\n",
                         port->vsi_idx, port->pvid);
                return -EEXIST;
        }

        ice_vf_vsi_enable_port_vlan(port->vsi);

        vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
        err = vlan_ops->set_port_vlan(port->vsi, &port_vlan);
        if (err)
                return err;

        err = vlan_ops->add_vlan(port->vsi, &port_vlan);
        if (err)
                return err;

        ice_eswitch_br_port_vlans_flush(port);
        port->pvid = vlan->vid;

        return 0;
}

static struct ice_esw_br_vlan *
ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port)
{
        struct device *dev = ice_pf_to_dev(port->vsi->back);
        struct ice_esw_br_vlan *vlan;
        int err;

        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan)
                return ERR_PTR(-ENOMEM);

        vlan->vid = vid;
        vlan->flags = flags;
        if ((flags & BRIDGE_VLAN_INFO_PVID) &&
            (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
                err = ice_eswitch_br_set_pvid(port, vlan);
                if (err)
                        goto err_set_pvid;
        } else if ((flags & BRIDGE_VLAN_INFO_PVID) ||
                   (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
                dev_info(dev, "VLAN push and pop are only supported simultaneously\n");
                err = -EOPNOTSUPP;
                goto err_set_pvid;
        }

        err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL);
        if (err)
                goto err_insert;

        trace_ice_eswitch_br_vlan_create(vlan);

        return vlan;

err_insert:
        if (port->pvid)
                ice_eswitch_br_clear_pvid(port);
err_set_pvid:
        kfree(vlan);
        return ERR_PTR(err);
}
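
/* Add a trunk VLAN to a bridge port. A configured port VLAN (PVID) is
 * exclusive with trunk VLANs; if the VLAN already exists with different
 * flags, it is recreated with the new ones.
 */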
static int
ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid,
                             u16 flags, struct netlink_ext_ack *extack)
{
        struct ice_esw_br_port *port;
        struct ice_esw_br_vlan *vlan;

        port = xa_load(&bridge->ports, vsi_idx);
        if (!port)
                return -EINVAL;

        if (port->pvid) {
                dev_info(ice_pf_to_dev(port->vsi->back),
                         "Port VLAN (vsi=%u, vid=%d) exists on the port, remove it to add trunk VLANs\n",
                         port->vsi_idx, port->pvid);
                return -EEXIST;
        }

        vlan = xa_load(&port->vlans, vid);
        if (vlan) {
                if (vlan->flags == flags)
                        return 0;

                ice_eswitch_br_vlan_cleanup(port, vlan);
        }

        vlan = ice_eswitch_br_vlan_create(vid, flags, port);
        if (IS_ERR(vlan)) {
                NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u",
                                       vid, vsi_idx);
                return PTR_ERR(vlan);
        }

        return 0;
}

static void
ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
{
        struct ice_esw_br_port *port;
        struct ice_esw_br_vlan *vlan;

        port = xa_load(&bridge->ports, vsi_idx);
        if (!port)
                return;

        vlan = xa_load(&port->vlans, vid);
        if (!vlan)
                return;

        ice_eswitch_br_vlan_cleanup(port, vlan);
}

static int
ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx,
                            const struct switchdev_obj *obj,
                            struct netlink_ext_ack *extack)
{
        struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
        struct switchdev_obj_port_vlan *vlan;
        int err;

        if (!br_port)
                return -EINVAL;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
                err = ice_eswitch_br_port_vlan_add(br_port->bridge,
                                                   br_port->vsi_idx, vlan->vid,
                                                   vlan->flags, extack);
                return err;
        default:
                return -EOPNOTSUPP;
        }
}

static int
ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx,
                            const struct switchdev_obj *obj)
{
        struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
        struct switchdev_obj_port_vlan *vlan;

        if (!br_port)
                return -EINVAL;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
                ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx,
                                             vlan->vid);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int
ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx,
                                 const struct switchdev_attr *attr,
                                 struct netlink_ext_ack *extack)
{
        struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);

        if (!br_port)
                return -EINVAL;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
                ice_eswitch_br_vlan_filtering_set(br_port->bridge,
                                                  attr->u.vlan_filtering);
                return 0;
        case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
                br_port->bridge->ageing_time =
                        clock_t_to_jiffies(attr->u.ageing_time);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int
ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event,
                              void *ptr)
{
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        int err;

        switch (event) {
        case SWITCHDEV_PORT_OBJ_ADD:
                err = switchdev_handle_port_obj_add(dev, ptr,
                                                    ice_eswitch_br_is_dev_valid,
                                                    ice_eswitch_br_port_obj_add);
                break;
        case SWITCHDEV_PORT_OBJ_DEL:
                err = switchdev_handle_port_obj_del(dev, ptr,
                                                    ice_eswitch_br_is_dev_valid,
                                                    ice_eswitch_br_port_obj_del);
                break;
        case SWITCHDEV_PORT_ATTR_SET:
                err = switchdev_handle_port_attr_set(dev, ptr,
                                                     ice_eswitch_br_is_dev_valid,
                                                     ice_eswitch_br_port_obj_attr_set);
                break;
        default:
                err = 0;
        }

        return notifier_from_errno(err);
}
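
/* Detach a port from the bridge: drop every FDB entry learned on it,
 * clear the back-pointer held by the PF or the representor, and flush
 * any VLAN state before freeing the port itself.
 */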
static void
ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
                           struct ice_esw_br_port *br_port)
{
        struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
        struct ice_vsi *vsi = br_port->vsi;

        list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
                if (br_port == fdb_entry->br_port)
                        ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
        }

        if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) {
                vsi->back->br_port = NULL;
        } else {
                struct ice_repr *repr =
                        ice_repr_get(vsi->back, br_port->repr_id);

                if (repr)
                        repr->br_port = NULL;
        }

        xa_erase(&bridge->ports, br_port->vsi_idx);
        ice_eswitch_br_port_vlans_flush(br_port);
        kfree(br_port);
}

static struct ice_esw_br_port *
ice_eswitch_br_port_init(struct ice_esw_br *bridge)
{
        struct ice_esw_br_port *br_port;

        br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
        if (!br_port)
                return ERR_PTR(-ENOMEM);

        xa_init(&br_port->vlans);

        br_port->bridge = bridge;

        return br_port;
}

static int
ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
                                 struct ice_repr *repr)
{
        struct ice_esw_br_port *br_port;
        int err;

        br_port = ice_eswitch_br_port_init(bridge);
        if (IS_ERR(br_port))
                return PTR_ERR(br_port);

        br_port->vsi = repr->src_vsi;
        br_port->vsi_idx = br_port->vsi->idx;
        br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
        br_port->repr_id = repr->id;
        repr->br_port = br_port;

        err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
        if (err) {
                ice_eswitch_br_port_deinit(bridge, br_port);
                return err;
        }

        return 0;
}

static int
ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
{
        struct ice_vsi *vsi = pf->eswitch.uplink_vsi;
        struct ice_esw_br_port *br_port;
        int err;

        br_port = ice_eswitch_br_port_init(bridge);
        if (IS_ERR(br_port))
                return PTR_ERR(br_port);

        br_port->vsi = vsi;
        br_port->vsi_idx = br_port->vsi->idx;
        br_port->type = ICE_ESWITCH_BR_UPLINK_PORT;
        pf->br_port = br_port;

        err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
        if (err) {
                ice_eswitch_br_port_deinit(bridge, br_port);
                return err;
        }

        return 0;
}

static void
ice_eswitch_br_ports_flush(struct ice_esw_br *bridge)
{
        struct ice_esw_br_port *port;
        unsigned long i;

        xa_for_each(&bridge->ports, i, port)
                ice_eswitch_br_port_deinit(bridge, port);
}

static void
ice_eswitch_br_deinit(struct ice_esw_br_offloads *br_offloads,
                      struct ice_esw_br *bridge)
{
        if (!bridge)
                return;

        /* Clean up all the ports that were added asynchronously
         * through the NETDEV_CHANGEUPPER event.
         */
        ice_eswitch_br_ports_flush(bridge);
        WARN_ON(!xa_empty(&bridge->ports));
        xa_destroy(&bridge->ports);
        rhashtable_destroy(&bridge->fdb_ht);

        br_offloads->bridge = NULL;
        kfree(bridge);
}
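
/* A single bridge instance is supported per eswitch. It is created
 * lazily when the first port is linked and holds the FDB rhashtable
 * (keyed by {MAC, VID}), the port xarray and the ageing time.
 */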
static struct ice_esw_br *
ice_eswitch_br_init(struct ice_esw_br_offloads *br_offloads, int ifindex)
{
        struct ice_esw_br *bridge;
        int err;

        bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
        if (!bridge)
                return ERR_PTR(-ENOMEM);

        err = rhashtable_init(&bridge->fdb_ht, &ice_fdb_ht_params);
        if (err) {
                kfree(bridge);
                return ERR_PTR(err);
        }

        INIT_LIST_HEAD(&bridge->fdb_list);
        bridge->br_offloads = br_offloads;
        bridge->ifindex = ifindex;
        bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
        xa_init(&bridge->ports);
        br_offloads->bridge = bridge;

        return bridge;
}

static struct ice_esw_br *
ice_eswitch_br_get(struct ice_esw_br_offloads *br_offloads, int ifindex,
                   struct netlink_ext_ack *extack)
{
        struct ice_esw_br *bridge = br_offloads->bridge;

        if (bridge) {
                if (bridge->ifindex != ifindex) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Only one bridge is supported per eswitch");
                        return ERR_PTR(-EOPNOTSUPP);
                }
                return bridge;
        }

        /* Create the bridge if it doesn't exist yet */
        bridge = ice_eswitch_br_init(br_offloads, ifindex);
        if (IS_ERR(bridge))
                NL_SET_ERR_MSG_MOD(extack, "Failed to init the bridge");

        return bridge;
}

static void
ice_eswitch_br_verify_deinit(struct ice_esw_br_offloads *br_offloads,
                             struct ice_esw_br *bridge)
{
        /* Remove the bridge if it exists and there are no ports left */
        if (!bridge || !xa_empty(&bridge->ports))
                return;

        ice_eswitch_br_deinit(br_offloads, bridge);
}

static int
ice_eswitch_br_port_unlink(struct ice_esw_br_offloads *br_offloads,
                           struct net_device *dev, int ifindex,
                           struct netlink_ext_ack *extack)
{
        struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(dev);
        struct ice_esw_br *bridge;

        if (!br_port) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Port representor is not attached to any bridge");
                return -EINVAL;
        }

        if (br_port->bridge->ifindex != ifindex) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Port representor is attached to another bridge");
                return -EINVAL;
        }

        bridge = br_port->bridge;

        trace_ice_eswitch_br_port_unlink(br_port);
        ice_eswitch_br_port_deinit(br_port->bridge, br_port);
        ice_eswitch_br_verify_deinit(br_offloads, bridge);

        return 0;
}
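
/* Link a netdev to the offloaded bridge. Representors become VF ports;
 * the PF netdev (or the ice uplink found behind a LAG master) becomes
 * the uplink port. The bridge instance itself is created on demand.
 */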
static int
ice_eswitch_br_port_link(struct ice_esw_br_offloads *br_offloads,
                         struct net_device *dev, int ifindex,
                         struct netlink_ext_ack *extack)
{
        struct ice_esw_br *bridge;
        int err;

        if (ice_eswitch_br_netdev_to_port(dev)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Port is already attached to the bridge");
                return -EINVAL;
        }

        bridge = ice_eswitch_br_get(br_offloads, ifindex, extack);
        if (IS_ERR(bridge))
                return PTR_ERR(bridge);

        if (ice_is_port_repr_netdev(dev)) {
                struct ice_repr *repr = ice_netdev_to_repr(dev);

                err = ice_eswitch_br_vf_repr_port_init(bridge, repr);
                trace_ice_eswitch_br_port_link(repr->br_port);
        } else {
                struct net_device *ice_dev;
                struct ice_pf *pf;

                if (netif_is_lag_master(dev))
                        ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
                else
                        ice_dev = dev;

                if (!ice_dev)
                        return 0;

                pf = ice_netdev_to_pf(ice_dev);

                err = ice_eswitch_br_uplink_port_init(bridge, pf);
                trace_ice_eswitch_br_port_link(pf->br_port);
        }
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Failed to init bridge port");
                goto err_port_init;
        }

        return 0;

err_port_init:
        ice_eswitch_br_verify_deinit(br_offloads, bridge);
        return err;
}

static int
ice_eswitch_br_port_changeupper(struct notifier_block *nb, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netdev_notifier_changeupper_info *info = ptr;
        struct ice_esw_br_offloads *br_offloads;
        struct netlink_ext_ack *extack;
        struct net_device *upper;

        br_offloads = ice_nb_to_br_offloads(nb, netdev_nb);

        if (!ice_eswitch_br_is_dev_valid(dev))
                return 0;

        upper = info->upper_dev;
        if (!netif_is_bridge_master(upper))
                return 0;

        extack = netdev_notifier_info_to_extack(&info->info);

        if (info->linking)
                return ice_eswitch_br_port_link(br_offloads, dev,
                                                upper->ifindex, extack);
        else
                return ice_eswitch_br_port_unlink(br_offloads, dev,
                                                  upper->ifindex, extack);
}

static int
ice_eswitch_br_port_event(struct notifier_block *nb,
                          unsigned long event, void *ptr)
{
        int err = 0;

        switch (event) {
        case NETDEV_CHANGEUPPER:
                err = ice_eswitch_br_port_changeupper(nb, ptr);
                break;
        }

        return notifier_from_errno(err);
}

static void
ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
{
        struct ice_esw_br_offloads *br_offloads = pf->eswitch.br_offloads;

        ASSERT_RTNL();

        if (!br_offloads)
                return;

        ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);

        pf->eswitch.br_offloads = NULL;
        kfree(br_offloads);
}

static struct ice_esw_br_offloads *
ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
{
        struct ice_esw_br_offloads *br_offloads;

        ASSERT_RTNL();

        if (pf->eswitch.br_offloads)
                return ERR_PTR(-EEXIST);

        br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
        if (!br_offloads)
                return ERR_PTR(-ENOMEM);

        pf->eswitch.br_offloads = br_offloads;
        br_offloads->pf = pf;

        return br_offloads;
}

void
ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
{
        struct ice_esw_br_offloads *br_offloads;

        br_offloads = pf->eswitch.br_offloads;
        if (!br_offloads)
                return;

        cancel_delayed_work_sync(&br_offloads->update_work);
        unregister_netdevice_notifier(&br_offloads->netdev_nb);
        unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
        unregister_switchdev_notifier(&br_offloads->switchdev_nb);
        destroy_workqueue(br_offloads->wq);
        /* Although the notifier blocks were just unregistered, so no new
         * events can arrive, some events might still be in progress.
         * Hold the rtnl lock and wait for them to finish.
         */
        rtnl_lock();
        ice_eswitch_br_offloads_dealloc(pf);
        rtnl_unlock();
}
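
/* Periodic ageing pass: entries that were not refreshed within the
 * bridge ageing time are expired, except for user-added static ones.
 */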
static void ice_eswitch_br_update(struct ice_esw_br_offloads *br_offloads)
{
        struct ice_esw_br *bridge = br_offloads->bridge;
        struct ice_esw_br_fdb_entry *entry, *tmp;

        if (!bridge)
                return;

        rtnl_lock();
        list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
                if (entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER)
                        continue;

                if (time_is_after_eq_jiffies(entry->last_use +
                                             bridge->ageing_time))
                        continue;

                ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
        }
        rtnl_unlock();
}

static void ice_eswitch_br_update_work(struct work_struct *work)
{
        struct ice_esw_br_offloads *br_offloads;

        br_offloads = ice_work_to_br_offloads(work);

        ice_eswitch_br_update(br_offloads);

        queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
                           ICE_ESW_BRIDGE_UPDATE_INTERVAL);
}

int
ice_eswitch_br_offloads_init(struct ice_pf *pf)
{
        struct ice_esw_br_offloads *br_offloads;
        struct device *dev = ice_pf_to_dev(pf);
        int err;

        rtnl_lock();
        br_offloads = ice_eswitch_br_offloads_alloc(pf);
        rtnl_unlock();
        if (IS_ERR(br_offloads)) {
                dev_err(dev, "Failed to init eswitch bridge\n");
                return PTR_ERR(br_offloads);
        }

        br_offloads->wq = alloc_ordered_workqueue("ice_bridge_wq", 0);
        if (!br_offloads->wq) {
                err = -ENOMEM;
                dev_err(dev, "Failed to allocate bridge workqueue\n");
                goto err_alloc_wq;
        }

        br_offloads->switchdev_nb.notifier_call =
                ice_eswitch_br_switchdev_event;
        err = register_switchdev_notifier(&br_offloads->switchdev_nb);
        if (err) {
                dev_err(dev,
                        "Failed to register switchdev notifier\n");
                goto err_reg_switchdev_nb;
        }

        br_offloads->switchdev_blk.notifier_call =
                ice_eswitch_br_event_blocking;
        err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
        if (err) {
                dev_err(dev,
                        "Failed to register bridge blocking switchdev notifier\n");
                goto err_reg_switchdev_blk;
        }

        br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event;
        err = register_netdevice_notifier(&br_offloads->netdev_nb);
        if (err) {
                dev_err(dev,
                        "Failed to register bridge port event notifier\n");
                goto err_reg_netdev_nb;
        }

        INIT_DELAYED_WORK(&br_offloads->update_work,
                          ice_eswitch_br_update_work);
        queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
                           ICE_ESW_BRIDGE_UPDATE_INTERVAL);

        return 0;

err_reg_netdev_nb:
        unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
err_reg_switchdev_blk:
        unregister_switchdev_notifier(&br_offloads->switchdev_nb);
err_reg_switchdev_nb:
        destroy_workqueue(br_offloads->wq);
err_alloc_wq:
        rtnl_lock();
        ice_eswitch_br_offloads_dealloc(pf);
        rtnl_unlock();

        return err;
}