// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_eswitch.h"
#include "ice_fltr.h"
#include "ice_repr.h"
#include "ice_devlink.h"
#include "ice_tc_lib.h"

/**
 * ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC
 * @pf: pointer to PF struct
 * @vf: pointer to VF struct
 * @mac: VF's MAC address
 *
 * This function adds an advanced rule that forwards packets with the
 * VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue.
 */
int
ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_adv_lkup_elem *list;
	struct ice_hw *hw = &pf->hw;
	const u16 lkups_cnt = 1;
	int err;

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list[0].type = ICE_MAC_OFOS;
	ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
	eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);

	rule_info.sw_act.flag |= ICE_FLTR_TX;
	rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
	rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
	rule_info.rx = false;
	rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
				       ctrl_vsi->rxq_map[vf->vf_id];
	rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
	rule_info.flags_info.act_valid = true;
	rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
			       vf->repr->mac_rule);
	if (err)
		dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d",
			vf->vf_id);
	else
		vf->repr->rule_added = true;

	kfree(list);
	return err;
}

/**
 * ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC
 * @vf: pointer to VF struct
 *
 * This function replays the VF's MAC rule after reset.
 */
void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
{
	int err;

	if (!ice_is_switchdev_running(vf->pf))
		return;

	if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
		err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
						  vf->hw_lan_addr.addr);
		if (err) {
			dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d, error %d\n",
				vf->hw_lan_addr.addr, vf->vf_id, err);
			return;
		}
		vf->num_mac++;

		ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
	}
}

/**
 * ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC
 * @vf: pointer to the VF struct
 *
 * Delete the advanced rule that was used to forward packets with the VF's MAC
 * address (src MAC) to the corresponding switchdev ctrl VSI queue.
 */
void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf)
{
	if (!ice_is_switchdev_running(vf->pf))
		return;

	if (!vf->repr->rule_added)
		return;

	ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule);
	vf->repr->rule_added = false;
}

/**
 * ice_eswitch_setup_env - configure switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function adds the HW filter configuration specific to switchdev
 * mode.
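 *
 * In outline: VLAN stripping is disabled on the control VSI, existing MAC
 * filters and synced address lists are dropped from the uplink VSI, a VLAN 0
 * filter is added, the uplink is set as the default forwarding VSI, and the
 * allow-override security setting is enabled on both the uplink and control
 * VSIs.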
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct net_device *uplink_netdev = uplink_vsi->netdev;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_vsi_vlan_ops *vlan_ops;
	bool rule_added = false;

	vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi);
	if (vlan_ops->dis_stripping(ctrl_vsi))
		return -ENODEV;

	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	netif_addr_lock_bh(uplink_netdev);
	__dev_uc_unsync(uplink_netdev, NULL);
	__dev_mc_unsync(uplink_netdev, NULL);
	netif_addr_unlock_bh(uplink_netdev);

	if (ice_vsi_add_vlan_zero(uplink_vsi))
		goto err_def_rx;

	if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
		if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	return 0;

err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi->vsw);
err_def_rx:
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}

/**
 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
 * @pf: pointer to PF struct
 *
 * In switchdev mode the numbers of allocated Tx and Rx rings are equal.
 *
 * This function fills the q_vector structures associated with each
 * representor and moves each ring pair to the corresponding port
 * representor netdev. Each port representor gets one dedicated Tx/Rx ring
 * pair, so the number of ring pairs equals the number of VFs.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.control_vsi;
	int q_id;

	ice_for_each_txq(vsi, q_id) {
		struct ice_repr *repr = pf->vf[q_id].repr;
		struct ice_q_vector *q_vector = repr->q_vector;
		struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
		struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];

		q_vector->vsi = vsi;
		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;

		q_vector->num_ring_tx = 1;
		q_vector->tx.tx_ring = tx_ring;
		tx_ring->q_vector = q_vector;
		tx_ring->next = NULL;
		tx_ring->netdev = repr->netdev;
		/* In switchdev mode, from the OS stack's perspective, there is
		 * only one queue for a given netdev, so it must be indexed as 0.
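		 * The ring keeps its position (q_id) within the control VSI;
		 * only the index exposed through the representor netdev is
		 * forced to 0.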
		 */
		tx_ring->q_index = 0;

		q_vector->num_ring_rx = 1;
		q_vector->rx.rx_ring = rx_ring;
		rx_ring->q_vector = q_vector;
		rx_ring->next = NULL;
		rx_ring->netdev = repr->netdev;
	}
}

/**
 * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
 * @pf: pointer to PF struct
 */
static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int max_vsi_num = 0;
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						   GFP_KERNEL);
		if (!vf->repr->dst) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			goto err;
		}

		if (ice_vsi_add_vlan_zero(vsi)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
			goto err;
		}

		if (max_vsi_num < vsi->vsi_num)
			max_vsi_num = vsi->vsi_num;

		netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

		netif_keep_dst(vf->repr->netdev);
	}

	ice_for_each_vf(pf, i) {
		struct ice_repr *repr = pf->vf[i].repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

		dst = repr->dst;
		dst->u.port_info.port_id = vsi->vsi_num;
		dst->u.port_info.lower_dev = repr->netdev;
		ice_repr_set_traffic_vsi(repr, ctrl_vsi);
	}

	return 0;

err:
	for (i = i - 1; i >= 0; i--) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);
	}

	return -ENODEV;
}

/**
 * ice_eswitch_release_reprs - clear port representor VSIs configuration
 * @pf: pointer to PF struct
 * @ctrl_vsi: pointer to switchdev control VSI
 */
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);

		netif_napi_del(&vf->repr->q_vector->napi);
	}
}

/**
 * ice_eswitch_update_repr - reconfigure VF port representor
 * @vsi: VF VSI for which the port representor is configured
 */
void ice_eswitch_update_repr(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_repr *repr;
	struct ice_vf *vf;
	int ret;

	if (!ice_is_switchdev_running(pf))
		return;

	vf = &pf->vf[vsi->vf_id];
	repr = vf->repr;
	repr->src_vsi = vsi;
	repr->dst->u.port_info.port_id = vsi->vsi_num;

	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
	if (ret) {
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);
		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
	}
}

/**
 * ice_eswitch_port_start_xmit - callback for packet transmit
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;

	np = netdev_priv(netdev);
	vsi = np->vsi;

	if (ice_is_reset_in_progress(vsi->back->state))
		return NETDEV_TX_BUSY;

	repr = ice_netdev_to_repr(netdev);
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->queue_mapping = repr->vf->vf_id;

	return ice_start_xmit(skb, netdev);
}

/**
 * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
 * @skb: pointer to send buffer
 * @off: pointer to offload struct
 */
void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
			   struct ice_tx_offload_params *off)
{
	struct metadata_dst *dst = skb_metadata_dst(skb);
	u64 cd_cmd, dst_vsi;

	if (!dst) {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
		off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
	} else {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
		dst_vsi = ((u64)dst->u.port_info.port_id <<
			   ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
		off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
	}
}

/**
 * ice_eswitch_release_env - clear switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function removes the HW filter configuration specific to switchdev
 * mode and restores the default legacy mode settings.
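 *
 * In outline: the allow-override security setting is cleared on both the
 * control and uplink VSIs, the default forwarding VSI rule is cleared for
 * the uplink, and the uplink's permanent MAC and broadcast filters are
 * added back.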
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	ice_clear_dflt_vsi(uplink_vsi->vsw);
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}

/**
 * ice_eswitch_vsi_setup - configure switchdev control VSI
 * @pf: pointer to PF structure
 * @pi: pointer to port_info structure
 */
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
}

/**
 * ice_eswitch_napi_del - remove NAPI handle for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		netif_napi_del(&pf->vf[i].repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_enable - enable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		napi_enable(&pf->vf[i].repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_disable - disable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		napi_disable(&pf->vf[i].repr->q_vector->napi);
}

/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi;

	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->switchdev.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->switchdev.control_vsi;
	pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
	if (!pf->switchdev.uplink_vsi)
		goto err_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_repr_add_for_all_vfs(pf))
		goto err_repr_add;

	if (ice_eswitch_setup_reprs(pf))
		goto err_setup_reprs;

	ice_eswitch_remap_rings_to_vectors(pf);

	if (ice_vsi_open(ctrl_vsi))
		goto err_setup_reprs;

	ice_eswitch_napi_enable(pf);

	return 0;

err_setup_reprs:
	ice_repr_rem_from_all_vfs(pf);
err_repr_add:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}

/**
 * ice_eswitch_disable_switchdev - disable switchdev resources
 * @pf: pointer to PF structure
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_release_env(pf);
	ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
	ice_eswitch_release_reprs(pf, ctrl_vsi);
	ice_vsi_release(ctrl_vsi);
	ice_repr_rem_from_all_vfs(pf);
}

/**
 * ice_eswitch_mode_set - set new eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: eswitch mode to switch to
 * @extack: pointer to extack structure
 */
int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (pf->eswitch_mode == mode)
		return 0;

	if (pf->num_alloc_vfs) {
		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only when no VFs are created");
		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only when no VFs are created");
		return -EOPNOTSUPP;
	}

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
	{
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
		break;
	}
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
		return -EINVAL;
	}

	pf->eswitch_mode = mode;
	return 0;
}

/**
 * ice_eswitch_mode_get - get current eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: output parameter for current eswitch mode
 */
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct ice_pf *pf = devlink_priv(devlink);

	*mode = pf->eswitch_mode;
	return 0;
}

/**
 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
 * false otherwise.
 */
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
{
	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

/**
 * ice_eswitch_release - cleanup eswitch
 * @pf: pointer to PF structure
 */
void ice_eswitch_release(struct ice_pf *pf)
{
	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return;

	ice_eswitch_disable_switchdev(pf);
	pf->switchdev.is_running = false;
}

/**
 * ice_eswitch_configure - configure eswitch
 * @pf: pointer to PF structure
 */
int ice_eswitch_configure(struct ice_pf *pf)
{
	int status;

	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
		return 0;

	status = ice_eswitch_enable_switchdev(pf);
	if (status)
		return status;

	pf->switchdev.is_running = true;
	return 0;
}

/**
 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
	struct ice_repr *repr;
	int i;

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, i) {
		repr = pf->vf[i].repr;
		if (repr)
			ice_repr_start_tx_queues(repr);
	}
}

/**
 * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
 * @pf: pointer to PF structure
 */
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
	struct ice_repr *repr;
	int i;

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, i) {
		repr = pf->vf[i].repr;
		if (repr)
			ice_repr_stop_tx_queues(repr);
	}
}

/**
 * ice_eswitch_rebuild - rebuild eswitch
 * @pf: pointer to PF structure
 */
int ice_eswitch_rebuild(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int status;

	ice_eswitch_napi_disable(pf);
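	/* The NAPI contexts deleted here are re-registered by
	 * ice_eswitch_setup_reprs() below (via netif_napi_add()) and
	 * re-enabled once the control VSI is opened again.
	 */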
	ice_eswitch_napi_del(pf);

	status = ice_eswitch_setup_env(pf);
	if (status)
		return status;

	status = ice_eswitch_setup_reprs(pf);
	if (status)
		return status;

	ice_eswitch_remap_rings_to_vectors(pf);

	ice_replay_tc_fltrs(pf);

	status = ice_vsi_open(ctrl_vsi);
	if (status)
		return status;

	ice_eswitch_napi_enable(pf);
	ice_eswitch_start_all_tx_queues(pf);

	return 0;
}