Lines Matching refs:vsi

68 static int ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx);
74 static int ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list,
85 static bool ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info);
89 static void ice_vsi_set_rss_params(struct ice_vsi *vsi);
91 static int ice_set_rss_key(struct ice_vsi *vsi);
92 static int ice_set_rss_lut(struct ice_vsi *vsi);
93 static void ice_set_rss_flow_flds(struct ice_vsi *vsi);
94 static void ice_clean_vsi_rss_cfg(struct ice_vsi *vsi);
106 static void ice_add_sysctls_sw_stats(struct ice_vsi *vsi,
118 static void ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi,
126 static int ice_add_ethertype_to_list(struct ice_vsi *vsi,
174 static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt);
187 static void ice_remove_vsi_mirroring(struct ice_vsi *vsi);
363 * @vsi: the VSI to setup
372 ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi,
376 vsi->type = type;
377 vsi->sc = sc;
378 vsi->idx = idx;
379 sc->all_vsi[idx] = vsi;
380 vsi->dynamic = dynamic;
383 vsi->rule_mir_ingress = ICE_INVAL_MIRROR_RULE_ID;
384 vsi->rule_mir_egress = ICE_INVAL_MIRROR_RULE_ID;
387 ice_add_vsi_tunables(vsi, sc->vsi_sysctls);
400 struct ice_vsi *vsi;
413 vsi = (struct ice_vsi *)malloc(sizeof(*vsi), M_ICE, M_NOWAIT | M_ZERO);
414 if (!vsi) {
419 ice_setup_vsi_common(sc, vsi, type, idx, true);
421 return vsi;
440 * @vsi: VSI structure
452 ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
461 vsi->tx_qmap = malloc(sizeof(u16) * max_tx_queues, M_ICE, M_WAITOK);
464 vsi->rx_qmap = malloc(sizeof(u16) * max_rx_queues, M_ICE, M_WAITOK);
468 vsi->tx_qmap[i] = ICE_INVALID_RES_IDX;
471 vsi->rx_qmap[i] = ICE_INVALID_RES_IDX;
477 * @vsi: the VSI private structure
484 ice_free_vsi_qmaps(struct ice_vsi *vsi)
486 struct ice_softc *sc = vsi->sc;
488 if (vsi->tx_qmap) {
489 ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap,
490 vsi->num_tx_queues);
491 free(vsi->tx_qmap, M_ICE);
492 vsi->tx_qmap = NULL;
495 if (vsi->rx_qmap) {
496 ice_resmgr_release_map(&sc->rx_qmgr, vsi->rx_qmap,
497 vsi->num_rx_queues);
498 free(vsi->rx_qmap, M_ICE);
499 vsi->rx_qmap = NULL;
580 * @vsi: the VSI to configure
586 * @pre vsi->qmap_type is set to a valid type
589 ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
594 MPASS(vsi->rx_qmap != NULL);
596 switch (vsi->qmap_type) {
600 ctx->info.q_mapping[0] = CPU_TO_LE16(vsi->rx_qmap[0]);
601 ctx->info.q_mapping[1] = CPU_TO_LE16(vsi->num_rx_queues);
607 for (int i = 0; i < vsi->num_rx_queues; i++)
608 ctx->info.q_mapping[i] = CPU_TO_LE16(vsi->rx_qmap[i]);
615 if (vsi->num_rx_queues)
616 pow = flsl(vsi->num_rx_queues - 1);
623 vsi->tc_info[0].qoffset = 0;
624 vsi->tc_info[0].qcount_rx = vsi->num_rx_queues;
625 vsi->tc_info[0].qcount_tx = vsi->num_tx_queues;
627 vsi->tc_info[i].qoffset = 0;
628 vsi->tc_info[i].qcount_rx = 1;
629 vsi->tc_info[i].qcount_tx = 1;
631 vsi->tc_map = 0x1;
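The flsl() fragment above (source line 616) rounds the RX queue count up to a power-of-two exponent for the contiguous queue map in ice_setup_vsi_qmap(). A minimal standalone sketch of that computation, assuming flsl() from <strings.h>; the ICE_AQ_VSI_TC_Q_NUM_S field packing is left out:

#include <strings.h>	/* flsl() */

/* Hypothetical helper: exponent of the power-of-two queue count. */
static unsigned int
example_qcount_pow(unsigned int num_rx_queues)
{
	unsigned int pow = 0;

	if (num_rx_queues)
		pow = flsl(num_rx_queues - 1);

	return (pow);	/* e.g. 5 RX queues -> pow = 3, i.e. 8 slots */
}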
638 * @vsi: VSI to setup
640 * @pre vsi->mirror_src_vsi is set to the SW VSI num that traffic is to be
646 ice_setup_vsi_mirroring(struct ice_vsi *vsi)
649 struct ice_softc *sc = vsi->sc;
656 rule.vsi_idx = ice_get_hw_vsi_num(hw, vsi->mirror_src_vsi);
659 dest_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
666 "Could not add INGRESS rule for mirror vsi %d to vsi %d, err %s aq_err %s\n",
672 vsi->rule_mir_ingress = rule_id;
679 "Could not add EGRESS rule for mirror vsi %d to vsi %d, err %s aq_err %s\n",
685 vsi->rule_mir_egress = rule_id;
692 * @vsi: VSI to remove mirror rules from
695 ice_remove_vsi_mirroring(struct ice_vsi *vsi)
697 struct ice_hw *hw = &vsi->sc->hw;
701 if (vsi->rule_mir_ingress != ICE_INVAL_MIRROR_RULE_ID)
702 status = ice_aq_delete_mir_rule(hw, vsi->rule_mir_ingress, keep_alloc, NULL);
705 device_printf(vsi->sc->dev, "Could not remove mirror VSI ingress rule, err %s aq_err %s\n",
710 if (vsi->rule_mir_egress != ICE_INVAL_MIRROR_RULE_ID)
711 status = ice_aq_delete_mir_rule(hw, vsi->rule_mir_egress, keep_alloc, NULL);
714 device_printf(vsi->sc->dev, "Could not remove mirror VSI egress rule, err %s aq_err %s\n",
720 * @vsi: the vsi to initialize
724 * @pre vsi->num_tx_queues is set
725 * @pre vsi->num_rx_queues is set
728 ice_initialize_vsi(struct ice_vsi *vsi)
731 struct ice_hw *hw = &vsi->sc->hw;
737 switch (vsi->type) {
749 ice_set_rss_vsi_ctx(&ctx, vsi->type);
755 ice_vsi_set_rss_params(vsi);
758 err = ice_setup_vsi_qmap(vsi, &ctx);
764 status = ice_add_vsi(hw, vsi->idx, &ctx, NULL);
766 device_printf(vsi->sc->dev,
772 vsi->info = ctx.info;
775 max_txqs[0] = vsi->num_tx_queues;
777 status = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
780 device_printf(vsi->sc->dev,
784 ice_deinit_vsi(vsi);
789 ice_reset_vsi_stats(vsi);
796 * @vsi: the VSI to release
802 ice_deinit_vsi(struct ice_vsi *vsi)
805 struct ice_softc *sc = vsi->sc;
810 MPASS(vsi == sc->all_vsi[vsi->idx]);
812 ctx.info = vsi->info;
814 status = ice_rm_vsi_lan_cfg(hw->port_info, vsi->idx);
822 vsi->idx, ice_status_str(status));
826 status = ice_free_vsi(hw, vsi->idx, &ctx, false, NULL);
830 vsi->idx, ice_status_str(status),
837 * @vsi: the VSI to release
844 ice_release_vsi(struct ice_vsi *vsi)
846 struct ice_softc *sc = vsi->sc;
847 int idx = vsi->idx;
850 MPASS(vsi == sc->all_vsi[idx]);
854 ice_clean_vsi_rss_cfg(vsi);
856 ice_del_vsi_sysctl_ctx(vsi);
859 ice_remove_vsi_mirroring(vsi);
866 ice_deinit_vsi(vsi);
868 ice_free_vsi_qmaps(vsi);
870 if (vsi->dynamic) {
1390 * @vsi: the VSI to configure
1395 ice_configure_all_rxq_interrupts(struct ice_vsi *vsi)
1397 struct ice_hw *hw = &vsi->sc->hw;
1400 for (i = 0; i < vsi->num_rx_queues; i++) {
1401 struct ice_rx_queue *rxq = &vsi->rx_queues[i];
1403 ice_configure_rxq_interrupt(hw, vsi->rx_qmap[rxq->me],
1408 i, rxq->me, vsi->rx_qmap[rxq->me], rxq->irqv->me);
1438 * @vsi: the VSI to configure
1443 ice_configure_all_txq_interrupts(struct ice_vsi *vsi)
1445 struct ice_hw *hw = &vsi->sc->hw;
1448 for (i = 0; i < vsi->num_tx_queues; i++) {
1449 struct ice_tx_queue *txq = &vsi->tx_queues[i];
1451 ice_configure_txq_interrupt(hw, vsi->tx_qmap[txq->me],
1460 * @vsi: the VSI to configure
1470 ice_flush_rxq_interrupts(struct ice_vsi *vsi)
1472 struct ice_hw *hw = &vsi->sc->hw;
1475 for (i = 0; i < vsi->num_rx_queues; i++) {
1476 struct ice_rx_queue *rxq = &vsi->rx_queues[i];
1480 reg = vsi->rx_qmap[rxq->me];
1497 * @vsi: the VSI to configure
1507 ice_flush_txq_interrupts(struct ice_vsi *vsi)
1509 struct ice_hw *hw = &vsi->sc->hw;
1512 for (i = 0; i < vsi->num_tx_queues; i++) {
1513 struct ice_tx_queue *txq = &vsi->tx_queues[i];
1517 reg = vsi->tx_qmap[txq->me];
1534 * @vsi: the VSI to configure
1539 ice_configure_rx_itr(struct ice_vsi *vsi)
1541 struct ice_hw *hw = &vsi->sc->hw;
1546 for (i = 0; i < vsi->num_rx_queues; i++) {
1547 struct ice_rx_queue *rxq = &vsi->rx_queues[i];
1550 ice_itr_to_reg(hw, vsi->rx_itr));
1558 * @vsi: the VSI to configure
1563 ice_configure_tx_itr(struct ice_vsi *vsi)
1565 struct ice_hw *hw = &vsi->sc->hw;
1570 for (i = 0; i < vsi->num_tx_queues; i++) {
1571 struct ice_tx_queue *txq = &vsi->tx_queues[i];
1574 ice_itr_to_reg(hw, vsi->tx_itr));
1589 struct ice_vsi *vsi = txq->vsi;
1590 struct ice_softc *sc = vsi->sc;
1603 switch (vsi->type) {
1614 tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
1636 * @vsi: the VSI to configure
1642 ice_cfg_vsi_for_tx(struct ice_vsi *vsi)
1645 struct ice_hw *hw = &vsi->sc->hw;
1646 device_t dev = vsi->sc->dev;
1659 for (i = 0; i < vsi->num_tx_queues; i++) {
1661 struct ice_tx_queue *txq = &vsi->tx_queues[i];
1663 pf_q = vsi->tx_qmap[txq->me];
1673 status = ice_ena_vsi_txq(hw->port_info, vsi->idx, txq->tc,
1709 struct ice_vsi *vsi = rxq->vsi;
1710 struct ice_softc *sc = vsi->sc;
1717 pf_q = vsi->rx_qmap[rxq->me];
1724 rlan_ctx.dbuf = vsi->mbuf_sz >> ICE_RLAN_CTX_DBUF_S;
1744 rlan_ctx.rxmax = min(vsi->max_frame_size,
1745 ICE_MAX_RX_SEGS * vsi->mbuf_sz);
1749 if (vsi->type != ICE_VSI_VF) {
1777 * @vsi: the VSI to configure
1785 ice_cfg_vsi_for_rx(struct ice_vsi *vsi)
1789 for (i = 0; i < vsi->num_rx_queues; i++) {
1790 MPASS(vsi->mbuf_sz > 0);
1791 err = ice_setup_rx_ctx(&vsi->rx_queues[i]);
1841 * @vsi: VSI containing queue to enable/disable
1850 ice_control_rx_queue(struct ice_vsi *vsi, u16 qidx, bool enable)
1852 struct ice_hw *hw = &vsi->sc->hw;
1853 device_t dev = vsi->sc->dev;
1857 struct ice_rx_queue *rxq = &vsi->rx_queues[qidx];
1858 int pf_q = vsi->rx_qmap[rxq->me];
1900 * @vsi: VSI to enable/disable queues
1908 ice_control_all_rx_queues(struct ice_vsi *vsi, bool enable)
1916 for (i = 0; i < vsi->num_rx_queues; i++) {
1917 err = ice_control_rx_queue(vsi, i, enable);
1927 * @vsi: the VSI to forward to
1939 ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list,
1952 entry->fltr_info.vsi_handle = vsi->idx;
1979 * @vsi: the VSI to add the filter for
1989 ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr)
1992 struct ice_hw *hw = &vsi->sc->hw;
1993 device_t dev = vsi->sc->dev;
1999 err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI);
2029 struct ice_vsi *vsi = &sc->pf_vsi;
2034 err = ice_add_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr);
2039 err = ice_add_vsi_mac_filter(vsi, broadcastaddr);
2048 * @vsi: the VSI to add the filter for
2059 ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr)
2062 struct ice_hw *hw = &vsi->sc->hw;
2063 device_t dev = vsi->sc->dev;
2069 err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI);
2099 struct ice_vsi *vsi = &sc->pf_vsi;
2104 err = ice_remove_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr);
2109 err = ice_remove_vsi_mac_filter(vsi, broadcastaddr);
2553 * @vsi: the VSI to be updated
2559 ice_update_vsi_hw_stats(struct ice_vsi *vsi)
2562 struct ice_hw *hw = &vsi->sc->hw;
2565 if (!ice_is_vsi_valid(hw, vsi->idx))
2568 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); /* HW absolute index of a VSI */
2569 prev_es = &vsi->hw_stats.prev;
2570 cur_es = &vsi->hw_stats.cur;
2574 vsi->hw_stats.offsets_loaded, \
2579 vsi->hw_stats.offsets_loaded, \
2593 ice_stat_update_repc(hw, vsi->idx, vsi->hw_stats.offsets_loaded,
2595 ice_update_port_oversize(vsi->sc, cur_es->rx_errors);
2599 vsi->hw_stats.offsets_loaded = true;
2604 * @vsi: VSI structure
2611 ice_reset_vsi_stats(struct ice_vsi *vsi)
2614 memset(&vsi->hw_stats.prev, 0, sizeof(vsi->hw_stats.prev));
2615 memset(&vsi->hw_stats.cur, 0, sizeof(vsi->hw_stats.cur));
2616 vsi->hw_stats.offsets_loaded = false;
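The offsets_loaded flag read in ice_update_vsi_hw_stats() and cleared in ice_reset_vsi_stats() implements a baseline-and-delta pattern: the first read after a reset only records a baseline, and later reads accumulate the difference. A hypothetical sketch of that pattern (not the driver's stat-update helper; counter-width wrap handling omitted):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stat accumulator following the offsets_loaded pattern. */
static void
example_stat_update(uint64_t reading, bool offsets_loaded,
    uint64_t *prev, uint64_t *cur)
{
	if (!offsets_loaded)
		*prev = reading;	/* first read: establish baseline */

	*cur += reading - *prev;	/* accumulate the delta since last read */
	*prev = reading;
}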
4839 struct ice_vsi *vsi = (struct ice_vsi *)arg1;
4844 if (ice_driver_is_detaching(vsi->sc))
4852 for (i = 0; i < vsi->num_tx_queues; i++)
4853 stat += vsi->tx_queues[i].stats.cso[type];
4870 struct ice_vsi *vsi = (struct ice_vsi *)arg1;
4875 if (ice_driver_is_detaching(vsi->sc))
4883 for (i = 0; i < vsi->num_rx_queues; i++)
4884 stat += vsi->rx_queues[i].stats.cso[type];
4902 struct ice_vsi *vsi = (struct ice_vsi *)arg1;
4903 struct ice_hw_port_stats *hs = &vsi->sc->stats.cur;
4909 if (ice_driver_is_detaching(vsi->sc))
4920 for (i = 0; i < vsi->num_rx_queues; i++)
4924 stat += vsi->rx_queues[i].stats.cso[type];
4957 * @vsi: pointer to the VSI to add sysctls for
4967 ice_add_sysctls_sw_stats(struct ice_vsi *vsi,
5011 vsi, tx_entry->type, ice_sysctl_tx_cso_stat, "QU",
5020 vsi, rx_entry->type, ice_sysctl_rx_cso_stat, "QU",
5028 * @vsi: pointer to VSI structure
5033 ice_add_vsi_sysctls(struct ice_vsi *vsi)
5035 struct sysctl_ctx_list *ctx = &vsi->ctx;
5039 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
5047 ice_add_sysctls_eth_stats(ctx, hw_node, &vsi->hw_stats.cur);
5050 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_discards,
5055 vsi, 0, ice_sysctl_rx_errors_stat, "QU",
5059 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_no_desc,
5063 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.tx_errors,
5070 ice_add_sysctls_sw_stats(vsi, ctx, sw_node);
5279 * @vsi: vsi structure addresses are targeted towards
5286 ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info)
5297 (info->vsi_handle == vsi->idx) &&
5498 * @vsi: The VSI to add the filter for
5505 ice_add_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid, u16 length)
5507 struct ice_hw *hw = &vsi->sc->hw;
5526 vlan_entries[i].fltr_info.vsi_handle = vsi->idx;
5536 device_printf(vsi->sc->dev, "Failed to add VLAN filters:\n");
5538 device_printf(vsi->sc->dev,
5550 * @vsi: The VSI to add the filter for
5556 ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
5558 return ice_add_vlan_hw_filters(vsi, &vid, 1);
5563 * @vsi: The VSI to remove the filters from
5570 ice_remove_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid, u16 length)
5572 struct ice_hw *hw = &vsi->sc->hw;
5591 vlan_entries[i].fltr_info.vsi_handle = vsi->idx;
5601 device_printf(vsi->sc->dev, "Failed to remove VLAN filters:\n");
5603 device_printf(vsi->sc->dev,
5615 * @vsi: The VSI to remove the filter from
5621 ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
5623 return ice_remove_vlan_hw_filters(vsi, &vid, 1);
5644 struct ice_vsi *vsi = (struct ice_vsi *)arg1;
5645 struct ice_softc *sc = vsi->sc;
5653 ret = sysctl_handle_16(oidp, &vsi->rx_itr, 0, req);
5657 if (vsi->rx_itr < 0)
5658 vsi->rx_itr = ICE_DFLT_RX_ITR;
5659 if (vsi->rx_itr > ICE_ITR_MAX)
5660 vsi->rx_itr = ICE_ITR_MAX;
5666 vsi->rx_itr = (vsi->rx_itr / increment) * increment;
5673 ice_configure_rx_itr(vsi);
5696 struct ice_vsi *vsi = (struct ice_vsi *)arg1;
5697 struct ice_softc *sc = vsi->sc;
5705 ret = sysctl_handle_16(oidp, &vsi->tx_itr, 0, req);
5710 if (vsi->tx_itr < 0)
5711 vsi->tx_itr = ICE_DFLT_TX_ITR;
5712 if (vsi->tx_itr > ICE_ITR_MAX)
5713 vsi->tx_itr = ICE_ITR_MAX;
5719 vsi->tx_itr = (vsi->tx_itr / increment) * increment;
5726 ice_configure_tx_itr(vsi);
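Both ITR sysctl handlers above clamp the requested value and then round it down to a multiple of the hardware granularity via truncating integer division. A hypothetical standalone version of that arithmetic (the 2-microsecond granularity in the comment is an assumption for illustration):

#include <stdint.h>

/* Hypothetical helper mirroring the rx_itr/tx_itr clamping above. */
static uint16_t
example_round_itr(int requested, uint16_t dflt, uint16_t max, uint16_t gran)
{
	if (requested < 0)
		return (dflt);		/* negative input restores the default */
	if (requested > max)
		requested = max;

	/* Truncating division: e.g. 127 with gran = 2 becomes 126. */
	return ((requested / gran) * gran);
}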
5733 * @vsi: pointer to VSI structure
5748 ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent)
5756 sysctl_ctx_init(&vsi->ctx);
5759 snprintf(vsi_name, sizeof(vsi_name), "%u", vsi->idx);
5760 snprintf(vsi_desc, sizeof(vsi_desc), "VSI %u", vsi->idx);
5761 vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->ctx, parent_list, OID_AUTO, vsi_name,
5763 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
5765 vsi->rx_itr = ICE_DFLT_RX_ITR;
5766 SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "rx_itr",
5768 vsi, 0, ice_sysctl_rx_itr, "S",
5771 vsi->tx_itr = ICE_DFLT_TX_ITR;
5772 SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "tx_itr",
5774 vsi, 0, ice_sysctl_tx_itr, "S",
5780 * @vsi: the VSI to remove contexts for
5786 ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi)
5788 device_t dev = vsi->sc->dev;
5791 if (vsi->vsi_node) {
5792 err = sysctl_ctx_free(&vsi->ctx);
5795 vsi->idx, ice_err_str(err));
5796 vsi->vsi_node = NULL;
5887 sc->vsi_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "vsi",
7007 * @vsi: the VSI to disable
7013 ice_vsi_disable_tx(struct ice_vsi *vsi)
7015 struct ice_softc *sc = vsi->sc;
7023 if (vsi->num_tx_queues > 255)
7026 q_teids_size = sizeof(*q_teids) * vsi->num_tx_queues;
7031 q_ids_size = sizeof(*q_ids) * vsi->num_tx_queues;
7038 q_handles_size = sizeof(*q_handles) * vsi->num_tx_queues;
7046 struct ice_tc_info *tc_info = &vsi->tc_info[tc];
7051 if (!(vsi->tc_map & BIT(tc)))
7059 struct ice_tx_queue *txq = &vsi->tx_queues[j];
7061 q_ids[buf_idx] = vsi->tx_qmap[j];
7067 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, tc, buf_idx,
7101 * @vsi: the VSI to configure
7107 ice_vsi_set_rss_params(struct ice_vsi *vsi)
7109 struct ice_softc *sc = vsi->sc;
7114 switch (vsi->type) {
7117 vsi->rss_table_size = cap->rss_table_size;
7118 vsi->rss_lut_type = ICE_LUT_PF;
7122 vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
7123 vsi->rss_lut_type = ICE_LUT_VSI;
7128 vsi->idx, vsi->type);
7135 * @vsi: The VSI to add the context for
7143 ice_vsi_add_txqs_ctx(struct ice_vsi *vsi)
7147 sysctl_ctx_init(&vsi->txqs_ctx);
7149 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
7151 vsi->txqs_node = SYSCTL_ADD_NODE(&vsi->txqs_ctx, vsi_list, OID_AUTO, "txqs",
7157 * @vsi: The VSI to add the context for
7165 ice_vsi_add_rxqs_ctx(struct ice_vsi *vsi)
7169 sysctl_ctx_init(&vsi->rxqs_ctx);
7171 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
7173 vsi->rxqs_node = SYSCTL_ADD_NODE(&vsi->rxqs_ctx, vsi_list, OID_AUTO, "rxqs",
7179 * @vsi: The VSI to delete from
7186 ice_vsi_del_txqs_ctx(struct ice_vsi *vsi)
7188 device_t dev = vsi->sc->dev;
7191 if (vsi->txqs_node) {
7192 err = sysctl_ctx_free(&vsi->txqs_ctx);
7195 vsi->idx, ice_err_str(err));
7196 vsi->txqs_node = NULL;
7202 * @vsi: The VSI to delete from
7209 ice_vsi_del_rxqs_ctx(struct ice_vsi *vsi)
7211 device_t dev = vsi->sc->dev;
7214 if (vsi->rxqs_node) {
7215 err = sysctl_ctx_free(&vsi->rxqs_ctx);
7218 vsi->idx, ice_err_str(err));
7219 vsi->rxqs_node = NULL;
7233 struct ice_vsi *vsi = txq->vsi;
7234 struct sysctl_ctx_list *ctx = &vsi->txqs_ctx;
7249 txqs_list = SYSCTL_CHILDREN(vsi->txqs_node);
7280 struct ice_vsi *vsi = rxq->vsi;
7281 struct sysctl_ctx_list *ctx = &vsi->rxqs_ctx;
7295 rxqs_list = SYSCTL_CHILDREN(vsi->rxqs_node);
7343 * @vsi: the VSI to configure
7350 ice_set_rss_key(struct ice_vsi *vsi)
7353 struct ice_softc *sc = vsi->sc;
7363 status = ice_aq_set_rss_key(hw, vsi->idx, &keydata);
7376 * @vsi: the VSI to configure
7384 ice_set_rss_flow_flds(struct ice_vsi *vsi)
7386 struct ice_softc *sc = vsi->sc;
7398 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
7402 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
7407 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
7411 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
7416 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
7420 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
7425 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
7429 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
7434 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
7438 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
7443 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
7447 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
7455 vsi->idx);
7461 * @vsi: the VSI to configure
7470 ice_set_rss_lut(struct ice_vsi *vsi)
7472 struct ice_softc *sc = vsi->sc;
7480 lut = (u8 *)malloc(vsi->rss_table_size, M_ICE, M_NOWAIT|M_ZERO);
7490 for (i = 0; i < vsi->rss_table_size; i++) {
7493 lut[i] = rss_get_indirection_to_bucket(i) % vsi->num_rx_queues;
7496 lut_params.vsi_handle = vsi->idx;
7497 lut_params.lut_size = vsi->rss_table_size;
7498 lut_params.lut_type = vsi->rss_lut_type;
7515 * @vsi: the VSI to configure
7521 ice_config_rss(struct ice_vsi *vsi)
7526 if (!ice_is_bit_set(vsi->sc->feat_en, ICE_FEATURE_RSS))
7529 err = ice_set_rss_key(vsi);
7533 ice_set_rss_flow_flds(vsi);
7535 return ice_set_rss_lut(vsi);
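The modulo in ice_set_rss_lut() above spreads RSS hash buckets across the VSI's RX queues using the kernel's indirection table; a plain round-robin fill works the same way when no indirection hint is available. A hypothetical sketch in that spirit (table size and queue count are illustrative parameters):

#include <stdint.h>

/* Hypothetical LUT fill: hash bucket i maps to queue i % num_rxq. */
static void
example_fill_rss_lut(uint8_t *lut, uint16_t table_size, uint16_t num_rxq)
{
	for (uint16_t i = 0; i < table_size; i++)
		lut[i] = i % num_rxq;
}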
7765 * @vsi: the vsi to retrieve the value for
7772 ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter)
7774 struct ice_hw_port_stats *hs = &vsi->sc->stats.cur;
7775 struct ice_eth_stats *es = &vsi->hw_stats.cur;
7811 return if_get_counter_default(vsi->sc->ifp, counter);
7851 struct ice_vsi *vsi = sc->all_vsi[i];
7853 if (!vsi)
7856 status = ice_replay_vsi(hw, vsi->idx);
7859 vsi->idx, ice_status_str(status),
7872 * @vsi: pointer to the VSI structure
7883 ice_clean_vsi_rss_cfg(struct ice_vsi *vsi)
7885 struct ice_softc *sc = vsi->sc;
7890 status = ice_rem_vsi_rss_cfg(hw, vsi->idx);
7894 vsi->idx, ice_status_str(status));
7897 ice_rem_vsi_rss_list(hw, vsi->idx);
7920 struct ice_vsi *vsi = sc->all_vsi[i];
7922 if (vsi)
7923 ice_clean_vsi_rss_cfg(vsi);
8755 * @vsi: the VSI being configured
8759 ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
8766 if (vsi->num_tcs == 0) {
8768 vsi->num_tcs = 1;
8769 vsi->tc_map = 0x1;
8772 qcount_rx = vsi->num_rx_queues;
8773 num_q_per_tc = min(qcount_rx / vsi->num_tcs, ICE_MAX_RXQS_PER_TC);
8780 if (i < vsi->num_tcs)
8784 rem_queues = qcount_rx % vsi->num_tcs;
8800 if (!(vsi->tc_map & BIT(i))) {
8802 vsi->tc_info[i].qoffset = 0;
8803 vsi->tc_info[i].qcount_rx = 1;
8804 vsi->tc_info[i].qcount_tx = 1;
8811 vsi->tc_info[i].qoffset = offset;
8812 vsi->tc_info[i].qcount_rx = qcounts[i];
8813 vsi->tc_info[i].qcount_tx = qcounts[i];
8826 vsi->tx_queues[j].q_handle = k;
8827 vsi->tx_queues[j].tc = i;
8829 vsi->rx_queues[j].tc = i;
8837 ctxt->info.q_mapping[0] = CPU_TO_LE16(vsi->rx_qmap[0]);
8838 ctxt->info.q_mapping[1] = CPU_TO_LE16(vsi->num_rx_queues);
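ice_vsi_setup_q_map() above divides the RX queues evenly among the enabled TCs and then spreads any remainder one queue at a time. A hypothetical standalone version of that split (the TC limit and per-TC cap are assumptions for illustration):

#include <stdint.h>

#define EX_MAX_TCS		8	/* assumed TC limit */
#define EX_MAX_RXQS_PER_TC	256	/* assumed per-TC cap */

/* Hypothetical queue split: qcounts[i] receives TC i's RX queue count. */
static void
example_split_queues(uint16_t num_rxq, uint16_t num_tcs,
    uint16_t qcounts[EX_MAX_TCS])
{
	uint16_t per_tc, rem;

	if (num_tcs == 0)
		num_tcs = 1;	/* default to a single TC, as in the fragment above */

	per_tc = num_rxq / num_tcs;
	if (per_tc > EX_MAX_RXQS_PER_TC)
		per_tc = EX_MAX_RXQS_PER_TC;
	rem = num_rxq % num_tcs;

	for (uint16_t i = 0; i < EX_MAX_TCS; i++) {
		qcounts[i] = (i < num_tcs) ? per_tc : 0;
		if (i < num_tcs && i < rem)
			qcounts[i]++;	/* hand out the remainder */
	}
}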
8856 struct ice_vsi *vsi = &sc->pf_vsi;
8869 vsi->tc_map = tc_map;
8870 vsi->num_tcs = num_tcs;
8874 ctx.info = vsi->info;
8877 ice_vsi_setup_q_map(vsi, &ctx);
8881 status = ice_update_vsi(hw, vsi->idx, &ctx, NULL);
8889 vsi->info = ctx.info;
8893 max_txqs[i] = vsi->tc_info[i].qcount_tx;
8903 status = ice_cfg_vsi_lan(hw->port_info, vsi->idx, vsi->tc_map,
8913 vsi->info.valid_sections = 0;
9248 * @vsi: the VSI to target packets to
9260 ice_add_ethertype_to_list(struct ice_vsi *vsi, struct ice_list_head *list,
9276 entry->fltr_info.vsi_handle = vsi->idx;
9299 struct ice_vsi *vsi = &sc->pf_vsi;
9315 err = ice_add_ethertype_to_list(vsi, &ethertype_list,
9324 err = ice_add_ethertype_to_list(vsi, &ethertype_list,
9357 struct ice_vsi *vsi = &sc->pf_vsi;
9369 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
9385 err = ice_add_ethertype_to_list(vsi, &ethertype_list,
9425 struct ice_vsi *vsi = &sc->pf_vsi;
9445 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
9459 err = ice_add_ethertype_to_list(vsi, &ethertype_list,