1 // SPDX-License-Identifier: GPL-2.0
14 * ice_vsi_type_str - maps VSI type enum to string equivalents
38 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
55 ice_flush(&vsi->back->hw);
67 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
75 struct ice_pf *pf = vsi->back;
79 if (vsi->type == ICE_VSI_CHNL)
83 vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
84 sizeof(*vsi->tx_rings), GFP_KERNEL);
85 if (!vsi->tx_rings)
86 return -ENOMEM;
88 vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
89 sizeof(*vsi->rx_rings), GFP_KERNEL);
90 if (!vsi->rx_rings)
94 * and XDP rings; at this point vsi->num_xdp_txq might not be set,
99 vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
100 sizeof(*vsi->txq_map), GFP_KERNEL);
102 if (!vsi->txq_map)
105 vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
106 sizeof(*vsi->rxq_map), GFP_KERNEL);
107 if (!vsi->rxq_map)
111 if (vsi->type == ICE_VSI_LB)
115 vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
116 sizeof(*vsi->q_vectors), GFP_KERNEL);
117 if (!vsi->q_vectors)
123 devm_kfree(dev, vsi->rxq_map);
125 devm_kfree(dev, vsi->txq_map);
127 devm_kfree(dev, vsi->rx_rings);
129 devm_kfree(dev, vsi->tx_rings);
130 return -ENOMEM;
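/* Editor's note: the allocation ladder above uses the kernel's staged
 * goto-unwind pattern, where each failure frees only what was already
 * allocated. Minimal standalone sketch with malloc/free standing in for
 * devm_kcalloc/devm_kfree (names here are illustrative):
 */
#include <errno.h>
#include <stdlib.h>

struct qmaps {
	int *txq_map;
	int *rxq_map;
};

static int alloc_qmaps(struct qmaps *q, size_t ntx, size_t nrx)
{
	q->txq_map = calloc(ntx, sizeof(*q->txq_map));
	if (!q->txq_map)
		return -ENOMEM;

	q->rxq_map = calloc(nrx, sizeof(*q->rxq_map));
	if (!q->rxq_map)
		goto err_rxq;

	return 0;

err_rxq:
	free(q->txq_map);	/* unwind only the earlier allocation */
	return -ENOMEM;
}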
134 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
139 switch (vsi->type) {
145 * ethtool -G so we should keep those values instead of
148 if (!vsi->num_rx_desc)
149 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
150 if (!vsi->num_tx_desc)
151 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
154 dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
155 vsi->type);
171 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
178 enum ice_vsi_type vsi_type = vsi->type;
179 struct ice_pf *pf = vsi->back;
180 struct ice_vf *vf = vsi->vf;
187 if (vsi->req_txq) {
188 vsi->alloc_txq = vsi->req_txq;
189 vsi->num_txq = vsi->req_txq;
191 vsi->alloc_txq = ice_get_txq_count(pf);
194 pf->num_lan_tx = vsi->alloc_txq;
197 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
198 vsi->alloc_rxq = 1;
200 if (vsi->req_rxq) {
201 vsi->alloc_rxq = vsi->req_rxq;
202 vsi->num_rxq = vsi->req_rxq;
204 vsi->alloc_rxq = ice_get_rxq_count(pf);
208 pf->num_lan_rx = vsi->alloc_rxq;
210 vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq);
213 vsi->alloc_txq = 1;
214 vsi->alloc_rxq = 1;
215 vsi->num_q_vectors = 1;
216 vsi->irq_dyn_alloc = true;
219 if (vf->num_req_qs)
220 vf->num_vf_qs = vf->num_req_qs;
221 vsi->alloc_txq = vf->num_vf_qs;
222 vsi->alloc_rxq = vf->num_vf_qs;
223 /* pf->vfs.num_msix_per includes (VF miscellaneous vector +
224 * data queue interrupts). Since vsi->num_q_vectors is number
228 vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF;
231 vsi->alloc_txq = 1;
232 vsi->alloc_rxq = 1;
233 vsi->num_q_vectors = 1;
236 vsi->alloc_txq = 0;
237 vsi->alloc_rxq = 0;
240 vsi->alloc_txq = 1;
241 vsi->alloc_rxq = 1;
252 * ice_get_free_slot - get the next non-NULL location index in array
265 if (curr < (size - 1) && !tmp_array[curr + 1]) {
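/* Editor's note: from the fragment above, ice_get_free_slot() prefers the
 * slot right after 'curr' when it is empty. The full-scan fallback and
 * the sentinel below are assumptions for a standalone sketch, not copied
 * driver code.
 */
#include <stddef.h>

#define NO_FREE_SLOT 0xFFFFu

static unsigned int get_free_slot(void **array, size_t size, unsigned int curr)
{
	unsigned int i;

	if (curr + 1 < size && !array[curr + 1])
		return curr + 1;	/* fast path: next slot is free */

	for (i = 0; i < size; i++)	/* assumed fallback: linear scan */
		if (!array[i])
			return i;

	return NO_FREE_SLOT;
}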
281 * ice_vsi_delete_from_hw - delete a VSI from the switch
286 struct ice_pf *pf = vsi->back;
295 if (vsi->type == ICE_VSI_VF)
296 ctxt->vf_num = vsi->vf->vf_id;
297 ctxt->vsi_num = vsi->vsi_num;
299 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
301 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
303 dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
304 vsi->vsi_num, status);
310 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
315 struct ice_pf *pf = vsi->back;
321 devm_kfree(dev, vsi->q_vectors);
322 vsi->q_vectors = NULL;
323 devm_kfree(dev, vsi->tx_rings);
324 vsi->tx_rings = NULL;
325 devm_kfree(dev, vsi->rx_rings);
326 vsi->rx_rings = NULL;
327 devm_kfree(dev, vsi->txq_map);
328 vsi->txq_map = NULL;
329 devm_kfree(dev, vsi->rxq_map);
330 vsi->rxq_map = NULL;
334 * ice_vsi_free_stats - Free the ring statistics structures
340 struct ice_pf *pf = vsi->back;
343 if (vsi->type == ICE_VSI_CHNL)
345 if (!pf->vsi_stats)
348 vsi_stat = pf->vsi_stats[vsi->idx];
353 if (vsi_stat->tx_ring_stats[i]) {
354 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
355 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
360 if (vsi_stat->rx_ring_stats[i]) {
361 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
362 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
366 kfree(vsi_stat->tx_ring_stats);
367 kfree(vsi_stat->rx_ring_stats);
369 pf->vsi_stats[vsi->idx] = NULL;
373 * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
381 struct ice_pf *pf = vsi->back;
384 vsi_stats = pf->vsi_stats[vsi->idx];
385 tx_ring_stats = vsi_stats->tx_ring_stats;
386 rx_ring_stats = vsi_stats->rx_ring_stats;
393 ring = vsi->tx_rings[i];
404 ring->ring_stats = ring_stats;
412 ring = vsi->rx_rings[i];
423 ring->ring_stats = ring_stats;
430 return -ENOMEM;
434 * ice_vsi_free - clean up and deallocate the provided VSI
445 if (!vsi || !vsi->back)
448 pf = vsi->back;
451 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
452 dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
456 mutex_lock(&pf->sw_mutex);
459 pf->vsi[vsi->idx] = NULL;
460 pf->next_vsi = vsi->idx;
464 mutex_destroy(&vsi->xdp_state_lock);
465 mutex_unlock(&pf->sw_mutex);
476 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
484 if (!q_vector->tx.tx_ring)
488 ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
489 ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
495 * ice_msix_clean_rings - MSIX mode Interrupt Handler
503 if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
506 q_vector->total_events++;
508 napi_schedule(&q_vector->napi);
514 * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
520 struct ice_pf *pf = vsi->back;
522 if (vsi->type == ICE_VSI_CHNL)
524 if (!pf->vsi_stats)
525 return -ENOENT;
527 if (pf->vsi_stats[vsi->idx])
533 return -ENOMEM;
535 vsi_stat->tx_ring_stats =
536 kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
538 if (!vsi_stat->tx_ring_stats)
541 vsi_stat->rx_ring_stats =
542 kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
544 if (!vsi_stat->rx_ring_stats)
547 pf->vsi_stats[vsi->idx] = vsi_stat;
552 kfree(vsi_stat->rx_ring_stats);
554 kfree(vsi_stat->tx_ring_stats);
556 pf->vsi_stats[vsi->idx] = NULL;
557 return -ENOMEM;
561 * ice_vsi_alloc_def - set default values for already allocated VSI
568 if (vsi->type != ICE_VSI_CHNL) {
571 return -ENOMEM;
574 vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev);
576 switch (vsi->type) {
580 vsi->irq_handler = ice_msix_clean_rings;
584 vsi->irq_handler = ice_msix_clean_ctrl_vsi;
588 return -EINVAL;
590 vsi->num_rxq = ch->num_rxq;
591 vsi->num_txq = ch->num_txq;
592 vsi->next_base_q = ch->base_q;
599 return -EINVAL;
606 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
621 mutex_lock(&pf->sw_mutex);
624 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
627 if (pf->next_vsi == ICE_NO_VSI) {
636 vsi->back = pf;
637 set_bit(ICE_VSI_DOWN, vsi->state);
640 vsi->idx = pf->next_vsi;
641 pf->vsi[pf->next_vsi] = vsi;
643 /* prepare pf->next_vsi for next use */
644 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
645 pf->next_vsi);
647 mutex_init(&vsi->xdp_state_lock);
650 mutex_unlock(&pf->sw_mutex);
655 * ice_alloc_fd_res - Allocate FD resource for a VSI
660 * Returns 0 on success, -EPERM on no-op or -EIO on failure
664 struct ice_pf *pf = vsi->back;
671 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
672 return -EPERM;
674 if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
675 vsi->type == ICE_VSI_CHNL))
676 return -EPERM;
679 g_val = pf->hw.func_caps.fd_fltr_guar;
681 return -EPERM;
684 b_val = pf->hw.func_caps.fd_fltr_best_effort;
686 return -EPERM;
696 if (vsi->type == ICE_VSI_PF) {
697 vsi->num_gfltr = g_val;
701 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
703 return -EPERM;
705 vsi->num_gfltr = ICE_PF_VSI_GFLTR;
709 vsi->num_bfltr = b_val;
710 } else if (vsi->type == ICE_VSI_VF) {
711 vsi->num_gfltr = 0;
714 vsi->num_bfltr = b_val;
721 return -EPERM;
723 if (!main_vsi->all_numtc)
724 return -EINVAL;
727 numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;
733 return -EPERM;
735 g_val -= ICE_PF_VSI_GFLTR;
737 vsi->num_gfltr = g_val / numtc;
740 vsi->num_bfltr = b_val;
747 * ice_vsi_get_qs - Assign queues from PF to VSI
754 struct ice_pf *pf = vsi->back;
756 .qs_mutex = &pf->avail_q_mutex,
757 .pf_map = pf->avail_txqs,
758 .pf_map_size = pf->max_pf_txqs,
759 .q_count = vsi->alloc_txq,
761 .vsi_map = vsi->txq_map,
766 .qs_mutex = &pf->avail_q_mutex,
767 .pf_map = pf->avail_rxqs,
768 .pf_map_size = pf->max_pf_rxqs,
769 .q_count = vsi->alloc_rxq,
771 .vsi_map = vsi->rxq_map,
777 if (vsi->type == ICE_VSI_CHNL)
783 vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;
788 vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;
794 * ice_vsi_put_qs - Release queues from VSI to PF
799 struct ice_pf *pf = vsi->back;
802 mutex_lock(&pf->avail_q_mutex);
805 clear_bit(vsi->txq_map[i], pf->avail_txqs);
806 vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
810 clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
811 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
814 mutex_unlock(&pf->avail_q_mutex);
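/* Editor's note: ice_vsi_put_qs() above releases queues by clearing their
 * bits in the PF-wide avail_txqs/avail_rxqs bitmaps (set bit = in use)
 * and poisoning the VSI's map entry. Standalone model of one release,
 * with a plain uint64_t word array as the bitmap:
 */
#include <stdint.h>

#define INVAL_Q_INDEX 0xFFFFu

static void put_queue(uint64_t *used_qs, uint16_t *vsi_map, int i)
{
	used_qs[vsi_map[i] / 64] &= ~(1ULL << (vsi_map[i] % 64)); /* now free */
	vsi_map[i] = INVAL_Q_INDEX;	/* the VSI no longer owns this queue */
}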
825 return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
842 return err ? test_bit(ICE_FLAG_RDMA_ENA, pf->flags) : value.vbool;
846 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
854 struct ice_pf *pf = vsi->back;
860 status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
863 vsi->vsi_num, status);
867 * ice_rss_clean - Delete RSS related VSI structures and configuration
872 struct ice_pf *pf = vsi->back;
877 devm_kfree(dev, vsi->rss_hkey_user);
878 devm_kfree(dev, vsi->rss_lut_user);
883 ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
887 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
893 struct ice_pf *pf = vsi->back;
896 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
897 vsi->rss_size = 1;
901 cap = &pf->hw.func_caps.common_cap;
902 max_rss_size = BIT(cap->rss_table_entry_width);
903 switch (vsi->type) {
907 vsi->rss_table_size = (u16)cap->rss_table_size;
908 if (vsi->type == ICE_VSI_CHNL)
909 vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
911 vsi->rss_size = min_t(u16, num_online_cpus(),
913 vsi->rss_lut_type = ICE_LUT_PF;
916 vsi->rss_table_size = ICE_LUT_VSI_SIZE;
917 vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
918 vsi->rss_lut_type = ICE_LUT_VSI;
924 vsi->rss_table_size = ICE_LUT_VSI_SIZE;
925 vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
926 vsi->rss_lut_type = ICE_LUT_VSI;
932 ice_vsi_type_str(vsi->type));
938 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
948 memset(&ctxt->info, 0, sizeof(ctxt->info));
950 ctxt->alloc_from_pool = true;
952 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
954 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
956 ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M,
958 /* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
961 * DVM - leave inner VLAN in packet by default
964 ctxt->info.inner_vlan_flags |=
967 ctxt->info.outer_vlan_flags =
970 ctxt->info.outer_vlan_flags |=
973 ctxt->info.outer_vlan_flags |=
986 ctxt->info.ingress_table = cpu_to_le32(table);
987 ctxt->info.egress_table = cpu_to_le32(table);
989 ctxt->info.outer_up_table = cpu_to_le32(table);
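/* Editor's note: the ingress/egress/outer UP tables written just above
 * carry eight 3-bit user-priority fields; the default is a 1:1 mapping,
 * each UP translated to itself. Sketch assuming UP0 at bit 0, UP1 at
 * bit 3, and so on:
 */
#include <stdint.h>

static uint32_t build_identity_up_table(void)
{
	uint32_t table = 0;
	int up;

	for (up = 0; up < 8; up++)
		table |= (uint32_t)up << (up * 3);	/* UP 'up' -> itself */

	return table;
}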
994 * ice_vsi_setup_q_map - Setup a VSI queue map
1000 u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
1002 u16 qcount_tx = vsi->alloc_txq;
1003 u16 qcount_rx = vsi->alloc_rxq;
1007 if (!vsi->tc_cfg.numtc) {
1009 vsi->tc_cfg.numtc = 1;
1010 vsi->tc_cfg.ena_tc = 1;
1013 num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
1016 num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
1020 /* find the (rounded up) power-of-2 of qcount */
1024 * VSI for each traffic class and the offset of these queues.
1025 * The first 10 bits are the queue offset for TC0 and the next 4 bits
1026 * are the number of queues allocated to TC0; the queue count is a power of 2.
1028 * If TC is not enabled, the queue offset is set to 0, and allocate one
1032 * Setup number and offset of Rx queues for all TCs for the VSI
1035 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
1037 vsi->tc_cfg.tc_info[i].qoffset = 0;
1038 vsi->tc_cfg.tc_info[i].qcount_rx = 1;
1039 vsi->tc_cfg.tc_info[i].qcount_tx = 1;
1040 vsi->tc_cfg.tc_info[i].netdev_tc = 0;
1041 ctxt->info.tc_mapping[i] = 0;
1046 vsi->tc_cfg.tc_info[i].qoffset = offset;
1047 vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
1048 vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
1049 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
1051 qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
1053 offset += num_rxq_per_tc;
1055 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
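/* Editor's note: worked sketch of the tc_mapping encoding described in
 * the comment above: a queue offset in the low 10 bits and log2 of the
 * (power-of-2) queue count in the next 4 bits. The mask and shift below
 * follow that comment and are assumptions, not the driver's
 * ICE_AQ_VSI_TC_Q_* definitions.
 */
#include <stdint.h>

#define TC_Q_OFFSET_MASK 0x03FFu	/* bits 0..9: first queue of the TC */
#define TC_Q_NUM_SHIFT   10		/* bits 10..13: log2(queue count) */

static uint16_t pack_tc_qmap(uint16_t qoffset, uint16_t order)
{
	return (qoffset & TC_Q_OFFSET_MASK) |
	       (uint16_t)(order << TC_Q_NUM_SHIFT);
}
/* e.g. a TC starting at queue 8 with 4 queues: pack_tc_qmap(8, 2) == 0x0808 */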
1058 /* if offset is non-zero, it was calculated from the TCs enabled for
1059 * this VSI; otherwise qcount_rx is already correct and non-zero
1060 * because it is derived from the VSI's
1064 if (offset)
1065 rx_count = offset;
1069 if (rx_count > vsi->alloc_rxq) {
1070 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
1071 rx_count, vsi->alloc_rxq);
1072 return -EINVAL;
1075 if (tx_count > vsi->alloc_txq) {
1076 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
1077 tx_count, vsi->alloc_txq);
1078 return -EINVAL;
1081 vsi->num_txq = tx_count;
1082 vsi->num_rxq = rx_count;
1084 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
1085 dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
1089 vsi->num_txq = vsi->num_rxq;
1093 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
1098 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
1099 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
1105 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
1114 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
1115 vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
1119 ctxt->info.valid_sections |= cpu_to_le16(val);
1127 ctxt->info.fd_options = cpu_to_le16(val);
1129 ctxt->info.max_fd_fltr_dedicated =
1130 cpu_to_le16(vsi->num_gfltr);
1132 ctxt->info.max_fd_fltr_shared =
1133 cpu_to_le16(vsi->num_bfltr);
1138 ctxt->info.fd_def_q = cpu_to_le16(val);
1143 ctxt->info.fd_report_opt = cpu_to_le16(val);
1147 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
1157 pf = vsi->back;
1160 switch (vsi->type) {
1173 ice_vsi_type_str(vsi->type));
1178 vsi->rss_hfunc = hash_type;
1180 ctxt->info.q_opt_rss =
1189 u8 offset = 0;
1192 qcount = vsi->num_rxq;
1195 qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
1198 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1199 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
1200 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
1201 ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
1205 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled
1212 return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
1216 * ice_vsi_init - Create and initialize a VSI
1228 struct ice_pf *pf = vsi->back;
1229 struct ice_hw *hw = &pf->hw;
1237 return -ENOMEM;
1239 switch (vsi->type) {
1243 ctxt->flags = ICE_AQ_VSI_TYPE_PF;
1247 ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
1250 ctxt->flags = ICE_AQ_VSI_TYPE_VF;
1251 /* VF number here is the absolute VF number (0-255) */
1252 ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
1255 ret = -ENODEV;
1262 if (vsi->type == ICE_VSI_CHNL) {
1267 ctxt->info.sw_flags2 |=
1270 ctxt->info.sw_flags2 &=
1275 if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
1278 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
1279 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
1282 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
1283 vsi->type != ICE_VSI_CTRL) {
1289 ctxt->info.valid_sections |=
1293 ctxt->info.sw_id = vsi->port_info->sw_id;
1294 if (vsi->type == ICE_VSI_CHNL) {
1306 ctxt->info.valid_sections |=
1311 if (vsi->type == ICE_VSI_PF) {
1312 ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
1313 ctxt->info.valid_sections |=
1318 ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
1321 ret = -EIO;
1325 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
1328 ret = -EIO;
1334 vsi->info = ctxt->info;
1337 vsi->vsi_num = ctxt->vsi_num;
1345 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
1353 if (vsi->q_vectors) {
1355 struct ice_q_vector *q_vector = vsi->q_vectors[i];
1358 q_vector->tx.tx_ring = NULL;
1359 q_vector->rx.rx_ring = NULL;
1364 if (vsi->tx_rings) {
1366 if (vsi->tx_rings[i]) {
1367 kfree_rcu(vsi->tx_rings[i], rcu);
1368 WRITE_ONCE(vsi->tx_rings[i], NULL);
1372 if (vsi->rx_rings) {
1374 if (vsi->rx_rings[i]) {
1375 kfree_rcu(vsi->rx_rings[i], rcu);
1376 WRITE_ONCE(vsi->rx_rings[i], NULL);
1383 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
1388 bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
1389 struct ice_pf *pf = vsi->back;
1404 ring->q_index = i;
1405 ring->reg_idx = vsi->txq_map[i];
1406 ring->vsi = vsi;
1407 ring->tx_tstamps = &pf->ptp.port.tx;
1408 ring->dev = dev;
1409 ring->count = vsi->num_tx_desc;
1410 ring->txq_teid = ICE_INVAL_TEID;
1412 ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
1414 ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
1415 WRITE_ONCE(vsi->tx_rings[i], ring);
1427 ring->q_index = i;
1428 ring->reg_idx = vsi->rxq_map[i];
1429 ring->vsi = vsi;
1430 ring->netdev = vsi->netdev;
1431 ring->dev = dev;
1432 ring->count = vsi->num_rx_desc;
1433 ring->cached_phctime = pf->ptp.cached_phc_time;
1436 ring->flags |= ICE_RX_FLAGS_RING_GCS;
1438 WRITE_ONCE(vsi->rx_rings[i], ring);
1445 return -ENOMEM;
1449 * ice_vsi_manage_rss_lut - disable/enable RSS
1461 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1466 if (vsi->rss_lut_user)
1467 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1469 ice_fill_rss_lut(lut, vsi->rss_table_size,
1470 vsi->rss_size);
1473 ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
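/* Editor's note: sketch of the round-robin population that
 * ice_fill_rss_lut() performs above; each LUT entry points at queue
 * (i % rss_size), spreading hash buckets evenly over the active queues.
 */
#include <stdint.h>

static void fill_rss_lut(uint8_t *lut, uint16_t lut_size, uint16_t rss_size)
{
	uint16_t i;

	for (i = 0; i < lut_size; i++)
		lut[i] = (uint8_t)(i % rss_size);
}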
1478 * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
1488 vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
1490 vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
1494 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
1499 struct ice_pf *pf = vsi->back;
1505 if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
1506 (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
1507 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
1509 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
1513 * orig_rss_size so that when tc-qdisc is deleted, main VSI
1515 * to begin with (prior to setup-tc for ADQ config)
1517 if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
1518 vsi->orig_rss_size <= vsi->num_rxq) {
1519 vsi->rss_size = vsi->orig_rss_size;
1521 vsi->orig_rss_size = 0;
1525 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1527 return -ENOMEM;
1529 if (vsi->rss_lut_user)
1530 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1532 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
1534 err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
1542 err = -ENOMEM;
1546 if (vsi->rss_hkey_user)
1547 memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1562 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
1571 struct ice_pf *pf = vsi->back;
1578 vsi->vsi_num);
1582 status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
1585 vsi->vsi_num, status);
1599 /* configure RSS for sctp4 with input set IP src/dst - only support
1600 * RSS on SCTPv4 on outer headers (non-tunneled)
1629 /* configure RSS for sctp6 with input set IPv6 src/dst - only support
1630 * RSS on SCTPv6 on outer headers (non-tunneled)
1658 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
1670 u16 vsi_num = vsi->vsi_num;
1671 struct ice_pf *pf = vsi->back;
1672 struct ice_hw *hw = &pf->hw;
1689 cfg->addl_hdrs, cfg->hash_flds,
1690 cfg->hdr_type, cfg->symm);
1695 * ice_pf_state_is_nominal - checks the PF for nominal state
1712 if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
1725 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
1731 struct ice_hw *hw = &vsi->back->hw;
1732 struct ice_pf *pf = vsi->back;
1733 u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
1735 prev_es = &vsi->eth_stats_prev;
1736 cur_es = &vsi->eth_stats;
1738 if (ice_is_reset_in_progress(pf->state))
1739 vsi->stat_offsets_loaded = false;
1741 ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
1742 &prev_es->rx_bytes, &cur_es->rx_bytes);
1744 ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
1745 &prev_es->rx_unicast, &cur_es->rx_unicast);
1747 ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
1748 &prev_es->rx_multicast, &cur_es->rx_multicast);
1750 ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
1751 &prev_es->rx_broadcast, &cur_es->rx_broadcast);
1753 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
1754 &prev_es->rx_discards, &cur_es->rx_discards);
1756 ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
1757 &prev_es->tx_bytes, &cur_es->tx_bytes);
1759 ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
1760 &prev_es->tx_unicast, &cur_es->tx_unicast);
1762 ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
1763 &prev_es->tx_multicast, &cur_es->tx_multicast);
1765 ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
1766 &prev_es->tx_broadcast, &cur_es->tx_broadcast);
1768 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
1769 &prev_es->tx_errors, &cur_es->tx_errors);
1771 vsi->stat_offsets_loaded = true;
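/* Editor's note: the GLV_* counters read above are free-running, so each
 * update accumulates a wrap-safe delta since the last read; the first
 * read after a reset only establishes the baseline. The 40-bit rollover
 * handling below is the standard pattern and an assumption about
 * ice_stat_update40()'s internals.
 */
#include <stdbool.h>
#include <stdint.h>

#define CNT40_WRAP (1ULL << 40)	/* byte/packet counters are 40 bits wide */

static void stat_update40(uint64_t hw_val, bool offsets_loaded,
			  uint64_t *prev, uint64_t *accum)
{
	if (!offsets_loaded)
		*prev = hw_val;	/* baseline only, no delta yet */

	if (hw_val >= *prev)
		*accum += hw_val - *prev;
	else			/* counter wrapped past 2^40 */
		*accum += (hw_val + CNT40_WRAP) - *prev;

	*prev = hw_val;
}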
1775 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
1803 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
1820 * ice_write_intrl - write throttle rate limit to interrupt specific register
1826 struct ice_hw *hw = &q_vector->vsi->back->hw;
1828 wr32(hw, GLINT_RATE(q_vector->reg_idx),
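/* Editor's note: sketch of the ice_intrl_usec_to_reg()-style conversion
 * feeding GLINT_RATE above; the register counts in hardware granularity
 * units, so microseconds are divided by the granularity and clamped to
 * the register maximum. Both parameters are illustrative assumptions.
 */
#include <stdint.h>

static uint32_t intrl_usec_to_reg(uint32_t usecs, uint32_t gran_usec,
				  uint32_t reg_max)
{
	uint32_t val = usecs / gran_usec;	/* gran_usec must be non-zero */

	return val > reg_max ? reg_max : val;
}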
1834 switch (rc->type) {
1836 if (rc->rx_ring)
1837 return rc->rx_ring->q_vector;
1840 if (rc->tx_ring)
1841 return rc->tx_ring->q_vector;
1851 * __ice_write_itr - write throttle rate to register
1859 struct ice_hw *hw = &q_vector->vsi->back->hw;
1861 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
1866 * ice_write_itr - write throttle rate to queue specific register
1882 * ice_set_q_vector_intrl - set up interrupt rate limiting
1885 * Interrupt rate limiting is local to the vector, not per-queue, so we must
1893 if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {
1902 ice_write_intrl(q_vector, q_vector->intrl);
1907 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
1915 struct ice_pf *pf = vsi->back;
1916 struct ice_hw *hw = &pf->hw;
1921 struct ice_q_vector *q_vector = vsi->q_vectors[i];
1922 u16 reg_idx = q_vector->reg_idx;
1931 * For SR-IOV VF VSIs, the queue vector index always starts
1937 for (q = 0; q < q_vector->num_ring_tx; q++) {
1939 q_vector->tx.itr_idx);
1943 for (q = 0; q < q_vector->num_ring_rx; q++) {
1945 q_vector->rx.itr_idx);
1952 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
1963 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
1974 * ice_vsi_stop_tx_rings - Disable Tx rings
1987 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
1988 return -EINVAL;
1995 return -EINVAL;
2009 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
2018 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
2022 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
2027 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
2038 struct ice_pf *pf = vsi->back;
2039 struct ice_hw *hw = &pf->hw;
2046 pf_q = vsi->rxq_map[i];
2057 if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
2058 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
2059 vsi->tc_cfg.numtc = 1;
2068 * ice_vsi_cfg_sw_lldp - Config switch rules for LLDP packet handling
2080 struct ice_pf *pf = vsi->back;
2091 if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) {
2099 vsi->vsi_num, status);
2102 status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create);
2104 set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags);
2112 vsi->vsi_num, status);
2116 * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP
2132 if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
2147 * ice_set_agg_vsi - sets up scheduler aggregator node and moves VSI into it
2155 struct device *dev = ice_pf_to_dev(vsi->back);
2161 struct ice_pf *pf = vsi->back;
2167 * - PF aggregator node contains VSIs of type _PF and _CTRL
2168 * - VF aggregator nodes contain VF VSIs
2170 port_info = pf->hw.port_info;
2174 switch (vsi->type) {
2182 agg_node_iter = &pf->pf_agg_node[0];
2193 agg_node_iter = &pf->vf_agg_node[0];
2198 ice_vsi_type_str(vsi->type));
2207 if (agg_node_iter->num_vsis &&
2208 agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
2213 if (agg_node_iter->valid &&
2214 agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) {
2215 agg_id = agg_node_iter->agg_id;
2221 if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) {
2234 if (!agg_node->valid) {
2236 (u8)vsi->tc_cfg.ena_tc);
2243 agg_node->valid = true;
2244 agg_node->agg_id = agg_id;
2248 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
2249 (u8)vsi->tc_cfg.ena_tc);
2252 vsi->idx, agg_id);
2257 agg_node->num_vsis++;
2259 /* cache the 'agg_id' in the VSI so that after a reset the VSI will be moved
2262 vsi->agg_node = agg_node;
2264 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
2265 vsi->agg_node->num_vsis);
2276 if (!(vsi->tc_cfg.ena_tc & BIT(i)))
2279 if (vsi->type == ICE_VSI_CHNL) {
2280 if (!vsi->alloc_txq && vsi->num_txq)
2281 max_txqs[i] = vsi->num_txq;
2283 max_txqs[i] = pf->num_lan_tx;
2285 max_txqs[i] = vsi->alloc_txq;
2288 if (vsi->type == ICE_VSI_PF)
2289 max_txqs[i] += vsi->num_xdp_txq;
2292 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
2293 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2297 vsi->vsi_num, ret);
2305 * ice_vsi_cfg_def - configure default VSI based on the type
2310 struct device *dev = ice_pf_to_dev(vsi->back);
2311 struct ice_pf *pf = vsi->back;
2314 vsi->vsw = pf->first_sw;
2316 ret = ice_vsi_alloc_def(vsi, vsi->ch);
2329 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
2330 vsi->idx);
2341 ret = ice_vsi_init(vsi, vsi->flags);
2347 switch (vsi->type) {
2367 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
2375 vsi->stat_offsets_loaded = false;
2378 if (vsi->type != ICE_VSI_CTRL)
2383 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2390 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2413 vsi->stat_offsets_loaded = false;
2419 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2436 ret = -EINVAL;
2458 * ice_vsi_cfg - configure a previously allocated VSI
2463 struct ice_pf *pf = vsi->back;
2466 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
2467 return -EINVAL;
2473 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi);
2477 if (vsi->type == ICE_VSI_CTRL) {
2478 if (vsi->vf) {
2479 WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI);
2480 vsi->vf->ctrl_vsi_idx = vsi->idx;
2482 WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI);
2483 pf->ctrl_vsi_idx = vsi->idx;
2491 * ice_vsi_decfg - remove all VSI configuration
2496 struct ice_pf *pf = vsi->back;
2499 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
2500 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
2503 vsi->vsi_num, err);
2505 if (vsi->xdp_rings)
2516 /* SR-IOV determines needed MSIX resources all at once instead of per
2518 * many interrupts each VF needs. SR-IOV MSIX resources are also
2522 if (vsi->type == ICE_VSI_VF &&
2523 vsi->agg_node && vsi->agg_node->valid)
2524 vsi->agg_node->num_vsis--;
2528 * ice_vsi_setup - Set up a VSI by a given type
2547 if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
2548 WARN_ON(!params->port_info))
2557 vsi->params = *params;
2571 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
2577 if (!vsi->agg_node)
2589 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
2594 struct ice_pf *pf = vsi->back;
2595 struct ice_hw *hw = &pf->hw;
2601 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2604 for (q = 0; q < q_vector->num_ring_tx; q++) {
2605 ice_write_itr(&q_vector->tx, 0);
2606 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
2607 if (vsi->xdp_rings) {
2608 u32 xdp_txq = txq + vsi->num_xdp_txq;
2610 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
2615 for (q = 0; q < q_vector->num_ring_rx; q++) {
2616 ice_write_itr(&q_vector->rx, 0);
2617 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
2626 * ice_vsi_free_irq - Free the IRQ association with the OS
2631 struct ice_pf *pf = vsi->back;
2634 if (!vsi->q_vectors || !vsi->irqs_ready)
2638 if (vsi->type == ICE_VSI_VF)
2641 vsi->irqs_ready = false;
2646 irq_num = vsi->q_vectors[i]->irq.virq;
2649 if (!vsi->q_vectors[i] ||
2650 !(vsi->q_vectors[i]->num_ring_tx ||
2651 vsi->q_vectors[i]->num_ring_rx))
2655 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
2660 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
2667 if (!vsi->tx_rings)
2671 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2672 ice_free_tx_ring(vsi->tx_rings[i]);
2676 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
2683 if (!vsi->rx_rings)
2687 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2688 ice_free_rx_ring(vsi->rx_rings[i]);
2692 * ice_vsi_close - Shut down a VSI
2697 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
2707 * ice_ena_vsi - resume a VSI
2715 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state))
2718 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
2720 if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
2721 vsi->type == ICE_VSI_SF)) {
2722 if (netif_running(vsi->netdev)) {
2726 err = ice_open_internal(vsi->netdev);
2731 } else if (vsi->type == ICE_VSI_CTRL) {
2739 * ice_dis_vsi - pause a VSI
2745 bool already_down = test_bit(ICE_VSI_DOWN, vsi->state);
2747 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
2749 if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
2750 vsi->type == ICE_VSI_SF)) {
2751 if (netif_running(vsi->netdev)) {
2754 already_down = test_bit(ICE_VSI_DOWN, vsi->state);
2763 } else if (vsi->type == ICE_VSI_CTRL && !already_down) {
2769 * ice_vsi_set_napi_queues - associate netdev queues with napi
2777 struct net_device *netdev = vsi->netdev;
2785 &vsi->rx_rings[q_idx]->q_vector->napi);
2789 &vsi->tx_rings[q_idx]->q_vector->napi);
2792 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2794 netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
2799 * ice_vsi_clear_napi_queues - dissociate netdev queues from napi
2807 struct net_device *netdev = vsi->netdev;
2815 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2817 netif_napi_set_irq(&q_vector->napi, -1);
2828 * ice_napi_add - register NAPI handler for the VSI
2839 if (!vsi->netdev)
2843 netif_napi_add_config(vsi->netdev,
2844 &vsi->q_vectors[v_idx]->napi,
2850 * ice_vsi_release - Delete a VSI and free its resources
2859 if (!vsi->back)
2860 return -ENODEV;
2861 pf = vsi->back;
2863 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2872 !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) &&
2873 (vsi->type == ICE_VSI_PF || (vsi->type == ICE_VSI_VF &&
2874 ice_vf_is_lldp_ena(vsi->vf))))
2883 if (!ice_is_reset_in_progress(pf->state))
2890 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
2903 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2905 coalesce[i].itr_tx = q_vector->tx.itr_settings;
2906 coalesce[i].itr_rx = q_vector->rx.itr_settings;
2907 coalesce[i].intrl = q_vector->intrl;
2909 if (i < vsi->num_txq)
2911 if (i < vsi->num_rxq)
2915 return vsi->num_q_vectors;
2919 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
2944 for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
2960 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
2961 rc = &vsi->q_vectors[i]->rx;
2962 rc->itr_settings = coalesce[i].itr_rx;
2963 ice_write_itr(rc, rc->itr_setting);
2964 } else if (i < vsi->alloc_rxq) {
2965 rc = &vsi->q_vectors[i]->rx;
2966 rc->itr_settings = coalesce[0].itr_rx;
2967 ice_write_itr(rc, rc->itr_setting);
2970 if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
2971 rc = &vsi->q_vectors[i]->tx;
2972 rc->itr_settings = coalesce[i].itr_tx;
2973 ice_write_itr(rc, rc->itr_setting);
2974 } else if (i < vsi->alloc_txq) {
2975 rc = &vsi->q_vectors[i]->tx;
2976 rc->itr_settings = coalesce[0].itr_tx;
2977 ice_write_itr(rc, rc->itr_setting);
2980 vsi->q_vectors[i]->intrl = coalesce[i].intrl;
2981 ice_set_q_vector_intrl(vsi->q_vectors[i]);
2987 for (; i < vsi->num_q_vectors; i++) {
2989 rc = &vsi->q_vectors[i]->tx;
2990 rc->itr_settings = coalesce[0].itr_tx;
2991 ice_write_itr(rc, rc->itr_setting);
2994 rc = &vsi->q_vectors[i]->rx;
2995 rc->itr_settings = coalesce[0].itr_rx;
2996 ice_write_itr(rc, rc->itr_setting);
2998 vsi->q_vectors[i]->intrl = coalesce[0].intrl;
2999 ice_set_q_vector_intrl(vsi->q_vectors[i]);
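/* Editor's note: restore policy of the loops above in one standalone
 * helper: a vector that existed before the rebuild gets its saved ITR
 * back when the saved entry is valid; invalid entries and vectors added
 * by the rebuild inherit vector 0's settings. Field names are
 * illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

struct coal_saved {
	uint16_t itr_tx, itr_rx, intrl;
	bool tx_valid, rx_valid;
};

static struct coal_saved pick_coalesce(const struct coal_saved *saved,
				       int saved_cnt, int vec)
{
	struct coal_saved c = saved[0];	/* default: vector 0's settings */

	if (vec < saved_cnt) {
		if (saved[vec].tx_valid)
			c.itr_tx = saved[vec].itr_tx;
		if (saved[vec].rx_valid)
			c.itr_rx = saved[vec].itr_rx;
		c.intrl = saved[vec].intrl;
	}
	return c;
}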
3004 * ice_vsi_realloc_stat_arrays - Frees unused stat structures or allocates new ones
3010 u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq;
3011 u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq;
3015 struct ice_pf *pf = vsi->back;
3016 u16 prev_txq = vsi->alloc_txq;
3017 u16 prev_rxq = vsi->alloc_rxq;
3020 vsi_stat = pf->vsi_stats[vsi->idx];
3024 if (vsi_stat->tx_ring_stats[i]) {
3025 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
3026 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
3031 tx_ring_stats = vsi_stat->tx_ring_stats;
3032 vsi_stat->tx_ring_stats =
3033 krealloc_array(vsi_stat->tx_ring_stats, req_txq,
3034 sizeof(*vsi_stat->tx_ring_stats),
3036 if (!vsi_stat->tx_ring_stats) {
3037 vsi_stat->tx_ring_stats = tx_ring_stats;
3038 return -ENOMEM;
3043 if (vsi_stat->rx_ring_stats[i]) {
3044 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
3045 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
3050 rx_ring_stats = vsi_stat->rx_ring_stats;
3051 vsi_stat->rx_ring_stats =
3052 krealloc_array(vsi_stat->rx_ring_stats, req_rxq,
3053 sizeof(*vsi_stat->rx_ring_stats),
3055 if (!vsi_stat->rx_ring_stats) {
3056 vsi_stat->rx_ring_stats = rx_ring_stats;
3057 return -ENOMEM;
3064 * ice_vsi_rebuild - Rebuild VSI after reset
3081 return -EINVAL;
3083 vsi->flags = vsi_flags;
3084 pf = vsi->back;
3085 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
3086 return -EINVAL;
3088 mutex_lock(&vsi->xdp_state_lock);
3099 coalesce = kcalloc(vsi->num_q_vectors,
3102 ret = -ENOMEM;
3111 ret = -EIO;
3120 clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state);
3128 mutex_unlock(&vsi->xdp_state_lock);
3133 * ice_is_reset_in_progress - check for a reset in progress
3145 * ice_wait_for_reset - Wait for driver to finish reset and rebuild
3154 * Returns 0 on success, -EBUSY if the reset is not finished within the
3155 * timeout, and -ERESTARTSYS if the thread was interrupted.
3161 ret = wait_event_interruptible_timeout(pf->reset_wait_queue,
3162 !ice_is_reset_in_progress(pf->state),
3167 return -EBUSY;
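/* Editor's note: wait_event_interruptible_timeout() returns a negative
 * value when interrupted (-ERESTARTSYS), 0 on timeout, and the remaining
 * jiffies otherwise; sketch of how that folds into the 0 / -EBUSY /
 * -ERESTARTSYS contract documented above.
 */
#include <errno.h>

static int map_wait_result(long ret)
{
	if (ret < 0)
		return (int)ret;	/* interrupted: pass -ERESTARTSYS through */
	if (ret == 0)
		return -EBUSY;		/* timed out before rebuild finished */
	return 0;			/* reset and rebuild completed in time */
}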
3173 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
3179 vsi->info.mapping_flags = ctx->info.mapping_flags;
3180 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
3181 sizeof(vsi->info.q_mapping));
3182 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
3183 sizeof(vsi->info.tc_mapping));
3187 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
3193 struct net_device *netdev = vsi->netdev;
3194 struct ice_pf *pf = vsi->back;
3195 int numtc = vsi->tc_cfg.numtc;
3204 if (vsi->type == ICE_VSI_CHNL)
3212 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf))
3213 numtc = vsi->all_numtc;
3218 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
3221 if (vsi->tc_cfg.ena_tc & BIT(i))
3223 vsi->tc_cfg.tc_info[i].netdev_tc,
3224 vsi->tc_cfg.tc_info[i].qcount_tx,
3225 vsi->tc_cfg.tc_info[i].qoffset);
3226 /* setup TC queue map for CHNL TCs */
3228 if (!(vsi->all_enatc & BIT(i)))
3230 if (!vsi->mqprio_qopt.qopt.count[i])
3233 vsi->mqprio_qopt.qopt.count[i],
3234 vsi->mqprio_qopt.qopt.offset[i]);
3237 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3241 u8 ets_tc = dcbcfg->etscfg.prio_table[i];
3244 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
3250 * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
3261 u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
3262 u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
3263 int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
3268 vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
3275 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
3277 vsi->tc_cfg.tc_info[i].qoffset = 0;
3278 vsi->tc_cfg.tc_info[i].qcount_rx = 1;
3279 vsi->tc_cfg.tc_info[i].qcount_tx = 1;
3280 vsi->tc_cfg.tc_info[i].netdev_tc = 0;
3281 ctxt->info.tc_mapping[i] = 0;
3285 offset = vsi->mqprio_qopt.qopt.offset[i];
3286 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
3287 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
3288 vsi->tc_cfg.tc_info[i].qoffset = offset;
3289 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
3290 vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
3291 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
3294 if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
3296 if (!(vsi->all_enatc & BIT(i)))
3298 offset = vsi->mqprio_qopt.qopt.offset[i];
3299 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
3300 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
3304 new_txq = offset + qcount_tx;
3305 if (new_txq > vsi->alloc_txq) {
3306 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
3307 new_txq, vsi->alloc_txq);
3308 return -EINVAL;
3311 new_rxq = offset + qcount_rx;
3312 if (new_rxq > vsi->alloc_rxq) {
3313 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
3314 new_rxq, vsi->alloc_rxq);
3315 return -EINVAL;
3319 vsi->num_txq = new_txq;
3320 vsi->num_rxq = new_rxq;
3323 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
3324 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
3325 ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);
3327 /* Find queue count available for channel VSIs and starting offset
3330 if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
3331 vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
3332 vsi->next_base_q = tc0_qcount;
3334 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq);
3335 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
3336 dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
3337 vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
3343 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
3352 struct ice_pf *pf = vsi->back;
3360 if (vsi->tc_cfg.ena_tc == ena_tc &&
3361 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
3365 /* build bitmap of enabled TCs */
3369 max_txqs[i] = vsi->alloc_txq;
3373 if (vsi->type == ICE_VSI_CHNL &&
3374 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3375 max_txqs[i] = vsi->num_txq;
3378 memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
3379 vsi->tc_cfg.ena_tc = ena_tc;
3380 vsi->tc_cfg.numtc = num_tc;
3384 return -ENOMEM;
3386 ctx->vf_num = 0;
3387 ctx->info = vsi->info;
3389 if (vsi->type == ICE_VSI_PF &&
3390 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3396 memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
3401 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
3402 ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
3408 if (vsi->type == ICE_VSI_PF &&
3409 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3410 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
3412 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
3413 vsi->tc_cfg.ena_tc, max_txqs);
3417 vsi->vsi_num, ret);
3421 vsi->info.valid_sections = 0;
3430 * ice_update_ring_stats - Update ring statistics
3439 stats->bytes += bytes;
3440 stats->pkts += pkts;
3444 * ice_update_tx_ring_stats - Update Tx ring specific counters
3451 u64_stats_update_begin(&tx_ring->ring_stats->syncp);
3452 ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
3453 u64_stats_update_end(&tx_ring->ring_stats->syncp);
3457 * ice_update_rx_ring_stats - Update Rx ring specific counters
3464 u64_stats_update_begin(&rx_ring->ring_stats->syncp);
3465 ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
3466 u64_stats_update_end(&rx_ring->ring_stats->syncp);
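/* Editor's note: the u64_stats syncp bracketing above is a sequence
 * counter, so 32-bit readers never see a torn 64-bit counter. Loose
 * single-writer standalone model (the kernel primitive differs in
 * detail and compiles away on 64-bit):
 */
#include <stdatomic.h>
#include <stdint.h>

struct ring_counters {
	atomic_uint seq;	/* odd while an update is in flight */
	uint64_t pkts, bytes;
};

static void counters_update(struct ring_counters *c, uint64_t pkts,
			    uint64_t bytes)
{
	atomic_fetch_add_explicit(&c->seq, 1, memory_order_acq_rel);
	c->pkts += pkts;
	c->bytes += bytes;
	atomic_fetch_add_explicit(&c->seq, 1, memory_order_release);
}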
3470 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
3484 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
3492 return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
3496 * ice_set_dflt_vsi - set the default forwarding VSI
3511 return -EINVAL;
3513 dev = ice_pf_to_dev(vsi->back);
3515 if (ice_lag_is_switchdev_running(vsi->back)) {
3517 vsi->vsi_num);
3524 vsi->vsi_num);
3528 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
3531 vsi->vsi_num, status);
3539 * ice_clear_dflt_vsi - clear the default forwarding VSI
3552 return -EINVAL;
3554 dev = ice_pf_to_dev(vsi->back);
3557 if (!ice_is_dflt_vsi_in_use(vsi->port_info))
3558 return -ENODEV;
3560 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
3564 vsi->vsi_num, status);
3565 return -EIO;
3572 * ice_get_link_speed_mbps - get link speed in Mbps
3581 link_speed = vsi->port_info->phy.link_info.link_speed;
3583 return (int)ice_get_link_speed(fls(link_speed) - 1);
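/* Editor's note: link_speed is a bitmask of AQ speed bits, so
 * fls(link_speed) - 1 above turns the highest set bit into a table index
 * for ice_get_link_speed(). Standalone model; the table values and bit
 * positions here are assumptions, not the driver's definitions.
 */
#include <stdint.h>

static const int speed_mbps[] = {
	10, 100, 1000, 2500, 5000, 10000, 20000, 25000, 40000, 50000, 100000,
};

static int link_speed_to_mbps(uint16_t speed_bitmask)
{
	int fls = 0, i;

	for (i = 15; i >= 0; i--)	/* find last (highest) set bit */
		if (speed_bitmask & (1u << i)) {
			fls = i + 1;
			break;
		}

	if (!fls || fls > (int)(sizeof(speed_mbps) / sizeof(speed_mbps[0])))
		return 0;	/* unknown or unreported speed */

	return speed_mbps[fls - 1];
}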
3587 * ice_get_link_speed_kbps - get link speed in Kbps
3602 * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
3607 * profile, otherwise a non-zero value will force a minimum BW limit for the VSI
3612 struct ice_pf *pf = vsi->back;
3618 if (!vsi->port_info) {
3620 vsi->idx, vsi->type);
3621 return -EINVAL;
3627 min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
3629 return -EINVAL;
3634 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
3638 min_tx_rate, ice_vsi_type_str(vsi->type),
3639 vsi->idx);
3644 min_tx_rate, ice_vsi_type_str(vsi->type));
3646 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
3647 vsi->idx, 0,
3651 ice_vsi_type_str(vsi->type), vsi->idx);
3656 ice_vsi_type_str(vsi->type), vsi->idx);
3663 * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
3668 * profile, otherwise a non-zero value will force a maximum BW limit for the VSI
3673 struct ice_pf *pf = vsi->back;
3679 if (!vsi->port_info) {
3681 vsi->idx, vsi->type);
3682 return -EINVAL;
3688 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
3690 return -EINVAL;
3695 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
3699 max_tx_rate, ice_vsi_type_str(vsi->type),
3700 vsi->idx);
3705 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
3707 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
3708 vsi->idx, 0,
3712 ice_vsi_type_str(vsi->type), vsi->idx);
3717 ice_vsi_type_str(vsi->type), vsi->idx);
3724 * ice_set_link - turn on/off physical link
3730 struct device *dev = ice_pf_to_dev(vsi->back);
3731 struct ice_port_info *pi = vsi->port_info;
3732 struct ice_hw *hw = pi->hw;
3735 if (vsi->type != ICE_VSI_PF)
3736 return -EINVAL;
3745 if (status == -EIO) {
3746 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3749 ice_aq_str(hw->adminq.sq_last_status));
3753 ice_aq_str(hw->adminq.sq_last_status));
3761 * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
3784 err = vlan_ops->add_vlan(vsi, &vlan);
3785 if (err && err != -EEXIST)
3789 if (!ice_is_dvm_ena(&vsi->back->hw))
3793 err = vlan_ops->add_vlan(vsi, &vlan);
3794 if (err && err != -EEXIST)
3801 * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
3814 err = vlan_ops->del_vlan(vsi, &vlan);
3815 if (err && err != -EEXIST)
3819 if (!ice_is_dvm_ena(&vsi->back->hw))
3823 err = vlan_ops->del_vlan(vsi, &vlan);
3824 if (err && err != -EEXIST)
3830 return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3835 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
3846 if (vsi->type == ICE_VSI_VF) {
3847 if (WARN_ON(!vsi->vf))
3850 if (ice_vf_is_port_vlan_ena(vsi->vf))
3854 if (ice_is_dvm_ena(&vsi->back->hw))
3861 * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
3862 * @vsi: VSI used to determine if any non-zero VLANs have been added
3866 return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
3870 * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
3871 * @vsi: VSI used to get the number of non-zero VLANs added
3875 return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
3890 return test_bit(f, pf->features);
3903 set_bit(f, pf->features);
3916 clear_bit(f, pf->features);
3927 switch (pf->hw.device_id) {
3935 if (ice_is_phy_rclk_in_netlist(&pf->hw))
3937 /* If we don't own the timer, don't enable other caps */
3940 if (ice_is_cgu_in_netlist(&pf->hw))
3942 if (ice_is_clock_mux_in_netlist(&pf->hw))
3944 if (ice_gnss_is_module_present(&pf->hw))
3951 if (pf->hw.mac_type == ICE_MAC_E830) {
3958 * ice_vsi_update_security - update security block in VSI
3967 ctx.info = vsi->info;
3971 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
3972 return -ENODEV;
3974 vsi->info = ctx.info;
3979 * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
3984 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
3990 * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
3995 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
4001 * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
4009 .info = vsi->info,
4018 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
4019 return -ENODEV;
4021 vsi->info = ctx.info;