Lines matching refs:vsi (drivers/net/ethernet/intel/ice/ice_lib.c)

47 static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)  in ice_vsi_ctrl_all_rx_rings()  argument
52 ice_for_each_rxq(vsi, i) in ice_vsi_ctrl_all_rx_rings()
53 ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false); in ice_vsi_ctrl_all_rx_rings()
55 ice_flush(&vsi->back->hw); in ice_vsi_ctrl_all_rx_rings()
57 ice_for_each_rxq(vsi, i) { in ice_vsi_ctrl_all_rx_rings()
58 ret = ice_vsi_wait_one_rx_ring(vsi, ena, i); in ice_vsi_ctrl_all_rx_rings()
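
Note: the matches above trace a two-phase pattern: request the state change on every ring, flush posted register writes once, then poll each ring. A reconstruction from the fragments shown (the early-exit handling is assumed):

    static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
    {
            int ret = 0;
            u16 i;

            /* phase 1: request enable/disable on every Rx ring */
            ice_for_each_rxq(vsi, i)
                    ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);

            /* a single flush covers all of the posted writes above */
            ice_flush(&vsi->back->hw);

            /* phase 2: wait for each ring to reach the requested state */
            ice_for_each_rxq(vsi, i) {
                    ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
                    if (ret)
                            break;
            }

            return ret;
    }
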
73 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) in ice_vsi_alloc_arrays() argument
75 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_arrays()
79 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_alloc_arrays()
83 vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq, in ice_vsi_alloc_arrays()
84 sizeof(*vsi->tx_rings), GFP_KERNEL); in ice_vsi_alloc_arrays()
85 if (!vsi->tx_rings) in ice_vsi_alloc_arrays()
88 vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq, in ice_vsi_alloc_arrays()
89 sizeof(*vsi->rx_rings), GFP_KERNEL); in ice_vsi_alloc_arrays()
90 if (!vsi->rx_rings) in ice_vsi_alloc_arrays()
99 vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()), in ice_vsi_alloc_arrays()
100 sizeof(*vsi->txq_map), GFP_KERNEL); in ice_vsi_alloc_arrays()
102 if (!vsi->txq_map) in ice_vsi_alloc_arrays()
105 vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq, in ice_vsi_alloc_arrays()
106 sizeof(*vsi->rxq_map), GFP_KERNEL); in ice_vsi_alloc_arrays()
107 if (!vsi->rxq_map) in ice_vsi_alloc_arrays()
111 if (vsi->type == ICE_VSI_LB) in ice_vsi_alloc_arrays()
115 vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors, in ice_vsi_alloc_arrays()
116 sizeof(*vsi->q_vectors), GFP_KERNEL); in ice_vsi_alloc_arrays()
117 if (!vsi->q_vectors) in ice_vsi_alloc_arrays()
123 devm_kfree(dev, vsi->rxq_map); in ice_vsi_alloc_arrays()
125 devm_kfree(dev, vsi->txq_map); in ice_vsi_alloc_arrays()
127 devm_kfree(dev, vsi->rx_rings); in ice_vsi_alloc_arrays()
129 devm_kfree(dev, vsi->tx_rings); in ice_vsi_alloc_arrays()
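
The allocations above unwind in reverse order through the devm_kfree() calls shown. A minimal sketch of that pattern in isolation (struct my_ctx and its fields are hypothetical):

    static int alloc_arrays(struct device *dev, struct my_ctx *c)
    {
            c->tx = devm_kcalloc(dev, c->ntx, sizeof(*c->tx), GFP_KERNEL);
            if (!c->tx)
                    return -ENOMEM;

            c->rx = devm_kcalloc(dev, c->nrx, sizeof(*c->rx), GFP_KERNEL);
            if (!c->rx)
                    goto err_free_tx;

            return 0;

    err_free_tx:
            /* free only what was already allocated, newest first */
            devm_kfree(dev, c->tx);
            return -ENOMEM;
    }
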
137 static void ice_vsi_set_num_desc(struct ice_vsi *vsi) in ice_vsi_set_num_desc() argument
139 switch (vsi->type) { in ice_vsi_set_num_desc()
148 if (!vsi->num_rx_desc) in ice_vsi_set_num_desc()
149 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; in ice_vsi_set_num_desc()
150 if (!vsi->num_tx_desc) in ice_vsi_set_num_desc()
151 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; in ice_vsi_set_num_desc()
154 dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n", in ice_vsi_set_num_desc()
155 vsi->type); in ice_vsi_set_num_desc()
166 static void ice_vsi_set_num_qs(struct ice_vsi *vsi) in ice_vsi_set_num_qs() argument
168 enum ice_vsi_type vsi_type = vsi->type; in ice_vsi_set_num_qs()
169 struct ice_pf *pf = vsi->back; in ice_vsi_set_num_qs()
170 struct ice_vf *vf = vsi->vf; in ice_vsi_set_num_qs()
177 if (vsi->req_txq) { in ice_vsi_set_num_qs()
178 vsi->alloc_txq = vsi->req_txq; in ice_vsi_set_num_qs()
179 vsi->num_txq = vsi->req_txq; in ice_vsi_set_num_qs()
181 vsi->alloc_txq = min3(pf->num_lan_msix, in ice_vsi_set_num_qs()
186 pf->num_lan_tx = vsi->alloc_txq; in ice_vsi_set_num_qs()
190 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
192 if (vsi->req_rxq) { in ice_vsi_set_num_qs()
193 vsi->alloc_rxq = vsi->req_rxq; in ice_vsi_set_num_qs()
194 vsi->num_rxq = vsi->req_rxq; in ice_vsi_set_num_qs()
196 vsi->alloc_rxq = min3(pf->num_lan_msix, in ice_vsi_set_num_qs()
202 pf->num_lan_rx = vsi->alloc_rxq; in ice_vsi_set_num_qs()
204 vsi->num_q_vectors = min_t(int, pf->num_lan_msix, in ice_vsi_set_num_qs()
205 max_t(int, vsi->alloc_rxq, in ice_vsi_set_num_qs()
206 vsi->alloc_txq)); in ice_vsi_set_num_qs()
209 vsi->alloc_txq = 1; in ice_vsi_set_num_qs()
210 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
211 vsi->num_q_vectors = 1; in ice_vsi_set_num_qs()
212 vsi->irq_dyn_alloc = true; in ice_vsi_set_num_qs()
217 vsi->alloc_txq = vf->num_vf_qs; in ice_vsi_set_num_qs()
218 vsi->alloc_rxq = vf->num_vf_qs; in ice_vsi_set_num_qs()
224 vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF; in ice_vsi_set_num_qs()
227 vsi->alloc_txq = 1; in ice_vsi_set_num_qs()
228 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
229 vsi->num_q_vectors = 1; in ice_vsi_set_num_qs()
232 vsi->alloc_txq = 0; in ice_vsi_set_num_qs()
233 vsi->alloc_rxq = 0; in ice_vsi_set_num_qs()
236 vsi->alloc_txq = 1; in ice_vsi_set_num_qs()
237 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
244 ice_vsi_set_num_desc(vsi); in ice_vsi_set_num_qs()
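
The PF-default branch above clamps the queue count three ways at once. A sketch of the same clamp, assuming a u16 stand-in max_hw_txqs for the hardware limit:

    /* queues are bounded by MSI-X vectors, online CPUs, and HW capacity */
    static u16 clamp_txq_count(struct ice_pf *pf, u16 max_hw_txqs)
    {
            return min3(pf->num_lan_msix, (u16)num_online_cpus(),
                        max_hw_txqs);
    }
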
280 static void ice_vsi_delete_from_hw(struct ice_vsi *vsi) in ice_vsi_delete_from_hw() argument
282 struct ice_pf *pf = vsi->back; in ice_vsi_delete_from_hw()
286 ice_fltr_remove_all(vsi); in ice_vsi_delete_from_hw()
291 if (vsi->type == ICE_VSI_VF) in ice_vsi_delete_from_hw()
292 ctxt->vf_num = vsi->vf->vf_id; in ice_vsi_delete_from_hw()
293 ctxt->vsi_num = vsi->vsi_num; in ice_vsi_delete_from_hw()
295 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info)); in ice_vsi_delete_from_hw()
297 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); in ice_vsi_delete_from_hw()
300 vsi->vsi_num, status); in ice_vsi_delete_from_hw()
309 static void ice_vsi_free_arrays(struct ice_vsi *vsi) in ice_vsi_free_arrays() argument
311 struct ice_pf *pf = vsi->back; in ice_vsi_free_arrays()
317 devm_kfree(dev, vsi->q_vectors); in ice_vsi_free_arrays()
318 vsi->q_vectors = NULL; in ice_vsi_free_arrays()
319 devm_kfree(dev, vsi->tx_rings); in ice_vsi_free_arrays()
320 vsi->tx_rings = NULL; in ice_vsi_free_arrays()
321 devm_kfree(dev, vsi->rx_rings); in ice_vsi_free_arrays()
322 vsi->rx_rings = NULL; in ice_vsi_free_arrays()
323 devm_kfree(dev, vsi->txq_map); in ice_vsi_free_arrays()
324 vsi->txq_map = NULL; in ice_vsi_free_arrays()
325 devm_kfree(dev, vsi->rxq_map); in ice_vsi_free_arrays()
326 vsi->rxq_map = NULL; in ice_vsi_free_arrays()
333 static void ice_vsi_free_stats(struct ice_vsi *vsi) in ice_vsi_free_stats() argument
336 struct ice_pf *pf = vsi->back; in ice_vsi_free_stats()
339 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_free_stats()
344 vsi_stat = pf->vsi_stats[vsi->idx]; in ice_vsi_free_stats()
348 ice_for_each_alloc_txq(vsi, i) { in ice_vsi_free_stats()
355 ice_for_each_alloc_rxq(vsi, i) { in ice_vsi_free_stats()
365 pf->vsi_stats[vsi->idx] = NULL; in ice_vsi_free_stats()
372 static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi) in ice_vsi_alloc_ring_stats() argument
377 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_ring_stats()
380 vsi_stats = pf->vsi_stats[vsi->idx]; in ice_vsi_alloc_ring_stats()
385 ice_for_each_alloc_txq(vsi, i) { in ice_vsi_alloc_ring_stats()
389 ring = vsi->tx_rings[i]; in ice_vsi_alloc_ring_stats()
404 ice_for_each_alloc_rxq(vsi, i) { in ice_vsi_alloc_ring_stats()
408 ring = vsi->rx_rings[i]; in ice_vsi_alloc_ring_stats()
425 ice_vsi_free_stats(vsi); in ice_vsi_alloc_ring_stats()
436 void ice_vsi_free(struct ice_vsi *vsi) in ice_vsi_free() argument
441 if (!vsi || !vsi->back) in ice_vsi_free()
444 pf = vsi->back; in ice_vsi_free()
447 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { in ice_vsi_free()
448 dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx); in ice_vsi_free()
455 pf->vsi[vsi->idx] = NULL; in ice_vsi_free()
456 pf->next_vsi = vsi->idx; in ice_vsi_free()
458 ice_vsi_free_stats(vsi); in ice_vsi_free()
459 ice_vsi_free_arrays(vsi); in ice_vsi_free()
460 mutex_destroy(&vsi->xdp_state_lock); in ice_vsi_free()
462 devm_kfree(dev, vsi); in ice_vsi_free()
465 void ice_vsi_delete(struct ice_vsi *vsi) in ice_vsi_delete() argument
467 ice_vsi_delete_from_hw(vsi); in ice_vsi_delete()
468 ice_vsi_free(vsi); in ice_vsi_delete()
513 static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi) in ice_vsi_alloc_stat_arrays() argument
516 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_stat_arrays()
518 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_alloc_stat_arrays()
523 if (pf->vsi_stats[vsi->idx]) in ice_vsi_alloc_stat_arrays()
532 kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats), in ice_vsi_alloc_stat_arrays()
538 kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats), in ice_vsi_alloc_stat_arrays()
543 pf->vsi_stats[vsi->idx] = vsi_stat; in ice_vsi_alloc_stat_arrays()
552 pf->vsi_stats[vsi->idx] = NULL; in ice_vsi_alloc_stat_arrays()
562 ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) in ice_vsi_alloc_def() argument
564 if (vsi->type != ICE_VSI_CHNL) { in ice_vsi_alloc_def()
565 ice_vsi_set_num_qs(vsi); in ice_vsi_alloc_def()
566 if (ice_vsi_alloc_arrays(vsi)) in ice_vsi_alloc_def()
570 switch (vsi->type) { in ice_vsi_alloc_def()
574 vsi->irq_handler = ice_msix_clean_rings; in ice_vsi_alloc_def()
578 vsi->irq_handler = ice_msix_clean_ctrl_vsi; in ice_vsi_alloc_def()
584 vsi->num_rxq = ch->num_rxq; in ice_vsi_alloc_def()
585 vsi->num_txq = ch->num_txq; in ice_vsi_alloc_def()
586 vsi->next_base_q = ch->base_q; in ice_vsi_alloc_def()
592 ice_vsi_free_arrays(vsi); in ice_vsi_alloc_def()
612 struct ice_vsi *vsi = NULL; in ice_vsi_alloc() local
626 vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL); in ice_vsi_alloc()
627 if (!vsi) in ice_vsi_alloc()
630 vsi->back = pf; in ice_vsi_alloc()
631 set_bit(ICE_VSI_DOWN, vsi->state); in ice_vsi_alloc()
634 vsi->idx = pf->next_vsi; in ice_vsi_alloc()
635 pf->vsi[pf->next_vsi] = vsi; in ice_vsi_alloc()
638 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, in ice_vsi_alloc()
641 mutex_init(&vsi->xdp_state_lock); in ice_vsi_alloc()
645 return vsi; in ice_vsi_alloc()
656 static int ice_alloc_fd_res(struct ice_vsi *vsi) in ice_alloc_fd_res() argument
658 struct ice_pf *pf = vsi->back; in ice_alloc_fd_res()
668 if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF || in ice_alloc_fd_res()
669 vsi->type == ICE_VSI_CHNL)) in ice_alloc_fd_res()
690 if (vsi->type == ICE_VSI_PF) { in ice_alloc_fd_res()
691 vsi->num_gfltr = g_val; in ice_alloc_fd_res()
699 vsi->num_gfltr = ICE_PF_VSI_GFLTR; in ice_alloc_fd_res()
703 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
704 } else if (vsi->type == ICE_VSI_VF) { in ice_alloc_fd_res()
705 vsi->num_gfltr = 0; in ice_alloc_fd_res()
708 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
731 vsi->num_gfltr = g_val / numtc; in ice_alloc_fd_res()
734 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
746 static int ice_vsi_get_qs(struct ice_vsi *vsi) in ice_vsi_get_qs() argument
748 struct ice_pf *pf = vsi->back; in ice_vsi_get_qs()
753 .q_count = vsi->alloc_txq, in ice_vsi_get_qs()
755 .vsi_map = vsi->txq_map, in ice_vsi_get_qs()
763 .q_count = vsi->alloc_rxq, in ice_vsi_get_qs()
765 .vsi_map = vsi->rxq_map, in ice_vsi_get_qs()
771 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_get_qs()
777 vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode; in ice_vsi_get_qs()
782 vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode; in ice_vsi_get_qs()
791 static void ice_vsi_put_qs(struct ice_vsi *vsi) in ice_vsi_put_qs() argument
793 struct ice_pf *pf = vsi->back; in ice_vsi_put_qs()
798 ice_for_each_alloc_txq(vsi, i) { in ice_vsi_put_qs()
799 clear_bit(vsi->txq_map[i], pf->avail_txqs); in ice_vsi_put_qs()
800 vsi->txq_map[i] = ICE_INVAL_Q_INDEX; in ice_vsi_put_qs()
803 ice_for_each_alloc_rxq(vsi, i) { in ice_vsi_put_qs()
804 clear_bit(vsi->rxq_map[i], pf->avail_rxqs); in ice_vsi_put_qs()
805 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; in ice_vsi_put_qs()
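
ice_vsi_put_qs() returns queues by clearing bits in the PF-wide bitmaps (a set bit marks a global queue as in use) and poisoning the per-VSI map entry. A hypothetical acquire-side counterpart for a single Tx queue (the driver itself takes a mutex and grabs contiguous or scattered ranges):

    static int get_one_txq(struct ice_pf *pf, struct ice_vsi *vsi, int i)
    {
            unsigned long q;

            q = find_first_zero_bit(pf->avail_txqs, pf->max_pf_txqs);
            if (q >= pf->max_pf_txqs)
                    return -ENOSPC;

            set_bit(q, pf->avail_txqs);     /* set bit = queue in use */
            vsi->txq_map[i] = (u16)q;       /* VSI queue i -> global queue q */
            return 0;
    }
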
840 static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi) in ice_vsi_clean_rss_flow_fld() argument
842 struct ice_pf *pf = vsi->back; in ice_vsi_clean_rss_flow_fld()
848 status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); in ice_vsi_clean_rss_flow_fld()
851 vsi->vsi_num, status); in ice_vsi_clean_rss_flow_fld()
858 static void ice_rss_clean(struct ice_vsi *vsi) in ice_rss_clean() argument
860 struct ice_pf *pf = vsi->back; in ice_rss_clean()
865 devm_kfree(dev, vsi->rss_hkey_user); in ice_rss_clean()
866 devm_kfree(dev, vsi->rss_lut_user); in ice_rss_clean()
868 ice_vsi_clean_rss_flow_fld(vsi); in ice_rss_clean()
871 ice_rem_vsi_rss_list(&pf->hw, vsi->idx); in ice_rss_clean()
878 static void ice_vsi_set_rss_params(struct ice_vsi *vsi) in ice_vsi_set_rss_params() argument
881 struct ice_pf *pf = vsi->back; in ice_vsi_set_rss_params()
885 vsi->rss_size = 1; in ice_vsi_set_rss_params()
891 switch (vsi->type) { in ice_vsi_set_rss_params()
895 vsi->rss_table_size = (u16)cap->rss_table_size; in ice_vsi_set_rss_params()
896 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_set_rss_params()
897 vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size); in ice_vsi_set_rss_params()
899 vsi->rss_size = min_t(u16, num_online_cpus(), in ice_vsi_set_rss_params()
901 vsi->rss_lut_type = ICE_LUT_PF; in ice_vsi_set_rss_params()
904 vsi->rss_table_size = ICE_LUT_VSI_SIZE; in ice_vsi_set_rss_params()
905 vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size); in ice_vsi_set_rss_params()
906 vsi->rss_lut_type = ICE_LUT_VSI; in ice_vsi_set_rss_params()
912 vsi->rss_table_size = ICE_LUT_VSI_SIZE; in ice_vsi_set_rss_params()
913 vsi->rss_size = ICE_MAX_RSS_QS_PER_VF; in ice_vsi_set_rss_params()
914 vsi->rss_lut_type = ICE_LUT_VSI; in ice_vsi_set_rss_params()
920 ice_vsi_type_str(vsi->type)); in ice_vsi_set_rss_params()
986 static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) in ice_vsi_setup_q_map() argument
990 u16 qcount_tx = vsi->alloc_txq; in ice_vsi_setup_q_map()
991 u16 qcount_rx = vsi->alloc_rxq; in ice_vsi_setup_q_map()
995 if (!vsi->tc_cfg.numtc) { in ice_vsi_setup_q_map()
997 vsi->tc_cfg.numtc = 1; in ice_vsi_setup_q_map()
998 vsi->tc_cfg.ena_tc = 1; in ice_vsi_setup_q_map()
1001 num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC); in ice_vsi_setup_q_map()
1004 num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc; in ice_vsi_setup_q_map()
1023 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { in ice_vsi_setup_q_map()
1025 vsi->tc_cfg.tc_info[i].qoffset = 0; in ice_vsi_setup_q_map()
1026 vsi->tc_cfg.tc_info[i].qcount_rx = 1; in ice_vsi_setup_q_map()
1027 vsi->tc_cfg.tc_info[i].qcount_tx = 1; in ice_vsi_setup_q_map()
1028 vsi->tc_cfg.tc_info[i].netdev_tc = 0; in ice_vsi_setup_q_map()
1034 vsi->tc_cfg.tc_info[i].qoffset = offset; in ice_vsi_setup_q_map()
1035 vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc; in ice_vsi_setup_q_map()
1036 vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc; in ice_vsi_setup_q_map()
1037 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; in ice_vsi_setup_q_map()
1057 if (rx_count > vsi->alloc_rxq) { in ice_vsi_setup_q_map()
1058 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map()
1059 rx_count, vsi->alloc_rxq); in ice_vsi_setup_q_map()
1063 if (tx_count > vsi->alloc_txq) { in ice_vsi_setup_q_map()
1064 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map()
1065 tx_count, vsi->alloc_txq); in ice_vsi_setup_q_map()
1069 vsi->num_txq = tx_count; in ice_vsi_setup_q_map()
1070 vsi->num_rxq = rx_count; in ice_vsi_setup_q_map()
1072 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { in ice_vsi_setup_q_map()
1073 dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); in ice_vsi_setup_q_map()
1077 vsi->num_txq = vsi->num_rxq; in ice_vsi_setup_q_map()
1086 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); in ice_vsi_setup_q_map()
1087 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); in ice_vsi_setup_q_map()
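
Each per-TC entry written by the function above packs the offset of the first queue together with a power-of-two queue count. A sketch, assuming the ICE_AQ_VSI_TC_Q_OFFSET_M/ICE_AQ_VSI_TC_Q_NUM_M field macros from ice_adminq_cmd.h:

    /* per-TC map entry: first-queue offset plus log2 of the queue count */
    u16 qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset) |
               FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, order_base_2(qcount));

    ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
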
1097 static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) in ice_set_fd_vsi_ctx() argument
1102 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL && in ice_set_fd_vsi_ctx()
1103 vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL) in ice_set_fd_vsi_ctx()
1118 cpu_to_le16(vsi->num_gfltr); in ice_set_fd_vsi_ctx()
1121 cpu_to_le16(vsi->num_bfltr); in ice_set_fd_vsi_ctx()
1139 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) in ice_set_rss_vsi_ctx() argument
1145 pf = vsi->back; in ice_set_rss_vsi_ctx()
1148 switch (vsi->type) { in ice_set_rss_vsi_ctx()
1161 ice_vsi_type_str(vsi->type)); in ice_set_rss_vsi_ctx()
1166 vsi->rss_hfunc = hash_type; in ice_set_rss_vsi_ctx()
1174 ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) in ice_chnl_vsi_setup_q_map() argument
1176 struct ice_pf *pf = vsi->back; in ice_chnl_vsi_setup_q_map()
1181 qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix); in ice_chnl_vsi_setup_q_map()
1189 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q); in ice_chnl_vsi_setup_q_map()
1199 static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi) in ice_vsi_is_vlan_pruning_ena() argument
1201 return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; in ice_vsi_is_vlan_pruning_ena()
1215 static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags) in ice_vsi_init() argument
1217 struct ice_pf *pf = vsi->back; in ice_vsi_init()
1228 switch (vsi->type) { in ice_vsi_init()
1241 ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id; in ice_vsi_init()
1251 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_init()
1265 ice_set_fd_vsi_ctx(ctxt, vsi); in ice_vsi_init()
1267 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) in ice_vsi_init()
1272 vsi->type != ICE_VSI_CTRL) { in ice_vsi_init()
1273 ice_set_rss_vsi_ctx(ctxt, vsi); in ice_vsi_init()
1282 ctxt->info.sw_id = vsi->port_info->sw_id; in ice_vsi_init()
1283 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_init()
1284 ice_chnl_vsi_setup_q_map(vsi, ctxt); in ice_vsi_init()
1286 ret = ice_vsi_setup_q_map(vsi, ctxt); in ice_vsi_init()
1300 if (vsi->type == ICE_VSI_PF) { in ice_vsi_init()
1307 ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_init()
1314 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_init()
1323 vsi->info = ctxt->info; in ice_vsi_init()
1326 vsi->vsi_num = ctxt->vsi_num; in ice_vsi_init()
1337 static void ice_vsi_clear_rings(struct ice_vsi *vsi) in ice_vsi_clear_rings() argument
1342 if (vsi->q_vectors) { in ice_vsi_clear_rings()
1343 ice_for_each_q_vector(vsi, i) { in ice_vsi_clear_rings()
1344 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_clear_rings()
1353 if (vsi->tx_rings) { in ice_vsi_clear_rings()
1354 ice_for_each_alloc_txq(vsi, i) { in ice_vsi_clear_rings()
1355 if (vsi->tx_rings[i]) { in ice_vsi_clear_rings()
1356 kfree_rcu(vsi->tx_rings[i], rcu); in ice_vsi_clear_rings()
1357 WRITE_ONCE(vsi->tx_rings[i], NULL); in ice_vsi_clear_rings()
1361 if (vsi->rx_rings) { in ice_vsi_clear_rings()
1362 ice_for_each_alloc_rxq(vsi, i) { in ice_vsi_clear_rings()
1363 if (vsi->rx_rings[i]) { in ice_vsi_clear_rings()
1364 kfree_rcu(vsi->rx_rings[i], rcu); in ice_vsi_clear_rings()
1365 WRITE_ONCE(vsi->rx_rings[i], NULL); in ice_vsi_clear_rings()
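
Rings are freed via kfree_rcu() and the array slots cleared with WRITE_ONCE() so that lockless readers never dereference a freed ring. The matching reader side is roughly (field names illustrative):

    rcu_read_lock();
    ring = READ_ONCE(vsi->tx_rings[i]);
    if (ring)
            pkts = ring->ring_stats->stats.pkts;
    rcu_read_unlock();
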
1375 static int ice_vsi_alloc_rings(struct ice_vsi *vsi) in ice_vsi_alloc_rings() argument
1377 bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw); in ice_vsi_alloc_rings()
1378 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_rings()
1384 ice_for_each_alloc_txq(vsi, i) { in ice_vsi_alloc_rings()
1394 ring->reg_idx = vsi->txq_map[i]; in ice_vsi_alloc_rings()
1395 ring->vsi = vsi; in ice_vsi_alloc_rings()
1398 ring->count = vsi->num_tx_desc; in ice_vsi_alloc_rings()
1404 WRITE_ONCE(vsi->tx_rings[i], ring); in ice_vsi_alloc_rings()
1408 ice_for_each_alloc_rxq(vsi, i) { in ice_vsi_alloc_rings()
1417 ring->reg_idx = vsi->rxq_map[i]; in ice_vsi_alloc_rings()
1418 ring->vsi = vsi; in ice_vsi_alloc_rings()
1419 ring->netdev = vsi->netdev; in ice_vsi_alloc_rings()
1421 ring->count = vsi->num_rx_desc; in ice_vsi_alloc_rings()
1423 WRITE_ONCE(vsi->rx_rings[i], ring); in ice_vsi_alloc_rings()
1429 ice_vsi_clear_rings(vsi); in ice_vsi_alloc_rings()
1442 void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) in ice_vsi_manage_rss_lut() argument
1446 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in ice_vsi_manage_rss_lut()
1451 if (vsi->rss_lut_user) in ice_vsi_manage_rss_lut()
1452 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in ice_vsi_manage_rss_lut()
1454 ice_fill_rss_lut(lut, vsi->rss_table_size, in ice_vsi_manage_rss_lut()
1455 vsi->rss_size); in ice_vsi_manage_rss_lut()
1458 ice_set_rss_lut(vsi, lut, vsi->rss_table_size); in ice_vsi_manage_rss_lut()
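
When no user-supplied LUT exists, ice_fill_rss_lut() spreads queues round-robin across the table; its effect is equivalent to:

    static void fill_rss_lut(u8 *lut, u16 lut_size, u16 rss_size)
    {
            u16 i;

            /* queue index repeats every rss_size entries */
            for (i = 0; i < lut_size; i++)
                    lut[i] = i % rss_size;
    }
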
1467 void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable) in ice_vsi_cfg_crc_strip() argument
1471 ice_for_each_rxq(vsi, i) in ice_vsi_cfg_crc_strip()
1473 vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS; in ice_vsi_cfg_crc_strip()
1475 vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS; in ice_vsi_cfg_crc_strip()
1482 int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) in ice_vsi_cfg_rss_lut_key() argument
1484 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_rss_lut_key()
1490 if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size && in ice_vsi_cfg_rss_lut_key()
1492 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size); in ice_vsi_cfg_rss_lut_key()
1494 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq); in ice_vsi_cfg_rss_lut_key()
1502 if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size && in ice_vsi_cfg_rss_lut_key()
1503 vsi->orig_rss_size <= vsi->num_rxq) { in ice_vsi_cfg_rss_lut_key()
1504 vsi->rss_size = vsi->orig_rss_size; in ice_vsi_cfg_rss_lut_key()
1506 vsi->orig_rss_size = 0; in ice_vsi_cfg_rss_lut_key()
1510 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in ice_vsi_cfg_rss_lut_key()
1514 if (vsi->rss_lut_user) in ice_vsi_cfg_rss_lut_key()
1515 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in ice_vsi_cfg_rss_lut_key()
1517 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); in ice_vsi_cfg_rss_lut_key()
1519 err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size); in ice_vsi_cfg_rss_lut_key()
1531 if (vsi->rss_hkey_user) in ice_vsi_cfg_rss_lut_key()
1532 memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); in ice_vsi_cfg_rss_lut_key()
1536 err = ice_set_rss_key(vsi, key); in ice_vsi_cfg_rss_lut_key()
1554 static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) in ice_vsi_set_vf_rss_flow_fld() argument
1556 struct ice_pf *pf = vsi->back; in ice_vsi_set_vf_rss_flow_fld()
1563 vsi->vsi_num); in ice_vsi_set_vf_rss_flow_fld()
1567 status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA); in ice_vsi_set_vf_rss_flow_fld()
1570 vsi->vsi_num, status); in ice_vsi_set_vf_rss_flow_fld()
1653 static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) in ice_vsi_set_rss_flow_fld() argument
1655 u16 vsi_num = vsi->vsi_num; in ice_vsi_set_rss_flow_fld()
1656 struct ice_pf *pf = vsi->back; in ice_vsi_set_rss_flow_fld()
1671 status = ice_add_rss_cfg(hw, vsi, cfg); in ice_vsi_set_rss_flow_fld()
1713 void ice_update_eth_stats(struct ice_vsi *vsi) in ice_update_eth_stats() argument
1716 struct ice_hw *hw = &vsi->back->hw; in ice_update_eth_stats()
1717 struct ice_pf *pf = vsi->back; in ice_update_eth_stats()
1718 u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ in ice_update_eth_stats()
1720 prev_es = &vsi->eth_stats_prev; in ice_update_eth_stats()
1721 cur_es = &vsi->eth_stats; in ice_update_eth_stats()
1724 vsi->stat_offsets_loaded = false; in ice_update_eth_stats()
1726 ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1729 ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1732 ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1735 ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1738 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1741 ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1744 ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1747 ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1750 ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1753 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1756 vsi->stat_offsets_loaded = true; in ice_update_eth_stats()
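
Each ice_stat_update40() call above accumulates the delta of a free-running 40-bit hardware counter against the previous snapshot, seeding the offset on the first read and handling wraparound. A sketch of that arithmetic:

    static void stat_update40(u64 new_data, bool offsets_loaded,
                              u64 *prev, u64 *cur)
    {
            new_data &= BIT_ULL(40) - 1;    /* counter is 40 bits wide */

            if (!offsets_loaded)            /* first read seeds the offset */
                    *prev = new_data;

            if (new_data >= *prev)
                    *cur += new_data - *prev;
            else                            /* the 40-bit counter wrapped */
                    *cur += (new_data + BIT_ULL(40)) - *prev;

            *prev = new_data;
    }
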
1812 struct ice_hw *hw = &q_vector->vsi->back->hw; in ice_write_intrl()
1845 struct ice_hw *hw = &q_vector->vsi->back->hw; in __ice_write_itr()
1899 void ice_vsi_cfg_msix(struct ice_vsi *vsi) in ice_vsi_cfg_msix() argument
1901 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_msix()
1906 ice_for_each_q_vector(vsi, i) { in ice_vsi_cfg_msix()
1907 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_cfg_msix()
1924 ice_cfg_txq_interrupt(vsi, txq, reg_idx, in ice_vsi_cfg_msix()
1930 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx, in ice_vsi_cfg_msix()
1943 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi) in ice_vsi_start_all_rx_rings() argument
1945 return ice_vsi_ctrl_all_rx_rings(vsi, true); in ice_vsi_start_all_rx_rings()
1954 int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi) in ice_vsi_stop_all_rx_rings() argument
1956 return ice_vsi_ctrl_all_rx_rings(vsi, false); in ice_vsi_stop_all_rx_rings()
1968 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, in ice_vsi_stop_tx_rings() argument
1973 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) in ice_vsi_stop_tx_rings()
1983 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); in ice_vsi_stop_tx_rings()
1984 status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num, in ice_vsi_stop_tx_rings()
2001 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, in ice_vsi_stop_lan_tx_rings() argument
2004 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq); in ice_vsi_stop_lan_tx_rings()
2011 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi) in ice_vsi_stop_xdp_tx_rings() argument
2013 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); in ice_vsi_stop_xdp_tx_rings()
2022 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi) in ice_vsi_is_rx_queue_active() argument
2024 struct ice_pf *pf = vsi->back; in ice_vsi_is_rx_queue_active()
2028 ice_for_each_rxq(vsi, i) { in ice_vsi_is_rx_queue_active()
2032 pf_q = vsi->rxq_map[i]; in ice_vsi_is_rx_queue_active()
2041 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) in ice_vsi_set_tc_cfg() argument
2043 if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { in ice_vsi_set_tc_cfg()
2044 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; in ice_vsi_set_tc_cfg()
2045 vsi->tc_cfg.numtc = 1; in ice_vsi_set_tc_cfg()
2050 ice_vsi_set_dcb_tc_cfg(vsi); in ice_vsi_set_tc_cfg()
2059 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) in ice_cfg_sw_lldp() argument
2063 struct ice_pf *pf = vsi->back; in ice_cfg_sw_lldp()
2071 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX, in ice_cfg_sw_lldp()
2075 status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, in ice_cfg_sw_lldp()
2078 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, in ice_cfg_sw_lldp()
2086 vsi->vsi_num, status); in ice_cfg_sw_lldp()
2096 static void ice_set_agg_vsi(struct ice_vsi *vsi) in ice_set_agg_vsi() argument
2098 struct device *dev = ice_pf_to_dev(vsi->back); in ice_set_agg_vsi()
2104 struct ice_pf *pf = vsi->back; in ice_set_agg_vsi()
2117 switch (vsi->type) { in ice_set_agg_vsi()
2141 ice_vsi_type_str(vsi->type)); in ice_set_agg_vsi()
2179 (u8)vsi->tc_cfg.ena_tc); in ice_set_agg_vsi()
2191 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx, in ice_set_agg_vsi()
2192 (u8)vsi->tc_cfg.ena_tc); in ice_set_agg_vsi()
2195 vsi->idx, agg_id); in ice_set_agg_vsi()
2205 vsi->agg_node = agg_node; in ice_set_agg_vsi()
2207 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id, in ice_set_agg_vsi()
2208 vsi->agg_node->num_vsis); in ice_set_agg_vsi()
2211 static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi) in ice_vsi_cfg_tc_lan() argument
2219 if (!(vsi->tc_cfg.ena_tc & BIT(i))) in ice_vsi_cfg_tc_lan()
2222 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_cfg_tc_lan()
2223 if (!vsi->alloc_txq && vsi->num_txq) in ice_vsi_cfg_tc_lan()
2224 max_txqs[i] = vsi->num_txq; in ice_vsi_cfg_tc_lan()
2228 max_txqs[i] = vsi->alloc_txq; in ice_vsi_cfg_tc_lan()
2231 if (vsi->type == ICE_VSI_PF) in ice_vsi_cfg_tc_lan()
2232 max_txqs[i] += vsi->num_xdp_txq; in ice_vsi_cfg_tc_lan()
2235 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); in ice_vsi_cfg_tc_lan()
2236 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, in ice_vsi_cfg_tc_lan()
2240 vsi->vsi_num, ret); in ice_vsi_cfg_tc_lan()
2251 static int ice_vsi_cfg_def(struct ice_vsi *vsi) in ice_vsi_cfg_def() argument
2253 struct device *dev = ice_pf_to_dev(vsi->back); in ice_vsi_cfg_def()
2254 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_def()
2257 vsi->vsw = pf->first_sw; in ice_vsi_cfg_def()
2259 ret = ice_vsi_alloc_def(vsi, vsi->ch); in ice_vsi_cfg_def()
2264 ret = ice_vsi_alloc_stat_arrays(vsi); in ice_vsi_cfg_def()
2268 ice_alloc_fd_res(vsi); in ice_vsi_cfg_def()
2270 ret = ice_vsi_get_qs(vsi); in ice_vsi_cfg_def()
2273 vsi->idx); in ice_vsi_cfg_def()
2278 ice_vsi_set_rss_params(vsi); in ice_vsi_cfg_def()
2281 ice_vsi_set_tc_cfg(vsi); in ice_vsi_cfg_def()
2284 ret = ice_vsi_init(vsi, vsi->flags); in ice_vsi_cfg_def()
2288 ice_vsi_init_vlan_ops(vsi); in ice_vsi_cfg_def()
2290 switch (vsi->type) { in ice_vsi_cfg_def()
2294 ret = ice_vsi_alloc_q_vectors(vsi); in ice_vsi_cfg_def()
2298 ret = ice_vsi_alloc_rings(vsi); in ice_vsi_cfg_def()
2302 ret = ice_vsi_alloc_ring_stats(vsi); in ice_vsi_cfg_def()
2306 if (ice_is_xdp_ena_vsi(vsi)) { in ice_vsi_cfg_def()
2307 ret = ice_vsi_determine_xdp_res(vsi); in ice_vsi_cfg_def()
2310 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog, in ice_vsi_cfg_def()
2316 ice_vsi_map_rings_to_vectors(vsi); in ice_vsi_cfg_def()
2318 vsi->stat_offsets_loaded = false; in ice_vsi_cfg_def()
2321 if (vsi->type != ICE_VSI_CTRL) in ice_vsi_cfg_def()
2327 ice_vsi_cfg_rss_lut_key(vsi); in ice_vsi_cfg_def()
2328 ice_vsi_set_rss_flow_fld(vsi); in ice_vsi_cfg_def()
2330 ice_init_arfs(vsi); in ice_vsi_cfg_def()
2334 ice_vsi_cfg_rss_lut_key(vsi); in ice_vsi_cfg_def()
2335 ice_vsi_set_rss_flow_fld(vsi); in ice_vsi_cfg_def()
2344 ret = ice_vsi_alloc_q_vectors(vsi); in ice_vsi_cfg_def()
2348 ret = ice_vsi_alloc_rings(vsi); in ice_vsi_cfg_def()
2352 ret = ice_vsi_alloc_ring_stats(vsi); in ice_vsi_cfg_def()
2356 vsi->stat_offsets_loaded = false; in ice_vsi_cfg_def()
2363 ice_vsi_cfg_rss_lut_key(vsi); in ice_vsi_cfg_def()
2364 ice_vsi_set_vf_rss_flow_fld(vsi); in ice_vsi_cfg_def()
2368 ret = ice_vsi_alloc_rings(vsi); in ice_vsi_cfg_def()
2372 ret = ice_vsi_alloc_ring_stats(vsi); in ice_vsi_cfg_def()
2388 ice_vsi_free_q_vectors(vsi); in ice_vsi_cfg_def()
2390 ice_vsi_delete_from_hw(vsi); in ice_vsi_cfg_def()
2392 ice_vsi_put_qs(vsi); in ice_vsi_cfg_def()
2394 ice_vsi_free_stats(vsi); in ice_vsi_cfg_def()
2396 ice_vsi_free_arrays(vsi); in ice_vsi_cfg_def()
2404 int ice_vsi_cfg(struct ice_vsi *vsi) in ice_vsi_cfg() argument
2406 struct ice_pf *pf = vsi->back; in ice_vsi_cfg()
2409 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) in ice_vsi_cfg()
2412 ret = ice_vsi_cfg_def(vsi); in ice_vsi_cfg()
2416 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); in ice_vsi_cfg()
2418 ice_vsi_decfg(vsi); in ice_vsi_cfg()
2420 if (vsi->type == ICE_VSI_CTRL) { in ice_vsi_cfg()
2421 if (vsi->vf) { in ice_vsi_cfg()
2422 WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI); in ice_vsi_cfg()
2423 vsi->vf->ctrl_vsi_idx = vsi->idx; in ice_vsi_cfg()
2426 pf->ctrl_vsi_idx = vsi->idx; in ice_vsi_cfg()
2437 void ice_vsi_decfg(struct ice_vsi *vsi) in ice_vsi_decfg() argument
2439 struct ice_pf *pf = vsi->back; in ice_vsi_decfg()
2442 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); in ice_vsi_decfg()
2443 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); in ice_vsi_decfg()
2446 vsi->vsi_num, err); in ice_vsi_decfg()
2448 if (vsi->xdp_rings) in ice_vsi_decfg()
2452 ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART); in ice_vsi_decfg()
2454 ice_vsi_clear_rings(vsi); in ice_vsi_decfg()
2455 ice_vsi_free_q_vectors(vsi); in ice_vsi_decfg()
2456 ice_vsi_put_qs(vsi); in ice_vsi_decfg()
2457 ice_vsi_free_arrays(vsi); in ice_vsi_decfg()
2465 if (vsi->type == ICE_VSI_VF && in ice_vsi_decfg()
2466 vsi->agg_node && vsi->agg_node->valid) in ice_vsi_decfg()
2467 vsi->agg_node->num_vsis--; in ice_vsi_decfg()
2484 struct ice_vsi *vsi; in ice_vsi_setup() local
2494 vsi = ice_vsi_alloc(pf); in ice_vsi_setup()
2495 if (!vsi) { in ice_vsi_setup()
2500 vsi->params = *params; in ice_vsi_setup()
2501 ret = ice_vsi_cfg(vsi); in ice_vsi_setup()
2514 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { in ice_vsi_setup()
2515 ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, in ice_vsi_setup()
2517 ice_cfg_sw_lldp(vsi, true, true); in ice_vsi_setup()
2520 if (!vsi->agg_node) in ice_vsi_setup()
2521 ice_set_agg_vsi(vsi); in ice_vsi_setup()
2523 return vsi; in ice_vsi_setup()
2526 ice_vsi_free(vsi); in ice_vsi_setup()
2535 static void ice_vsi_release_msix(struct ice_vsi *vsi) in ice_vsi_release_msix() argument
2537 struct ice_pf *pf = vsi->back; in ice_vsi_release_msix()
2543 ice_for_each_q_vector(vsi, i) { in ice_vsi_release_msix()
2544 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_release_msix()
2549 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); in ice_vsi_release_msix()
2550 if (vsi->xdp_rings) { in ice_vsi_release_msix()
2551 u32 xdp_txq = txq + vsi->num_xdp_txq; in ice_vsi_release_msix()
2553 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); in ice_vsi_release_msix()
2560 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); in ice_vsi_release_msix()
2572 void ice_vsi_free_irq(struct ice_vsi *vsi) in ice_vsi_free_irq() argument
2574 struct ice_pf *pf = vsi->back; in ice_vsi_free_irq()
2577 if (!vsi->q_vectors || !vsi->irqs_ready) in ice_vsi_free_irq()
2580 ice_vsi_release_msix(vsi); in ice_vsi_free_irq()
2581 if (vsi->type == ICE_VSI_VF) in ice_vsi_free_irq()
2584 vsi->irqs_ready = false; in ice_vsi_free_irq()
2585 ice_free_cpu_rx_rmap(vsi); in ice_vsi_free_irq()
2587 ice_for_each_q_vector(vsi, i) { in ice_vsi_free_irq()
2590 irq_num = vsi->q_vectors[i]->irq.virq; in ice_vsi_free_irq()
2593 if (!vsi->q_vectors[i] || in ice_vsi_free_irq()
2594 !(vsi->q_vectors[i]->num_ring_tx || in ice_vsi_free_irq()
2595 vsi->q_vectors[i]->num_ring_rx)) in ice_vsi_free_irq()
2605 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); in ice_vsi_free_irq()
2613 void ice_vsi_free_tx_rings(struct ice_vsi *vsi) in ice_vsi_free_tx_rings() argument
2617 if (!vsi->tx_rings) in ice_vsi_free_tx_rings()
2620 ice_for_each_txq(vsi, i) in ice_vsi_free_tx_rings()
2621 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in ice_vsi_free_tx_rings()
2622 ice_free_tx_ring(vsi->tx_rings[i]); in ice_vsi_free_tx_rings()
2629 void ice_vsi_free_rx_rings(struct ice_vsi *vsi) in ice_vsi_free_rx_rings() argument
2633 if (!vsi->rx_rings) in ice_vsi_free_rx_rings()
2636 ice_for_each_rxq(vsi, i) in ice_vsi_free_rx_rings()
2637 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) in ice_vsi_free_rx_rings()
2638 ice_free_rx_ring(vsi->rx_rings[i]); in ice_vsi_free_rx_rings()
2645 void ice_vsi_close(struct ice_vsi *vsi) in ice_vsi_close() argument
2647 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) in ice_vsi_close()
2648 ice_down(vsi); in ice_vsi_close()
2650 ice_vsi_clear_napi_queues(vsi); in ice_vsi_close()
2651 ice_vsi_free_irq(vsi); in ice_vsi_close()
2652 ice_vsi_free_tx_rings(vsi); in ice_vsi_close()
2653 ice_vsi_free_rx_rings(vsi); in ice_vsi_close()
2661 int ice_ena_vsi(struct ice_vsi *vsi, bool locked) in ice_ena_vsi() argument
2665 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state)) in ice_ena_vsi()
2668 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_ena_vsi()
2670 if (vsi->netdev && (vsi->type == ICE_VSI_PF || in ice_ena_vsi()
2671 vsi->type == ICE_VSI_SF)) { in ice_ena_vsi()
2672 if (netif_running(vsi->netdev)) { in ice_ena_vsi()
2676 err = ice_open_internal(vsi->netdev); in ice_ena_vsi()
2681 } else if (vsi->type == ICE_VSI_CTRL) { in ice_ena_vsi()
2682 err = ice_vsi_open_ctrl(vsi); in ice_ena_vsi()
2693 void ice_dis_vsi(struct ice_vsi *vsi, bool locked) in ice_dis_vsi() argument
2695 bool already_down = test_bit(ICE_VSI_DOWN, vsi->state); in ice_dis_vsi()
2697 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_dis_vsi()
2699 if (vsi->netdev && (vsi->type == ICE_VSI_PF || in ice_dis_vsi()
2700 vsi->type == ICE_VSI_SF)) { in ice_dis_vsi()
2701 if (netif_running(vsi->netdev)) { in ice_dis_vsi()
2704 already_down = test_bit(ICE_VSI_DOWN, vsi->state); in ice_dis_vsi()
2706 ice_vsi_close(vsi); in ice_dis_vsi()
2711 ice_vsi_close(vsi); in ice_dis_vsi()
2713 } else if (vsi->type == ICE_VSI_CTRL && !already_down) { in ice_dis_vsi()
2714 ice_vsi_close(vsi); in ice_dis_vsi()
2725 void ice_vsi_set_napi_queues(struct ice_vsi *vsi) in ice_vsi_set_napi_queues() argument
2727 struct net_device *netdev = vsi->netdev; in ice_vsi_set_napi_queues()
2733 ice_for_each_rxq(vsi, q_idx) in ice_vsi_set_napi_queues()
2735 &vsi->rx_rings[q_idx]->q_vector->napi); in ice_vsi_set_napi_queues()
2737 ice_for_each_txq(vsi, q_idx) in ice_vsi_set_napi_queues()
2739 &vsi->tx_rings[q_idx]->q_vector->napi); in ice_vsi_set_napi_queues()
2741 ice_for_each_q_vector(vsi, v_idx) { in ice_vsi_set_napi_queues()
2742 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; in ice_vsi_set_napi_queues()
2755 void ice_vsi_clear_napi_queues(struct ice_vsi *vsi) in ice_vsi_clear_napi_queues() argument
2757 struct net_device *netdev = vsi->netdev; in ice_vsi_clear_napi_queues()
2763 ice_for_each_txq(vsi, q_idx) in ice_vsi_clear_napi_queues()
2766 ice_for_each_rxq(vsi, q_idx) in ice_vsi_clear_napi_queues()
2778 void ice_napi_add(struct ice_vsi *vsi) in ice_napi_add() argument
2782 if (!vsi->netdev) in ice_napi_add()
2785 ice_for_each_q_vector(vsi, v_idx) in ice_napi_add()
2786 netif_napi_add_config(vsi->netdev, in ice_napi_add()
2787 &vsi->q_vectors[v_idx]->napi, in ice_napi_add()
2798 int ice_vsi_release(struct ice_vsi *vsi) in ice_vsi_release() argument
2802 if (!vsi->back) in ice_vsi_release()
2804 pf = vsi->back; in ice_vsi_release()
2807 ice_rss_clean(vsi); in ice_vsi_release()
2809 ice_vsi_close(vsi); in ice_vsi_release()
2814 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && in ice_vsi_release()
2816 ice_cfg_sw_lldp(vsi, false, false); in ice_vsi_release()
2818 ice_vsi_decfg(vsi); in ice_vsi_release()
2825 ice_vsi_delete(vsi); in ice_vsi_release()
2838 ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi, in ice_vsi_rebuild_get_coalesce() argument
2843 ice_for_each_q_vector(vsi, i) { in ice_vsi_rebuild_get_coalesce()
2844 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_rebuild_get_coalesce()
2850 if (i < vsi->num_txq) in ice_vsi_rebuild_get_coalesce()
2852 if (i < vsi->num_rxq) in ice_vsi_rebuild_get_coalesce()
2856 return vsi->num_q_vectors; in ice_vsi_rebuild_get_coalesce()
2870 ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, in ice_vsi_rebuild_set_coalesce() argument
2876 if ((size && !coalesce) || !vsi) in ice_vsi_rebuild_set_coalesce()
2885 for (i = 0; i < size && i < vsi->num_q_vectors; i++) { in ice_vsi_rebuild_set_coalesce()
2901 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { in ice_vsi_rebuild_set_coalesce()
2902 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
2905 } else if (i < vsi->alloc_rxq) { in ice_vsi_rebuild_set_coalesce()
2906 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
2911 if (i < vsi->alloc_txq && coalesce[i].tx_valid) { in ice_vsi_rebuild_set_coalesce()
2912 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
2915 } else if (i < vsi->alloc_txq) { in ice_vsi_rebuild_set_coalesce()
2916 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
2921 vsi->q_vectors[i]->intrl = coalesce[i].intrl; in ice_vsi_rebuild_set_coalesce()
2922 ice_set_q_vector_intrl(vsi->q_vectors[i]); in ice_vsi_rebuild_set_coalesce()
2928 for (; i < vsi->num_q_vectors; i++) { in ice_vsi_rebuild_set_coalesce()
2930 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
2935 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
2939 vsi->q_vectors[i]->intrl = coalesce[0].intrl; in ice_vsi_rebuild_set_coalesce()
2940 ice_set_q_vector_intrl(vsi->q_vectors[i]); in ice_vsi_rebuild_set_coalesce()
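
The get/set pair above carries per-vector ITR and INTRL settings across a rebuild in an array of ice_coalesce_stored; the saved state is roughly this shape (per ice.h, exact widths assumed):

    struct ice_coalesce_stored {
            u16 itr_tx;
            u16 itr_rx;
            u8 intrl;
            u8 tx_valid;    /* vector had a Tx ring, itr_tx applies */
            u8 rx_valid;    /* vector had an Rx ring, itr_rx applies */
    };
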
2949 ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi) in ice_vsi_realloc_stat_arrays() argument
2951 u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq; in ice_vsi_realloc_stat_arrays()
2952 u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq; in ice_vsi_realloc_stat_arrays()
2956 struct ice_pf *pf = vsi->back; in ice_vsi_realloc_stat_arrays()
2957 u16 prev_txq = vsi->alloc_txq; in ice_vsi_realloc_stat_arrays()
2958 u16 prev_rxq = vsi->alloc_rxq; in ice_vsi_realloc_stat_arrays()
2961 vsi_stat = pf->vsi_stats[vsi->idx]; in ice_vsi_realloc_stat_arrays()
3014 int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) in ice_vsi_rebuild() argument
3021 if (!vsi) in ice_vsi_rebuild()
3024 vsi->flags = vsi_flags; in ice_vsi_rebuild()
3025 pf = vsi->back; in ice_vsi_rebuild()
3026 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) in ice_vsi_rebuild()
3029 mutex_lock(&vsi->xdp_state_lock); in ice_vsi_rebuild()
3031 ret = ice_vsi_realloc_stat_arrays(vsi); in ice_vsi_rebuild()
3035 ice_vsi_decfg(vsi); in ice_vsi_rebuild()
3036 ret = ice_vsi_cfg_def(vsi); in ice_vsi_rebuild()
3040 coalesce = kcalloc(vsi->num_q_vectors, in ice_vsi_rebuild()
3047 prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); in ice_vsi_rebuild()
3049 ret = ice_vsi_cfg_tc_lan(pf, vsi); in ice_vsi_rebuild()
3060 ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); in ice_vsi_rebuild()
3061 clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state); in ice_vsi_rebuild()
3067 ice_vsi_decfg(vsi); in ice_vsi_rebuild()
3069 mutex_unlock(&vsi->xdp_state_lock); in ice_vsi_rebuild()
3118 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) in ice_vsi_update_q_map() argument
3120 vsi->info.mapping_flags = ctx->info.mapping_flags; in ice_vsi_update_q_map()
3121 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, in ice_vsi_update_q_map()
3122 sizeof(vsi->info.q_mapping)); in ice_vsi_update_q_map()
3123 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, in ice_vsi_update_q_map()
3124 sizeof(vsi->info.tc_mapping)); in ice_vsi_update_q_map()
3132 void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) in ice_vsi_cfg_netdev_tc() argument
3134 struct net_device *netdev = vsi->netdev; in ice_vsi_cfg_netdev_tc()
3135 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_netdev_tc()
3136 int numtc = vsi->tc_cfg.numtc; in ice_vsi_cfg_netdev_tc()
3145 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_cfg_netdev_tc()
3153 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf)) in ice_vsi_cfg_netdev_tc()
3154 numtc = vsi->all_numtc; in ice_vsi_cfg_netdev_tc()
3162 if (vsi->tc_cfg.ena_tc & BIT(i)) in ice_vsi_cfg_netdev_tc()
3164 vsi->tc_cfg.tc_info[i].netdev_tc, in ice_vsi_cfg_netdev_tc()
3165 vsi->tc_cfg.tc_info[i].qcount_tx, in ice_vsi_cfg_netdev_tc()
3166 vsi->tc_cfg.tc_info[i].qoffset); in ice_vsi_cfg_netdev_tc()
3169 if (!(vsi->all_enatc & BIT(i))) in ice_vsi_cfg_netdev_tc()
3171 if (!vsi->mqprio_qopt.qopt.count[i]) in ice_vsi_cfg_netdev_tc()
3174 vsi->mqprio_qopt.qopt.count[i], in ice_vsi_cfg_netdev_tc()
3175 vsi->mqprio_qopt.qopt.offset[i]); in ice_vsi_cfg_netdev_tc()
3185 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; in ice_vsi_cfg_netdev_tc()
3199 ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, in ice_vsi_setup_q_map_mqprio() argument
3203 u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; in ice_vsi_setup_q_map_mqprio()
3204 int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; in ice_vsi_setup_q_map_mqprio()
3209 vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1; in ice_vsi_setup_q_map_mqprio()
3216 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { in ice_vsi_setup_q_map_mqprio()
3218 vsi->tc_cfg.tc_info[i].qoffset = 0; in ice_vsi_setup_q_map_mqprio()
3219 vsi->tc_cfg.tc_info[i].qcount_rx = 1; in ice_vsi_setup_q_map_mqprio()
3220 vsi->tc_cfg.tc_info[i].qcount_tx = 1; in ice_vsi_setup_q_map_mqprio()
3221 vsi->tc_cfg.tc_info[i].netdev_tc = 0; in ice_vsi_setup_q_map_mqprio()
3226 offset = vsi->mqprio_qopt.qopt.offset[i]; in ice_vsi_setup_q_map_mqprio()
3227 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3228 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3229 vsi->tc_cfg.tc_info[i].qoffset = offset; in ice_vsi_setup_q_map_mqprio()
3230 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; in ice_vsi_setup_q_map_mqprio()
3231 vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx; in ice_vsi_setup_q_map_mqprio()
3232 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; in ice_vsi_setup_q_map_mqprio()
3235 if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) { in ice_vsi_setup_q_map_mqprio()
3237 if (!(vsi->all_enatc & BIT(i))) in ice_vsi_setup_q_map_mqprio()
3239 offset = vsi->mqprio_qopt.qopt.offset[i]; in ice_vsi_setup_q_map_mqprio()
3240 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3241 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3246 if (new_txq > vsi->alloc_txq) { in ice_vsi_setup_q_map_mqprio()
3247 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map_mqprio()
3248 new_txq, vsi->alloc_txq); in ice_vsi_setup_q_map_mqprio()
3253 if (new_rxq > vsi->alloc_rxq) { in ice_vsi_setup_q_map_mqprio()
3254 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map_mqprio()
3255 new_rxq, vsi->alloc_rxq); in ice_vsi_setup_q_map_mqprio()
3260 vsi->num_txq = new_txq; in ice_vsi_setup_q_map_mqprio()
3261 vsi->num_rxq = new_rxq; in ice_vsi_setup_q_map_mqprio()
3265 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); in ice_vsi_setup_q_map_mqprio()
3271 if (tc0_qcount && tc0_qcount < vsi->num_rxq) { in ice_vsi_setup_q_map_mqprio()
3272 vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount; in ice_vsi_setup_q_map_mqprio()
3273 vsi->next_base_q = tc0_qcount; in ice_vsi_setup_q_map_mqprio()
3275 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq); in ice_vsi_setup_q_map_mqprio()
3276 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); in ice_vsi_setup_q_map_mqprio()
3277 dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", in ice_vsi_setup_q_map_mqprio()
3278 vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc); in ice_vsi_setup_q_map_mqprio()
3290 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) in ice_vsi_cfg_tc() argument
3293 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_tc()
3301 if (vsi->tc_cfg.ena_tc == ena_tc && in ice_vsi_cfg_tc()
3302 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) in ice_vsi_cfg_tc()
3310 max_txqs[i] = vsi->alloc_txq; in ice_vsi_cfg_tc()
3314 if (vsi->type == ICE_VSI_CHNL && in ice_vsi_cfg_tc()
3316 max_txqs[i] = vsi->num_txq; in ice_vsi_cfg_tc()
3319 memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg)); in ice_vsi_cfg_tc()
3320 vsi->tc_cfg.ena_tc = ena_tc; in ice_vsi_cfg_tc()
3321 vsi->tc_cfg.numtc = num_tc; in ice_vsi_cfg_tc()
3328 ctx->info = vsi->info; in ice_vsi_cfg_tc()
3330 if (vsi->type == ICE_VSI_PF && in ice_vsi_cfg_tc()
3332 ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); in ice_vsi_cfg_tc()
3334 ret = ice_vsi_setup_q_map(vsi, ctx); in ice_vsi_cfg_tc()
3337 memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg)); in ice_vsi_cfg_tc()
3343 ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); in ice_vsi_cfg_tc()
3349 if (vsi->type == ICE_VSI_PF && in ice_vsi_cfg_tc()
3351 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); in ice_vsi_cfg_tc()
3353 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, in ice_vsi_cfg_tc()
3354 vsi->tc_cfg.ena_tc, max_txqs); in ice_vsi_cfg_tc()
3358 vsi->vsi_num, ret); in ice_vsi_cfg_tc()
3361 ice_vsi_update_q_map(vsi, ctx); in ice_vsi_cfg_tc()
3362 vsi->info.valid_sections = 0; in ice_vsi_cfg_tc()
3364 ice_vsi_cfg_netdev_tc(vsi, ena_tc); in ice_vsi_cfg_tc()
3431 bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi) in ice_is_vsi_dflt_vsi() argument
3433 return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL); in ice_is_vsi_dflt_vsi()
3446 int ice_set_dflt_vsi(struct ice_vsi *vsi) in ice_set_dflt_vsi() argument
3451 if (!vsi) in ice_set_dflt_vsi()
3454 dev = ice_pf_to_dev(vsi->back); in ice_set_dflt_vsi()
3456 if (ice_lag_is_switchdev_running(vsi->back)) { in ice_set_dflt_vsi()
3458 vsi->vsi_num); in ice_set_dflt_vsi()
3463 if (ice_is_vsi_dflt_vsi(vsi)) { in ice_set_dflt_vsi()
3465 vsi->vsi_num); in ice_set_dflt_vsi()
3469 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX); in ice_set_dflt_vsi()
3472 vsi->vsi_num, status); in ice_set_dflt_vsi()
3487 int ice_clear_dflt_vsi(struct ice_vsi *vsi) in ice_clear_dflt_vsi() argument
3492 if (!vsi) in ice_clear_dflt_vsi()
3495 dev = ice_pf_to_dev(vsi->back); in ice_clear_dflt_vsi()
3498 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) in ice_clear_dflt_vsi()
3501 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false, in ice_clear_dflt_vsi()
3505 vsi->vsi_num, status); in ice_clear_dflt_vsi()
3518 int ice_get_link_speed_mbps(struct ice_vsi *vsi) in ice_get_link_speed_mbps() argument
3522 link_speed = vsi->port_info->phy.link_info.link_speed; in ice_get_link_speed_mbps()
3533 int ice_get_link_speed_kbps(struct ice_vsi *vsi) in ice_get_link_speed_kbps() argument
3537 speed_mbps = ice_get_link_speed_mbps(vsi); in ice_get_link_speed_kbps()
3551 int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) in ice_set_min_bw_limit() argument
3553 struct ice_pf *pf = vsi->back; in ice_set_min_bw_limit()
3559 if (!vsi->port_info) { in ice_set_min_bw_limit()
3561 vsi->idx, vsi->type); in ice_set_min_bw_limit()
3565 speed = ice_get_link_speed_kbps(vsi); in ice_set_min_bw_limit()
3568 min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, in ice_set_min_bw_limit()
3575 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, in ice_set_min_bw_limit()
3579 min_tx_rate, ice_vsi_type_str(vsi->type), in ice_set_min_bw_limit()
3580 vsi->idx); in ice_set_min_bw_limit()
3585 min_tx_rate, ice_vsi_type_str(vsi->type)); in ice_set_min_bw_limit()
3587 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, in ice_set_min_bw_limit()
3588 vsi->idx, 0, in ice_set_min_bw_limit()
3592 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_min_bw_limit()
3597 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_min_bw_limit()
3612 int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) in ice_set_max_bw_limit() argument
3614 struct ice_pf *pf = vsi->back; in ice_set_max_bw_limit()
3620 if (!vsi->port_info) { in ice_set_max_bw_limit()
3622 vsi->idx, vsi->type); in ice_set_max_bw_limit()
3626 speed = ice_get_link_speed_kbps(vsi); in ice_set_max_bw_limit()
3629 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, in ice_set_max_bw_limit()
3636 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, in ice_set_max_bw_limit()
3640 max_tx_rate, ice_vsi_type_str(vsi->type), in ice_set_max_bw_limit()
3641 vsi->idx); in ice_set_max_bw_limit()
3646 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_max_bw_limit()
3648 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, in ice_set_max_bw_limit()
3649 vsi->idx, 0, in ice_set_max_bw_limit()
3653 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_max_bw_limit()
3658 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_max_bw_limit()
3669 int ice_set_link(struct ice_vsi *vsi, bool ena) in ice_set_link() argument
3671 struct device *dev = ice_pf_to_dev(vsi->back); in ice_set_link()
3672 struct ice_port_info *pi = vsi->port_info; in ice_set_link()
3676 if (vsi->type != ICE_VSI_PF) in ice_set_link()
3718 int ice_vsi_add_vlan_zero(struct ice_vsi *vsi) in ice_vsi_add_vlan_zero() argument
3720 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); in ice_vsi_add_vlan_zero()
3725 err = vlan_ops->add_vlan(vsi, &vlan); in ice_vsi_add_vlan_zero()
3730 if (!ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_add_vlan_zero()
3734 err = vlan_ops->add_vlan(vsi, &vlan); in ice_vsi_add_vlan_zero()
3748 int ice_vsi_del_vlan_zero(struct ice_vsi *vsi) in ice_vsi_del_vlan_zero() argument
3750 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); in ice_vsi_del_vlan_zero()
3755 err = vlan_ops->del_vlan(vsi, &vlan); in ice_vsi_del_vlan_zero()
3760 if (!ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_del_vlan_zero()
3764 err = vlan_ops->del_vlan(vsi, &vlan); in ice_vsi_del_vlan_zero()
3771 return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vsi_del_vlan_zero()
3782 static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi) in ice_vsi_num_zero_vlans() argument
3787 if (vsi->type == ICE_VSI_VF) { in ice_vsi_num_zero_vlans()
3788 if (WARN_ON(!vsi->vf)) in ice_vsi_num_zero_vlans()
3791 if (ice_vf_is_port_vlan_ena(vsi->vf)) in ice_vsi_num_zero_vlans()
3795 if (ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_num_zero_vlans()
3805 bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi) in ice_vsi_has_non_zero_vlans() argument
3807 return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi)); in ice_vsi_has_non_zero_vlans()
3814 u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi) in ice_vsi_num_non_zero_vlans() argument
3816 return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi)); in ice_vsi_num_non_zero_vlans()
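
The zero-VLAN accounting implied above: every VSI carries one implicit VLAN-0 filter, two in double VLAN mode (untagged plus the 0x8100 VID-0 filter), and none of its own when a VF sits in a port VLAN. As a sketch:

    static u16 zero_vlans(struct ice_vsi *vsi)
    {
            /* a VF inside a port VLAN has no VLAN-0 filters of its own */
            if (vsi->type == ICE_VSI_VF && vsi->vf &&
                ice_vf_is_port_vlan_ena(vsi->vf))
                    return 0;

            return ice_is_dvm_ena(&vsi->back->hw) ? 2 : 1;
    }
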
3902 ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)) in ice_vsi_update_security() argument
3906 ctx.info = vsi->info; in ice_vsi_update_security()
3910 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) in ice_vsi_update_security()
3913 vsi->info = ctx.info; in ice_vsi_update_security()
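
ice_vsi_update_security() copies vsi->info into a context, lets the fill callback flip the security bits, commits via ice_update_vsi(), and writes the result back. Typical call-site usage (ice_vsi_ctx_set_antispoof() is one such callback defined elsewhere in the driver):

    /* enable MAC anti-spoof; on success vsi->info reflects the new bits */
    err = ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
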
3945 ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set) in ice_vsi_update_local_lb() argument
3948 .info = vsi->info, in ice_vsi_update_local_lb()
3957 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) in ice_vsi_update_local_lb()
3960 vsi->info = ctx.info; in ice_vsi_update_local_lb()