Lines Matching +full:container +full:- +full:rules
1 // SPDX-License-Identifier: GPL-2.0
14 * ice_vsi_type_str - maps VSI type enum to string equivalents
38 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
55 ice_flush(&vsi->back->hw); in ice_vsi_ctrl_all_rx_rings()
67 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
75 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_arrays()
79 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_alloc_arrays()
83 vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq, in ice_vsi_alloc_arrays()
84 sizeof(*vsi->tx_rings), GFP_KERNEL); in ice_vsi_alloc_arrays()
85 if (!vsi->tx_rings) in ice_vsi_alloc_arrays()
86 return -ENOMEM; in ice_vsi_alloc_arrays()
88 vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq, in ice_vsi_alloc_arrays()
89 sizeof(*vsi->rx_rings), GFP_KERNEL); in ice_vsi_alloc_arrays()
90 if (!vsi->rx_rings) in ice_vsi_alloc_arrays()
94 * and XDP rings; at this point vsi->num_xdp_txq might not be set, in ice_vsi_alloc_arrays()
99 vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()), in ice_vsi_alloc_arrays()
100 sizeof(*vsi->txq_map), GFP_KERNEL); in ice_vsi_alloc_arrays()
102 if (!vsi->txq_map) in ice_vsi_alloc_arrays()
105 vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq, in ice_vsi_alloc_arrays()
106 sizeof(*vsi->rxq_map), GFP_KERNEL); in ice_vsi_alloc_arrays()
107 if (!vsi->rxq_map) in ice_vsi_alloc_arrays()
111 if (vsi->type == ICE_VSI_LB) in ice_vsi_alloc_arrays()
115 vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors, in ice_vsi_alloc_arrays()
116 sizeof(*vsi->q_vectors), GFP_KERNEL); in ice_vsi_alloc_arrays()
117 if (!vsi->q_vectors) in ice_vsi_alloc_arrays()
123 devm_kfree(dev, vsi->rxq_map); in ice_vsi_alloc_arrays()
125 devm_kfree(dev, vsi->txq_map); in ice_vsi_alloc_arrays()
127 devm_kfree(dev, vsi->rx_rings); in ice_vsi_alloc_arrays()
129 devm_kfree(dev, vsi->tx_rings); in ice_vsi_alloc_arrays()
130 return -ENOMEM; in ice_vsi_alloc_arrays()
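
The allocation sequence above is the classic goto-unwind idiom: each devm_kcalloc() failure jumps to a label that releases only what was already allocated, in reverse order, before returning -ENOMEM. A minimal standalone sketch of the same idiom, with plain calloc()/free() standing in for the devm helpers (the struct and names below are illustrative, not the driver's):

#include <stdlib.h>

struct vsi_arrays {
        void **tx_rings, **rx_rings;
        unsigned short *txq_map, *rxq_map;
};

/* Allocate four arrays; on any failure, free the earlier ones in
 * reverse order via cascading labels, exactly as the listing does.
 */
static int alloc_arrays(struct vsi_arrays *a, size_t ntx, size_t nrx)
{
        a->tx_rings = calloc(ntx, sizeof(*a->tx_rings));
        if (!a->tx_rings)
                return -1;
        a->rx_rings = calloc(nrx, sizeof(*a->rx_rings));
        if (!a->rx_rings)
                goto err_tx_rings;
        a->txq_map = calloc(ntx, sizeof(*a->txq_map));
        if (!a->txq_map)
                goto err_rx_rings;
        a->rxq_map = calloc(nrx, sizeof(*a->rxq_map));
        if (!a->rxq_map)
                goto err_txq_map;
        return 0;

err_txq_map:
        free(a->txq_map);
err_rx_rings:
        free(a->rx_rings);
err_tx_rings:
        free(a->tx_rings);
        return -1;
}
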
134 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
139 switch (vsi->type) { in ice_vsi_set_num_desc()
145 * ethtool -G so we should keep those values instead of in ice_vsi_set_num_desc()
148 if (!vsi->num_rx_desc) in ice_vsi_set_num_desc()
149 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; in ice_vsi_set_num_desc()
150 if (!vsi->num_tx_desc) in ice_vsi_set_num_desc()
151 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; in ice_vsi_set_num_desc()
154 dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n", in ice_vsi_set_num_desc()
155 vsi->type); in ice_vsi_set_num_desc()
171 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
178 enum ice_vsi_type vsi_type = vsi->type; in ice_vsi_set_num_qs()
179 struct ice_pf *pf = vsi->back; in ice_vsi_set_num_qs()
180 struct ice_vf *vf = vsi->vf; in ice_vsi_set_num_qs()
187 if (vsi->req_txq) { in ice_vsi_set_num_qs()
188 vsi->alloc_txq = vsi->req_txq; in ice_vsi_set_num_qs()
189 vsi->num_txq = vsi->req_txq; in ice_vsi_set_num_qs()
191 vsi->alloc_txq = ice_get_txq_count(pf); in ice_vsi_set_num_qs()
194 pf->num_lan_tx = vsi->alloc_txq; in ice_vsi_set_num_qs()
197 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_set_num_qs()
198 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
200 if (vsi->req_rxq) { in ice_vsi_set_num_qs()
201 vsi->alloc_rxq = vsi->req_rxq; in ice_vsi_set_num_qs()
202 vsi->num_rxq = vsi->req_rxq; in ice_vsi_set_num_qs()
204 vsi->alloc_rxq = ice_get_rxq_count(pf); in ice_vsi_set_num_qs()
208 pf->num_lan_rx = vsi->alloc_rxq; in ice_vsi_set_num_qs()
210 vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq); in ice_vsi_set_num_qs()
213 vsi->alloc_txq = 1; in ice_vsi_set_num_qs()
214 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
215 vsi->num_q_vectors = 1; in ice_vsi_set_num_qs()
216 vsi->irq_dyn_alloc = true; in ice_vsi_set_num_qs()
219 if (vf->num_req_qs) in ice_vsi_set_num_qs()
220 vf->num_vf_qs = vf->num_req_qs; in ice_vsi_set_num_qs()
221 vsi->alloc_txq = vf->num_vf_qs; in ice_vsi_set_num_qs()
222 vsi->alloc_rxq = vf->num_vf_qs; in ice_vsi_set_num_qs()
223 /* vf->num_msix includes (VF miscellaneous vector + in ice_vsi_set_num_qs()
224 * data queue interrupts). Since vsi->num_q_vectors is number in ice_vsi_set_num_qs()
228 vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF; in ice_vsi_set_num_qs()
231 vsi->alloc_txq = 1; in ice_vsi_set_num_qs()
232 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
233 vsi->num_q_vectors = 1; in ice_vsi_set_num_qs()
236 vsi->alloc_txq = 0; in ice_vsi_set_num_qs()
237 vsi->alloc_rxq = 0; in ice_vsi_set_num_qs()
240 vsi->alloc_txq = 1; in ice_vsi_set_num_qs()
241 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
252 * ice_get_free_slot - get the next free (NULL) location index in array
265 if (curr < (size - 1) && !tmp_array[curr + 1]) { in ice_get_free_slot()
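
ice_get_free_slot() first probes the slot just past the caller's hint (the check shown above) and only then falls back to a scan. The fallback is elided from this listing, so the sketch below reconstructs it as an assumption; the ICE_NO_VSI sentinel for a full array is suggested by the check in ice_vsi_alloc() further down.

/* Hint-first free-slot search (standalone sketch; the linear-scan
 * fallback and the "array full" sentinel are assumptions).
 */
static int get_free_slot(void **array, int size, int curr)
{
        int i;

        if (curr < (size - 1) && !array[curr + 1])
                return curr + 1;

        for (i = 0; i < size; i++)
                if (!array[i])
                        return i;

        return -1;      /* full; the driver appears to use ICE_NO_VSI here */
}
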
281 * ice_vsi_delete_from_hw - delete a VSI from the switch
286 struct ice_pf *pf = vsi->back; in ice_vsi_delete_from_hw()
295 if (vsi->type == ICE_VSI_VF) in ice_vsi_delete_from_hw()
296 ctxt->vf_num = vsi->vf->vf_id; in ice_vsi_delete_from_hw()
297 ctxt->vsi_num = vsi->vsi_num; in ice_vsi_delete_from_hw()
299 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info)); in ice_vsi_delete_from_hw()
301 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); in ice_vsi_delete_from_hw()
303 dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n", in ice_vsi_delete_from_hw()
304 vsi->vsi_num, status); in ice_vsi_delete_from_hw()
310 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
315 struct ice_pf *pf = vsi->back; in ice_vsi_free_arrays()
321 devm_kfree(dev, vsi->q_vectors); in ice_vsi_free_arrays()
322 vsi->q_vectors = NULL; in ice_vsi_free_arrays()
323 devm_kfree(dev, vsi->tx_rings); in ice_vsi_free_arrays()
324 vsi->tx_rings = NULL; in ice_vsi_free_arrays()
325 devm_kfree(dev, vsi->rx_rings); in ice_vsi_free_arrays()
326 vsi->rx_rings = NULL; in ice_vsi_free_arrays()
327 devm_kfree(dev, vsi->txq_map); in ice_vsi_free_arrays()
328 vsi->txq_map = NULL; in ice_vsi_free_arrays()
329 devm_kfree(dev, vsi->rxq_map); in ice_vsi_free_arrays()
330 vsi->rxq_map = NULL; in ice_vsi_free_arrays()
334 * ice_vsi_free_stats - Free the ring statistics structures
340 struct ice_pf *pf = vsi->back; in ice_vsi_free_stats()
343 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_free_stats()
345 if (!pf->vsi_stats) in ice_vsi_free_stats()
348 vsi_stat = pf->vsi_stats[vsi->idx]; in ice_vsi_free_stats()
353 if (vsi_stat->tx_ring_stats[i]) { in ice_vsi_free_stats()
354 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); in ice_vsi_free_stats()
355 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); in ice_vsi_free_stats()
360 if (vsi_stat->rx_ring_stats[i]) { in ice_vsi_free_stats()
361 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); in ice_vsi_free_stats()
362 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); in ice_vsi_free_stats()
366 kfree(vsi_stat->tx_ring_stats); in ice_vsi_free_stats()
367 kfree(vsi_stat->rx_ring_stats); in ice_vsi_free_stats()
369 pf->vsi_stats[vsi->idx] = NULL; in ice_vsi_free_stats()
373 * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
381 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_ring_stats()
384 vsi_stats = pf->vsi_stats[vsi->idx]; in ice_vsi_alloc_ring_stats()
385 tx_ring_stats = vsi_stats->tx_ring_stats; in ice_vsi_alloc_ring_stats()
386 rx_ring_stats = vsi_stats->rx_ring_stats; in ice_vsi_alloc_ring_stats()
393 ring = vsi->tx_rings[i]; in ice_vsi_alloc_ring_stats()
404 ring->ring_stats = ring_stats; in ice_vsi_alloc_ring_stats()
412 ring = vsi->rx_rings[i]; in ice_vsi_alloc_ring_stats()
423 ring->ring_stats = ring_stats; in ice_vsi_alloc_ring_stats()
430 return -ENOMEM; in ice_vsi_alloc_ring_stats()
434 * ice_vsi_free - clean up and deallocate the provided VSI
445 if (!vsi || !vsi->back) in ice_vsi_free()
448 pf = vsi->back; in ice_vsi_free()
451 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { in ice_vsi_free()
452 dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx); in ice_vsi_free()
456 mutex_lock(&pf->sw_mutex); in ice_vsi_free()
459 pf->vsi[vsi->idx] = NULL; in ice_vsi_free()
460 pf->next_vsi = vsi->idx; in ice_vsi_free()
464 mutex_destroy(&vsi->xdp_state_lock); in ice_vsi_free()
465 mutex_unlock(&pf->sw_mutex); in ice_vsi_free()
476 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
484 if (!q_vector->tx.tx_ring) in ice_msix_clean_ctrl_vsi()
488 ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET); in ice_msix_clean_ctrl_vsi()
489 ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring); in ice_msix_clean_ctrl_vsi()
495 * ice_msix_clean_rings - MSIX mode Interrupt Handler
503 if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) in ice_msix_clean_rings()
506 q_vector->total_events++; in ice_msix_clean_rings()
508 napi_schedule(&q_vector->napi); in ice_msix_clean_rings()
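
Pieced together from the fragments above, the MSI-X fast path does almost nothing in hard-IRQ context: it counts the event and defers all ring cleanup to NAPI. A hedged reconstruction (the IRQ_HANDLED returns are elided from the listing and assumed here):

static irqreturn_t msix_clean_rings(int irq, void *data)
{
        struct ice_q_vector *q_vector = data;

        /* no rings attached to this vector yet: nothing to schedule */
        if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
                return IRQ_HANDLED;

        q_vector->total_events++;
        napi_schedule(&q_vector->napi);

        return IRQ_HANDLED;
}
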
514 * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
520 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_stat_arrays()
522 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_alloc_stat_arrays()
524 if (!pf->vsi_stats) in ice_vsi_alloc_stat_arrays()
525 return -ENOENT; in ice_vsi_alloc_stat_arrays()
527 if (pf->vsi_stats[vsi->idx]) in ice_vsi_alloc_stat_arrays()
533 return -ENOMEM; in ice_vsi_alloc_stat_arrays()
535 vsi_stat->tx_ring_stats = in ice_vsi_alloc_stat_arrays()
536 kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats), in ice_vsi_alloc_stat_arrays()
538 if (!vsi_stat->tx_ring_stats) in ice_vsi_alloc_stat_arrays()
541 vsi_stat->rx_ring_stats = in ice_vsi_alloc_stat_arrays()
542 kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats), in ice_vsi_alloc_stat_arrays()
544 if (!vsi_stat->rx_ring_stats) in ice_vsi_alloc_stat_arrays()
547 pf->vsi_stats[vsi->idx] = vsi_stat; in ice_vsi_alloc_stat_arrays()
552 kfree(vsi_stat->rx_ring_stats); in ice_vsi_alloc_stat_arrays()
554 kfree(vsi_stat->tx_ring_stats); in ice_vsi_alloc_stat_arrays()
556 pf->vsi_stats[vsi->idx] = NULL; in ice_vsi_alloc_stat_arrays()
557 return -ENOMEM; in ice_vsi_alloc_stat_arrays()
561 * ice_vsi_alloc_def - set default values for already allocated VSI
568 if (vsi->type != ICE_VSI_CHNL) { in ice_vsi_alloc_def()
571 return -ENOMEM; in ice_vsi_alloc_def()
574 vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev); in ice_vsi_alloc_def()
576 switch (vsi->type) { in ice_vsi_alloc_def()
580 vsi->irq_handler = ice_msix_clean_rings; in ice_vsi_alloc_def()
584 vsi->irq_handler = ice_msix_clean_ctrl_vsi; in ice_vsi_alloc_def()
588 return -EINVAL; in ice_vsi_alloc_def()
590 vsi->num_rxq = ch->num_rxq; in ice_vsi_alloc_def()
591 vsi->num_txq = ch->num_txq; in ice_vsi_alloc_def()
592 vsi->next_base_q = ch->base_q; in ice_vsi_alloc_def()
599 return -EINVAL; in ice_vsi_alloc_def()
606 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
621 mutex_lock(&pf->sw_mutex); in ice_vsi_alloc()
624 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index in ice_vsi_alloc()
627 if (pf->next_vsi == ICE_NO_VSI) { in ice_vsi_alloc()
636 vsi->back = pf; in ice_vsi_alloc()
637 set_bit(ICE_VSI_DOWN, vsi->state); in ice_vsi_alloc()
640 vsi->idx = pf->next_vsi; in ice_vsi_alloc()
641 pf->vsi[pf->next_vsi] = vsi; in ice_vsi_alloc()
643 /* prepare pf->next_vsi for next use */ in ice_vsi_alloc()
644 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, in ice_vsi_alloc()
645 pf->next_vsi); in ice_vsi_alloc()
647 mutex_init(&vsi->xdp_state_lock); in ice_vsi_alloc()
650 mutex_unlock(&pf->sw_mutex); in ice_vsi_alloc()
655 * ice_alloc_fd_res - Allocate FD resource for a VSI
660 * Returns 0 on success, -EPERM on no-op or -EIO on failure
664 struct ice_pf *pf = vsi->back; in ice_alloc_fd_res()
671 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) in ice_alloc_fd_res()
672 return -EPERM; in ice_alloc_fd_res()
674 if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF || in ice_alloc_fd_res()
675 vsi->type == ICE_VSI_CHNL)) in ice_alloc_fd_res()
676 return -EPERM; in ice_alloc_fd_res()
679 g_val = pf->hw.func_caps.fd_fltr_guar; in ice_alloc_fd_res()
681 return -EPERM; in ice_alloc_fd_res()
684 b_val = pf->hw.func_caps.fd_fltr_best_effort; in ice_alloc_fd_res()
686 return -EPERM; in ice_alloc_fd_res()
696 if (vsi->type == ICE_VSI_PF) { in ice_alloc_fd_res()
697 vsi->num_gfltr = g_val; in ice_alloc_fd_res()
701 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_alloc_fd_res()
703 return -EPERM; in ice_alloc_fd_res()
705 vsi->num_gfltr = ICE_PF_VSI_GFLTR; in ice_alloc_fd_res()
709 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
710 } else if (vsi->type == ICE_VSI_VF) { in ice_alloc_fd_res()
711 vsi->num_gfltr = 0; in ice_alloc_fd_res()
714 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
721 return -EPERM; in ice_alloc_fd_res()
723 if (!main_vsi->all_numtc) in ice_alloc_fd_res()
724 return -EINVAL; in ice_alloc_fd_res()
727 numtc = main_vsi->all_numtc - ICE_CHNL_START_TC; in ice_alloc_fd_res()
733 return -EPERM; in ice_alloc_fd_res()
735 g_val -= ICE_PF_VSI_GFLTR; in ice_alloc_fd_res()
737 vsi->num_gfltr = g_val / numtc; in ice_alloc_fd_res()
740 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
747 * ice_vsi_get_qs - Assign queues from PF to VSI
754 struct ice_pf *pf = vsi->back; in ice_vsi_get_qs()
756 .qs_mutex = &pf->avail_q_mutex, in ice_vsi_get_qs()
757 .pf_map = pf->avail_txqs, in ice_vsi_get_qs()
758 .pf_map_size = pf->max_pf_txqs, in ice_vsi_get_qs()
759 .q_count = vsi->alloc_txq, in ice_vsi_get_qs()
761 .vsi_map = vsi->txq_map, in ice_vsi_get_qs()
766 .qs_mutex = &pf->avail_q_mutex, in ice_vsi_get_qs()
767 .pf_map = pf->avail_rxqs, in ice_vsi_get_qs()
768 .pf_map_size = pf->max_pf_rxqs, in ice_vsi_get_qs()
769 .q_count = vsi->alloc_rxq, in ice_vsi_get_qs()
771 .vsi_map = vsi->rxq_map, in ice_vsi_get_qs()
777 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_get_qs()
783 vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode; in ice_vsi_get_qs()
788 vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode; in ice_vsi_get_qs()
794 * ice_vsi_put_qs - Release queues from VSI to PF
799 struct ice_pf *pf = vsi->back; in ice_vsi_put_qs()
802 mutex_lock(&pf->avail_q_mutex); in ice_vsi_put_qs()
805 clear_bit(vsi->txq_map[i], pf->avail_txqs); in ice_vsi_put_qs()
806 vsi->txq_map[i] = ICE_INVAL_Q_INDEX; in ice_vsi_put_qs()
810 clear_bit(vsi->rxq_map[i], pf->avail_rxqs); in ice_vsi_put_qs()
811 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; in ice_vsi_put_qs()
814 mutex_unlock(&pf->avail_q_mutex); in ice_vsi_put_qs()
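
In pf->avail_txqs/avail_rxqs a set bit marks a queue as owned by some VSI, so releasing a queue (above) is clear_bit() and claiming one is a search for a zero bit, all under pf->avail_q_mutex. A sketch of the claiming side, assuming the standard kernel bitmap helpers; the error code and helper name are assumptions:

/* Claim one free queue ID; caller holds pf->avail_q_mutex. */
static int claim_queue(unsigned long *pf_map, unsigned int map_size)
{
        unsigned int q = find_first_zero_bit(pf_map, map_size);

        if (q >= map_size)
                return -ENOSPC;
        set_bit(q, pf_map);
        return q;
}
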
825 return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_is_safe_mode()
842 return err ? test_bit(ICE_FLAG_RDMA_ENA, pf->flags) : value.vbool; in ice_is_rdma_ena()
846 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
854 struct ice_pf *pf = vsi->back; in ice_vsi_clean_rss_flow_fld()
860 status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); in ice_vsi_clean_rss_flow_fld()
863 vsi->vsi_num, status); in ice_vsi_clean_rss_flow_fld()
867 * ice_rss_clean - Delete RSS related VSI structures and configuration
872 struct ice_pf *pf = vsi->back; in ice_rss_clean()
877 devm_kfree(dev, vsi->rss_hkey_user); in ice_rss_clean()
878 devm_kfree(dev, vsi->rss_lut_user); in ice_rss_clean()
883 ice_rem_vsi_rss_list(&pf->hw, vsi->idx); in ice_rss_clean()
887 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
893 struct ice_pf *pf = vsi->back; in ice_vsi_set_rss_params()
896 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_set_rss_params()
897 vsi->rss_size = 1; in ice_vsi_set_rss_params()
901 cap = &pf->hw.func_caps.common_cap; in ice_vsi_set_rss_params()
902 max_rss_size = BIT(cap->rss_table_entry_width); in ice_vsi_set_rss_params()
903 switch (vsi->type) { in ice_vsi_set_rss_params()
907 vsi->rss_table_size = (u16)cap->rss_table_size; in ice_vsi_set_rss_params()
908 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_set_rss_params()
909 vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size); in ice_vsi_set_rss_params()
911 vsi->rss_size = min_t(u16, num_online_cpus(), in ice_vsi_set_rss_params()
913 vsi->rss_lut_type = ICE_LUT_PF; in ice_vsi_set_rss_params()
916 vsi->rss_table_size = ICE_LUT_VSI_SIZE; in ice_vsi_set_rss_params()
917 vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size); in ice_vsi_set_rss_params()
918 vsi->rss_lut_type = ICE_LUT_VSI; in ice_vsi_set_rss_params()
924 vsi->rss_table_size = ICE_LUT_VSI_SIZE; in ice_vsi_set_rss_params()
925 vsi->rss_size = ICE_MAX_RSS_QS_PER_VF; in ice_vsi_set_rss_params()
926 vsi->rss_lut_type = ICE_LUT_VSI; in ice_vsi_set_rss_params()
932 ice_vsi_type_str(vsi->type)); in ice_vsi_set_rss_params()
938 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
948 memset(&ctxt->info, 0, sizeof(ctxt->info)); in ice_set_dflt_vsi_ctx()
950 ctxt->alloc_from_pool = true; in ice_set_dflt_vsi_ctx()
952 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; in ice_set_dflt_vsi_ctx()
954 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; in ice_set_dflt_vsi_ctx()
956 ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M, in ice_set_dflt_vsi_ctx()
958 /* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which in ice_set_dflt_vsi_ctx()
961 * DVM - leave inner VLAN in packet by default in ice_set_dflt_vsi_ctx()
964 ctxt->info.inner_vlan_flags |= in ice_set_dflt_vsi_ctx()
967 ctxt->info.outer_vlan_flags = in ice_set_dflt_vsi_ctx()
970 ctxt->info.outer_vlan_flags |= in ice_set_dflt_vsi_ctx()
973 ctxt->info.outer_vlan_flags |= in ice_set_dflt_vsi_ctx()
986 ctxt->info.ingress_table = cpu_to_le32(table); in ice_set_dflt_vsi_ctx()
987 ctxt->info.egress_table = cpu_to_le32(table); in ice_set_dflt_vsi_ctx()
989 ctxt->info.outer_up_table = cpu_to_le32(table); in ice_set_dflt_vsi_ctx()
994 * ice_vsi_setup_q_map - Setup a VSI queue map
1002 u16 qcount_tx = vsi->alloc_txq; in ice_vsi_setup_q_map()
1003 u16 qcount_rx = vsi->alloc_rxq; in ice_vsi_setup_q_map()
1007 if (!vsi->tc_cfg.numtc) { in ice_vsi_setup_q_map()
1009 vsi->tc_cfg.numtc = 1; in ice_vsi_setup_q_map()
1010 vsi->tc_cfg.ena_tc = 1; in ice_vsi_setup_q_map()
1013 num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC); in ice_vsi_setup_q_map()
1016 num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc; in ice_vsi_setup_q_map()
1020 /* find the (rounded up) power-of-2 of qcount */ in ice_vsi_setup_q_map()
1026 * queues allocated to TC0. Number of queues is a power-of-2. in ice_vsi_setup_q_map()
1035 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { in ice_vsi_setup_q_map()
1037 vsi->tc_cfg.tc_info[i].qoffset = 0; in ice_vsi_setup_q_map()
1038 vsi->tc_cfg.tc_info[i].qcount_rx = 1; in ice_vsi_setup_q_map()
1039 vsi->tc_cfg.tc_info[i].qcount_tx = 1; in ice_vsi_setup_q_map()
1040 vsi->tc_cfg.tc_info[i].netdev_tc = 0; in ice_vsi_setup_q_map()
1041 ctxt->info.tc_mapping[i] = 0; in ice_vsi_setup_q_map()
1046 vsi->tc_cfg.tc_info[i].qoffset = offset; in ice_vsi_setup_q_map()
1047 vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc; in ice_vsi_setup_q_map()
1048 vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc; in ice_vsi_setup_q_map()
1049 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; in ice_vsi_setup_q_map()
1055 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); in ice_vsi_setup_q_map()
1058 /* if offset is non-zero, means it is calculated correctly based on in ice_vsi_setup_q_map()
1060 * be correct and non-zero because it is based off - VSI's in ice_vsi_setup_q_map()
1069 if (rx_count > vsi->alloc_rxq) { in ice_vsi_setup_q_map()
1070 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map()
1071 rx_count, vsi->alloc_rxq); in ice_vsi_setup_q_map()
1072 return -EINVAL; in ice_vsi_setup_q_map()
1075 if (tx_count > vsi->alloc_txq) { in ice_vsi_setup_q_map()
1076 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map()
1077 tx_count, vsi->alloc_txq); in ice_vsi_setup_q_map()
1078 return -EINVAL; in ice_vsi_setup_q_map()
1081 vsi->num_txq = tx_count; in ice_vsi_setup_q_map()
1082 vsi->num_rxq = rx_count; in ice_vsi_setup_q_map()
1084 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { in ice_vsi_setup_q_map()
1085 dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); in ice_vsi_setup_q_map()
1089 vsi->num_txq = vsi->num_rxq; in ice_vsi_setup_q_map()
1093 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); in ice_vsi_setup_q_map()
1098 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); in ice_vsi_setup_q_map()
1099 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); in ice_vsi_setup_q_map()
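
Each 16-bit tc_mapping word built earlier in this function packs a TC's first queue together with the log2 of its rounded-up power-of-2 queue count. A standalone sketch of the packing; the exact field positions (offset in the low 11 bits, exponent in the next 4) are an assumption based on the ICE_AQ_VSI_TC_Q_OFFSET/ICE_AQ_VSI_TC_Q_NUM masks:

#include <stdint.h>

static uint16_t tc_qmap(uint16_t offset, uint16_t qcount)
{
        uint16_t pow = 0;

        /* round qcount up to a power of 2, per the comment above */
        while ((1u << pow) < qcount)
                pow++;

        return (offset & 0x7FF) | ((pow & 0xF) << 11);
}

With four queues at offset 16, for example, tc_qmap(16, 4) yields 0x1010: offset 16 in the low field plus exponent 2 in the upper field.
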
1105 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
1114 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL && in ice_set_fd_vsi_ctx()
1115 vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL) in ice_set_fd_vsi_ctx()
1119 ctxt->info.valid_sections |= cpu_to_le16(val); in ice_set_fd_vsi_ctx()
1127 ctxt->info.fd_options = cpu_to_le16(val); in ice_set_fd_vsi_ctx()
1129 ctxt->info.max_fd_fltr_dedicated = in ice_set_fd_vsi_ctx()
1130 cpu_to_le16(vsi->num_gfltr); in ice_set_fd_vsi_ctx()
1132 ctxt->info.max_fd_fltr_shared = in ice_set_fd_vsi_ctx()
1133 cpu_to_le16(vsi->num_bfltr); in ice_set_fd_vsi_ctx()
1138 ctxt->info.fd_def_q = cpu_to_le16(val); in ice_set_fd_vsi_ctx()
1143 ctxt->info.fd_report_opt = cpu_to_le16(val); in ice_set_fd_vsi_ctx()
1147 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
1157 pf = vsi->back; in ice_set_rss_vsi_ctx()
1160 switch (vsi->type) { in ice_set_rss_vsi_ctx()
1173 ice_vsi_type_str(vsi->type)); in ice_set_rss_vsi_ctx()
1178 vsi->rss_hfunc = hash_type; in ice_set_rss_vsi_ctx()
1180 ctxt->info.q_opt_rss = in ice_set_rss_vsi_ctx()
1192 qcount = vsi->num_rxq; in ice_chnl_vsi_setup_q_map()
1198 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); in ice_chnl_vsi_setup_q_map()
1199 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); in ice_chnl_vsi_setup_q_map()
1200 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q); in ice_chnl_vsi_setup_q_map()
1201 ctxt->info.q_mapping[1] = cpu_to_le16(qcount); in ice_chnl_vsi_setup_q_map()
1205 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
1212 return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; in ice_vsi_is_vlan_pruning_ena()
1216 * ice_vsi_init - Create and initialize a VSI
1228 struct ice_pf *pf = vsi->back; in ice_vsi_init()
1229 struct ice_hw *hw = &pf->hw; in ice_vsi_init()
1237 return -ENOMEM; in ice_vsi_init()
1239 switch (vsi->type) { in ice_vsi_init()
1243 ctxt->flags = ICE_AQ_VSI_TYPE_PF; in ice_vsi_init()
1247 ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2; in ice_vsi_init()
1250 ctxt->flags = ICE_AQ_VSI_TYPE_VF; in ice_vsi_init()
1251 /* VF number here is the absolute VF number (0-255) */ in ice_vsi_init()
1252 ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id; in ice_vsi_init()
1255 ret = -ENODEV; in ice_vsi_init()
1262 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_init()
1267 ctxt->info.sw_flags2 |= in ice_vsi_init()
1270 ctxt->info.sw_flags2 &= in ice_vsi_init()
1275 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) in ice_vsi_init()
1278 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) in ice_vsi_init()
1279 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; in ice_vsi_init()
1282 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) && in ice_vsi_init()
1283 vsi->type != ICE_VSI_CTRL) { in ice_vsi_init()
1289 ctxt->info.valid_sections |= in ice_vsi_init()
1293 ctxt->info.sw_id = vsi->port_info->sw_id; in ice_vsi_init()
1294 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_init()
1306 ctxt->info.valid_sections |= in ice_vsi_init()
1311 if (vsi->type == ICE_VSI_PF) { in ice_vsi_init()
1312 ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; in ice_vsi_init()
1313 ctxt->info.valid_sections |= in ice_vsi_init()
1318 ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_init()
1321 ret = -EIO; in ice_vsi_init()
1325 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_init()
1328 ret = -EIO; in ice_vsi_init()
1334 vsi->info = ctxt->info; in ice_vsi_init()
1337 vsi->vsi_num = ctxt->vsi_num; in ice_vsi_init()
1345 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
1353 if (vsi->q_vectors) { in ice_vsi_clear_rings()
1355 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_clear_rings()
1358 q_vector->tx.tx_ring = NULL; in ice_vsi_clear_rings()
1359 q_vector->rx.rx_ring = NULL; in ice_vsi_clear_rings()
1364 if (vsi->tx_rings) { in ice_vsi_clear_rings()
1366 if (vsi->tx_rings[i]) { in ice_vsi_clear_rings()
1367 kfree_rcu(vsi->tx_rings[i], rcu); in ice_vsi_clear_rings()
1368 WRITE_ONCE(vsi->tx_rings[i], NULL); in ice_vsi_clear_rings()
1372 if (vsi->rx_rings) { in ice_vsi_clear_rings()
1374 if (vsi->rx_rings[i]) { in ice_vsi_clear_rings()
1375 kfree_rcu(vsi->rx_rings[i], rcu); in ice_vsi_clear_rings()
1376 WRITE_ONCE(vsi->rx_rings[i], NULL); in ice_vsi_clear_rings()
1383 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
1388 bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw); in ice_vsi_alloc_rings()
1389 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_rings()
1404 ring->q_index = i; in ice_vsi_alloc_rings()
1405 ring->reg_idx = vsi->txq_map[i]; in ice_vsi_alloc_rings()
1406 ring->vsi = vsi; in ice_vsi_alloc_rings()
1407 ring->tx_tstamps = &pf->ptp.port.tx; in ice_vsi_alloc_rings()
1408 ring->dev = dev; in ice_vsi_alloc_rings()
1409 ring->count = vsi->num_tx_desc; in ice_vsi_alloc_rings()
1410 ring->txq_teid = ICE_INVAL_TEID; in ice_vsi_alloc_rings()
1412 ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2; in ice_vsi_alloc_rings()
1414 ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1; in ice_vsi_alloc_rings()
1415 WRITE_ONCE(vsi->tx_rings[i], ring); in ice_vsi_alloc_rings()
1427 ring->q_index = i; in ice_vsi_alloc_rings()
1428 ring->reg_idx = vsi->rxq_map[i]; in ice_vsi_alloc_rings()
1429 ring->vsi = vsi; in ice_vsi_alloc_rings()
1430 ring->netdev = vsi->netdev; in ice_vsi_alloc_rings()
1431 ring->dev = dev; in ice_vsi_alloc_rings()
1432 ring->count = vsi->num_rx_desc; in ice_vsi_alloc_rings()
1433 ring->cached_phctime = pf->ptp.cached_phc_time; in ice_vsi_alloc_rings()
1436 ring->flags |= ICE_RX_FLAGS_RING_GCS; in ice_vsi_alloc_rings()
1438 WRITE_ONCE(vsi->rx_rings[i], ring); in ice_vsi_alloc_rings()
1445 return -ENOMEM; in ice_vsi_alloc_rings()
1449 * ice_vsi_manage_rss_lut - disable/enable RSS
1461 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in ice_vsi_manage_rss_lut()
1466 if (vsi->rss_lut_user) in ice_vsi_manage_rss_lut()
1467 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in ice_vsi_manage_rss_lut()
1469 ice_fill_rss_lut(lut, vsi->rss_table_size, in ice_vsi_manage_rss_lut()
1470 vsi->rss_size); in ice_vsi_manage_rss_lut()
1473 ice_set_rss_lut(vsi, lut, vsi->rss_table_size); in ice_vsi_manage_rss_lut()
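
When the user has not pinned a LUT via ethtool, ice_fill_rss_lut() spreads the active queues across the whole table; a round-robin modulo fill is the natural reading of the call above, sketched here as an assumption:

#include <stdint.h>

/* Assumed behaviour of ice_fill_rss_lut(): entry i points at queue
 * i % rss_size, spreading hash buckets evenly over active queues.
 */
static void fill_rss_lut(uint8_t *lut, uint16_t rss_table_size,
                         uint16_t rss_size)
{
        uint16_t i;

        for (i = 0; i < rss_table_size; i++)
                lut[i] = i % rss_size;
}
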
1478 * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
1488 vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS; in ice_vsi_cfg_crc_strip()
1490 vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS; in ice_vsi_cfg_crc_strip()
1494 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
1499 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_rss_lut_key()
1505 if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size && in ice_vsi_cfg_rss_lut_key()
1506 (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) { in ice_vsi_cfg_rss_lut_key()
1507 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size); in ice_vsi_cfg_rss_lut_key()
1509 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq); in ice_vsi_cfg_rss_lut_key()
1513 * orig_rss_size so that when tc-qdisc is deleted, main VSI in ice_vsi_cfg_rss_lut_key()
1515 * to begin with (prior to setup-tc for ADQ config) in ice_vsi_cfg_rss_lut_key()
1517 if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size && in ice_vsi_cfg_rss_lut_key()
1518 vsi->orig_rss_size <= vsi->num_rxq) { in ice_vsi_cfg_rss_lut_key()
1519 vsi->rss_size = vsi->orig_rss_size; in ice_vsi_cfg_rss_lut_key()
1521 vsi->orig_rss_size = 0; in ice_vsi_cfg_rss_lut_key()
1525 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in ice_vsi_cfg_rss_lut_key()
1527 return -ENOMEM; in ice_vsi_cfg_rss_lut_key()
1529 if (vsi->rss_lut_user) in ice_vsi_cfg_rss_lut_key()
1530 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in ice_vsi_cfg_rss_lut_key()
1532 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); in ice_vsi_cfg_rss_lut_key()
1534 err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size); in ice_vsi_cfg_rss_lut_key()
1542 err = -ENOMEM; in ice_vsi_cfg_rss_lut_key()
1546 if (vsi->rss_hkey_user) in ice_vsi_cfg_rss_lut_key()
1547 memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); in ice_vsi_cfg_rss_lut_key()
1562 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
1571 struct ice_pf *pf = vsi->back; in ice_vsi_set_vf_rss_flow_fld()
1578 vsi->vsi_num); in ice_vsi_set_vf_rss_flow_fld()
1582 status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA); in ice_vsi_set_vf_rss_flow_fld()
1585 vsi->vsi_num, status); in ice_vsi_set_vf_rss_flow_fld()
1599 /* configure RSS for sctp4 with input set IP src/dst - only support
1600 * RSS on SCTPv4 on outer headers (non-tunneled)
1629 /* configure RSS for sctp6 with input set IPv6 src/dst - only support
1630 * RSS on SCTPv6 on outer headers (non-tunneled)
1658 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
1670 u16 vsi_num = vsi->vsi_num; in ice_vsi_set_rss_flow_fld()
1671 struct ice_pf *pf = vsi->back; in ice_vsi_set_rss_flow_fld()
1672 struct ice_hw *hw = &pf->hw; in ice_vsi_set_rss_flow_fld()
1689 cfg->addl_hdrs, cfg->hash_flds, in ice_vsi_set_rss_flow_fld()
1690 cfg->hdr_type, cfg->symm); in ice_vsi_set_rss_flow_fld()
1695 * ice_pf_state_is_nominal - checks the PF for nominal state
1712 if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS)) in ice_pf_state_is_nominal()
1725 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
1731 struct ice_hw *hw = &vsi->back->hw; in ice_update_eth_stats()
1732 struct ice_pf *pf = vsi->back; in ice_update_eth_stats()
1733 u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ in ice_update_eth_stats()
1735 prev_es = &vsi->eth_stats_prev; in ice_update_eth_stats()
1736 cur_es = &vsi->eth_stats; in ice_update_eth_stats()
1738 if (ice_is_reset_in_progress(pf->state)) in ice_update_eth_stats()
1739 vsi->stat_offsets_loaded = false; in ice_update_eth_stats()
1741 ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1742 &prev_es->rx_bytes, &cur_es->rx_bytes); in ice_update_eth_stats()
1744 ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1745 &prev_es->rx_unicast, &cur_es->rx_unicast); in ice_update_eth_stats()
1747 ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1748 &prev_es->rx_multicast, &cur_es->rx_multicast); in ice_update_eth_stats()
1750 ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1751 &prev_es->rx_broadcast, &cur_es->rx_broadcast); in ice_update_eth_stats()
1753 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1754 &prev_es->rx_discards, &cur_es->rx_discards); in ice_update_eth_stats()
1756 ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1757 &prev_es->tx_bytes, &cur_es->tx_bytes); in ice_update_eth_stats()
1759 ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1760 &prev_es->tx_unicast, &cur_es->tx_unicast); in ice_update_eth_stats()
1762 ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1763 &prev_es->tx_multicast, &cur_es->tx_multicast); in ice_update_eth_stats()
1765 ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1766 &prev_es->tx_broadcast, &cur_es->tx_broadcast); in ice_update_eth_stats()
1768 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1769 &prev_es->tx_errors, &cur_es->tx_errors); in ice_update_eth_stats()
1771 vsi->stat_offsets_loaded = true; in ice_update_eth_stats()
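
The ice_stat_update40/32 helpers accumulate deltas of free-running hardware counters against the previous snapshot, which is why stat_offsets_loaded is forced false while a reset is in progress (lines 1738-1739 above): the first read afterwards re-baselines instead of producing a bogus jump. A standalone sketch of the wrap-aware delta for a 40-bit counter (the helper internals are elided from this listing, so treat the technique as an assumption):

#include <stdint.h>

static uint64_t delta40(uint64_t prev, uint64_t cur)
{
        if (cur >= prev)
                return cur - prev;
        /* the 40-bit counter wrapped since the last snapshot */
        return (cur + (1ULL << 40)) - prev;
}
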
1775 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
1803 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
1820 * ice_write_intrl - write throttle rate limit to interrupt specific register
1826 struct ice_hw *hw = &q_vector->vsi->back->hw; in ice_write_intrl()
1828 wr32(hw, GLINT_RATE(q_vector->reg_idx), in ice_write_intrl()
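
ice_intrl_usec_to_reg() turns a microsecond rate limit into GLINT_RATE units for the write above. A sketch assuming the conversion is a division by the hardware granularity with an enable bit OR'd in for nonzero limits; the bit position and macro name are assumptions:

#include <stdint.h>

#define INTRL_ENA       (1u << 6)       /* assumed GLINT_RATE enable bit */

/* gran_usec is the hardware granularity in microseconds (nonzero) */
static uint32_t intrl_usec_to_reg(uint8_t intrl_usec, uint8_t gran_usec)
{
        uint32_t val = intrl_usec / gran_usec;

        return val ? (val | INTRL_ENA) : 0;
}
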
1834 switch (rc->type) { in ice_pull_qvec_from_rc()
1836 if (rc->rx_ring) in ice_pull_qvec_from_rc()
1837 return rc->rx_ring->q_vector; in ice_pull_qvec_from_rc()
1840 if (rc->tx_ring) in ice_pull_qvec_from_rc()
1841 return rc->tx_ring->q_vector; in ice_pull_qvec_from_rc()
1851 * __ice_write_itr - write throttle rate to register
1853 * @rc: pointer to ring container
1859 struct ice_hw *hw = &q_vector->vsi->back->hw; in __ice_write_itr()
1861 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), in __ice_write_itr()
1866 * ice_write_itr - write throttle rate to queue specific register
1867 * @rc: pointer to ring container
1882 * ice_set_q_vector_intrl - set up interrupt rate limiting
1885 * Interrupt rate limiting is local to the vector, not per-queue, so we must in ice_set_q_vector_intrl()
1886 * detect if either ring container has dynamic moderation enabled to decide
1893 if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) { in ice_set_q_vector_intrl()
1902 ice_write_intrl(q_vector, q_vector->intrl); in ice_set_q_vector_intrl()
1907 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
1915 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_msix()
1916 struct ice_hw *hw = &pf->hw; in ice_vsi_cfg_msix()
1921 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_cfg_msix()
1922 u16 reg_idx = q_vector->reg_idx; in ice_vsi_cfg_msix()
1931 * For SR-IOV VF VSIs queue vector index always starts in ice_vsi_cfg_msix()
1937 for (q = 0; q < q_vector->num_ring_tx; q++) { in ice_vsi_cfg_msix()
1939 q_vector->tx.itr_idx); in ice_vsi_cfg_msix()
1943 for (q = 0; q < q_vector->num_ring_rx; q++) { in ice_vsi_cfg_msix()
1945 q_vector->rx.itr_idx); in ice_vsi_cfg_msix()
1952 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
1963 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
1974 * ice_vsi_stop_tx_rings - Disable Tx rings
1987 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) in ice_vsi_stop_tx_rings()
1988 return -EINVAL; in ice_vsi_stop_tx_rings()
1995 return -EINVAL; in ice_vsi_stop_tx_rings()
2009 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
2018 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq); in ice_vsi_stop_lan_tx_rings()
2022 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
2027 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); in ice_vsi_stop_xdp_tx_rings()
2038 struct ice_pf *pf = vsi->back; in ice_vsi_is_rx_queue_active()
2039 struct ice_hw *hw = &pf->hw; in ice_vsi_is_rx_queue_active()
2046 pf_q = vsi->rxq_map[i]; in ice_vsi_is_rx_queue_active()
2057 if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { in ice_vsi_set_tc_cfg()
2058 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; in ice_vsi_set_tc_cfg()
2059 vsi->tc_cfg.numtc = 1; in ice_vsi_set_tc_cfg()
2068 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
2077 struct ice_pf *pf = vsi->back; in ice_cfg_sw_lldp()
2088 if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) { in ice_cfg_sw_lldp()
2089 status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, in ice_cfg_sw_lldp()
2100 vsi->vsi_num, status); in ice_cfg_sw_lldp()
2104 * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it
2112 struct device *dev = ice_pf_to_dev(vsi->back); in ice_set_agg_vsi()
2118 struct ice_pf *pf = vsi->back; in ice_set_agg_vsi()
2124 * - PF aggregator node contains VSIs of type _PF and _CTRL in ice_set_agg_vsi()
2125 * - VF aggregator nodes will contain VF VSI in ice_set_agg_vsi()
2127 port_info = pf->hw.port_info; in ice_set_agg_vsi()
2131 switch (vsi->type) { in ice_set_agg_vsi()
2139 agg_node_iter = &pf->pf_agg_node[0]; in ice_set_agg_vsi()
2150 agg_node_iter = &pf->vf_agg_node[0]; in ice_set_agg_vsi()
2155 ice_vsi_type_str(vsi->type)); in ice_set_agg_vsi()
2164 if (agg_node_iter->num_vsis && in ice_set_agg_vsi()
2165 agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { in ice_set_agg_vsi()
2170 if (agg_node_iter->valid && in ice_set_agg_vsi()
2171 agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) { in ice_set_agg_vsi()
2172 agg_id = agg_node_iter->agg_id; in ice_set_agg_vsi()
2178 if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) { in ice_set_agg_vsi()
2191 if (!agg_node->valid) { in ice_set_agg_vsi()
2193 (u8)vsi->tc_cfg.ena_tc); in ice_set_agg_vsi()
2200 agg_node->valid = true; in ice_set_agg_vsi()
2201 agg_node->agg_id = agg_id; in ice_set_agg_vsi()
2205 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx, in ice_set_agg_vsi()
2206 (u8)vsi->tc_cfg.ena_tc); in ice_set_agg_vsi()
2209 vsi->idx, agg_id); in ice_set_agg_vsi()
2214 agg_node->num_vsis++; in ice_set_agg_vsi()
2216 /* cache the 'agg_id' in VSI, so that after reset - VSI will be moved in ice_set_agg_vsi()
2219 vsi->agg_node = agg_node; in ice_set_agg_vsi()
2221 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id, in ice_set_agg_vsi()
2222 vsi->agg_node->num_vsis); in ice_set_agg_vsi()
2233 if (!(vsi->tc_cfg.ena_tc & BIT(i))) in ice_vsi_cfg_tc_lan()
2236 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_cfg_tc_lan()
2237 if (!vsi->alloc_txq && vsi->num_txq) in ice_vsi_cfg_tc_lan()
2238 max_txqs[i] = vsi->num_txq; in ice_vsi_cfg_tc_lan()
2240 max_txqs[i] = pf->num_lan_tx; in ice_vsi_cfg_tc_lan()
2242 max_txqs[i] = vsi->alloc_txq; in ice_vsi_cfg_tc_lan()
2245 if (vsi->type == ICE_VSI_PF) in ice_vsi_cfg_tc_lan()
2246 max_txqs[i] += vsi->num_xdp_txq; in ice_vsi_cfg_tc_lan()
2249 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); in ice_vsi_cfg_tc_lan()
2250 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, in ice_vsi_cfg_tc_lan()
2254 vsi->vsi_num, ret); in ice_vsi_cfg_tc_lan()
2262 * ice_vsi_cfg_def - configure default VSI based on the type
2267 struct device *dev = ice_pf_to_dev(vsi->back); in ice_vsi_cfg_def()
2268 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_def()
2271 vsi->vsw = pf->first_sw; in ice_vsi_cfg_def()
2273 ret = ice_vsi_alloc_def(vsi, vsi->ch); in ice_vsi_cfg_def()
2286 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", in ice_vsi_cfg_def()
2287 vsi->idx); in ice_vsi_cfg_def()
2298 ret = ice_vsi_init(vsi, vsi->flags); in ice_vsi_cfg_def()
2304 switch (vsi->type) { in ice_vsi_cfg_def()
2324 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog, in ice_vsi_cfg_def()
2332 vsi->stat_offsets_loaded = false; in ice_vsi_cfg_def()
2335 if (vsi->type != ICE_VSI_CTRL) in ice_vsi_cfg_def()
2340 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_cfg_def()
2347 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_cfg_def()
2370 vsi->stat_offsets_loaded = false; in ice_vsi_cfg_def()
2376 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_cfg_def()
2393 ret = -EINVAL; in ice_vsi_cfg_def()
2415 * ice_vsi_cfg - configure a previously allocated VSI
2420 struct ice_pf *pf = vsi->back; in ice_vsi_cfg()
2423 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) in ice_vsi_cfg()
2424 return -EINVAL; in ice_vsi_cfg()
2430 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); in ice_vsi_cfg()
2434 if (vsi->type == ICE_VSI_CTRL) { in ice_vsi_cfg()
2435 if (vsi->vf) { in ice_vsi_cfg()
2436 WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI); in ice_vsi_cfg()
2437 vsi->vf->ctrl_vsi_idx = vsi->idx; in ice_vsi_cfg()
2439 WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI); in ice_vsi_cfg()
2440 pf->ctrl_vsi_idx = vsi->idx; in ice_vsi_cfg()
2448 * ice_vsi_decfg - remove all VSI configuration
2453 struct ice_pf *pf = vsi->back; in ice_vsi_decfg()
2456 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); in ice_vsi_decfg()
2457 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); in ice_vsi_decfg()
2460 vsi->vsi_num, err); in ice_vsi_decfg()
2462 if (vsi->xdp_rings) in ice_vsi_decfg()
2473 /* SR-IOV determines needed MSIX resources all at once instead of per in ice_vsi_decfg()
2475 * many interrupts each VF needs. SR-IOV MSIX resources are also in ice_vsi_decfg()
2479 if (vsi->type == ICE_VSI_VF && in ice_vsi_decfg()
2480 vsi->agg_node && vsi->agg_node->valid) in ice_vsi_decfg()
2481 vsi->agg_node->num_vsis--; in ice_vsi_decfg()
2485 * ice_vsi_setup - Set up a VSI by a given type
2504 if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || in ice_vsi_setup()
2505 WARN_ON(!params->port_info)) in ice_vsi_setup()
2514 vsi->params = *params; in ice_vsi_setup()
2524 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to in ice_vsi_setup()
2528 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { in ice_vsi_setup()
2534 if (!vsi->agg_node) in ice_vsi_setup()
2546 * ice_vsi_release_msix - Clear the queue-to-interrupt mapping in HW
2551 struct ice_pf *pf = vsi->back; in ice_vsi_release_msix()
2552 struct ice_hw *hw = &pf->hw; in ice_vsi_release_msix()
2558 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_release_msix()
2561 for (q = 0; q < q_vector->num_ring_tx; q++) { in ice_vsi_release_msix()
2562 ice_write_itr(&q_vector->tx, 0); in ice_vsi_release_msix()
2563 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); in ice_vsi_release_msix()
2564 if (vsi->xdp_rings) { in ice_vsi_release_msix()
2565 u32 xdp_txq = txq + vsi->num_xdp_txq; in ice_vsi_release_msix()
2567 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); in ice_vsi_release_msix()
2572 for (q = 0; q < q_vector->num_ring_rx; q++) { in ice_vsi_release_msix()
2573 ice_write_itr(&q_vector->rx, 0); in ice_vsi_release_msix()
2574 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); in ice_vsi_release_msix()
2583 * ice_vsi_free_irq - Free the IRQ association with the OS
2588 struct ice_pf *pf = vsi->back; in ice_vsi_free_irq()
2591 if (!vsi->q_vectors || !vsi->irqs_ready) in ice_vsi_free_irq()
2595 if (vsi->type == ICE_VSI_VF) in ice_vsi_free_irq()
2598 vsi->irqs_ready = false; in ice_vsi_free_irq()
2603 irq_num = vsi->q_vectors[i]->irq.virq; in ice_vsi_free_irq()
2606 if (!vsi->q_vectors[i] || in ice_vsi_free_irq()
2607 !(vsi->q_vectors[i]->num_ring_tx || in ice_vsi_free_irq()
2608 vsi->q_vectors[i]->num_ring_rx)) in ice_vsi_free_irq()
2612 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); in ice_vsi_free_irq()
2617 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
2624 if (!vsi->tx_rings) in ice_vsi_free_tx_rings()
2628 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in ice_vsi_free_tx_rings()
2629 ice_free_tx_ring(vsi->tx_rings[i]); in ice_vsi_free_tx_rings()
2633 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
2640 if (!vsi->rx_rings) in ice_vsi_free_rx_rings()
2644 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) in ice_vsi_free_rx_rings()
2645 ice_free_rx_ring(vsi->rx_rings[i]); in ice_vsi_free_rx_rings()
2649 * ice_vsi_close - Shut down a VSI
2654 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) in ice_vsi_close()
2664 * ice_ena_vsi - resume a VSI
2672 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state)) in ice_ena_vsi()
2675 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_ena_vsi()
2677 if (vsi->netdev && (vsi->type == ICE_VSI_PF || in ice_ena_vsi()
2678 vsi->type == ICE_VSI_SF)) { in ice_ena_vsi()
2679 if (netif_running(vsi->netdev)) { in ice_ena_vsi()
2683 err = ice_open_internal(vsi->netdev); in ice_ena_vsi()
2688 } else if (vsi->type == ICE_VSI_CTRL) { in ice_ena_vsi()
2696 * ice_dis_vsi - pause a VSI
2702 bool already_down = test_bit(ICE_VSI_DOWN, vsi->state); in ice_dis_vsi()
2704 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_dis_vsi()
2706 if (vsi->netdev && (vsi->type == ICE_VSI_PF || in ice_dis_vsi()
2707 vsi->type == ICE_VSI_SF)) { in ice_dis_vsi()
2708 if (netif_running(vsi->netdev)) { in ice_dis_vsi()
2711 already_down = test_bit(ICE_VSI_DOWN, vsi->state); in ice_dis_vsi()
2720 } else if (vsi->type == ICE_VSI_CTRL && !already_down) { in ice_dis_vsi()
2726 * ice_vsi_set_napi_queues - associate netdev queues with napi
2734 struct net_device *netdev = vsi->netdev; in ice_vsi_set_napi_queues()
2742 &vsi->rx_rings[q_idx]->q_vector->napi); in ice_vsi_set_napi_queues()
2746 &vsi->tx_rings[q_idx]->q_vector->napi); in ice_vsi_set_napi_queues()
2749 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; in ice_vsi_set_napi_queues()
2751 netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); in ice_vsi_set_napi_queues()
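
Condensed from the fragments above, the association runs in three passes: point every Rx and Tx queue index at its vector's NAPI instance, then record each vector's Linux IRQ on the NAPI so the core can steer busy polling. Loop bounds and the ice_for_each_q_vector macro are assumptions here:

for (q_idx = 0; q_idx < vsi->num_rxq; q_idx++)
        netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
                             &vsi->rx_rings[q_idx]->q_vector->napi);

for (q_idx = 0; q_idx < vsi->num_txq; q_idx++)
        netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX,
                             &vsi->tx_rings[q_idx]->q_vector->napi);

ice_for_each_q_vector(vsi, v_idx)
        netif_napi_set_irq(&vsi->q_vectors[v_idx]->napi,
                           vsi->q_vectors[v_idx]->irq.virq);
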
2756 * ice_vsi_clear_napi_queues - dissociate netdev queues from napi
2764 struct net_device *netdev = vsi->netdev; in ice_vsi_clear_napi_queues()
2772 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; in ice_vsi_clear_napi_queues()
2774 netif_napi_set_irq(&q_vector->napi, -1); in ice_vsi_clear_napi_queues()
2785 * ice_napi_add - register NAPI handler for the VSI
2796 if (!vsi->netdev) in ice_napi_add()
2800 netif_napi_add_config(vsi->netdev, in ice_napi_add()
2801 &vsi->q_vectors[v_idx]->napi, in ice_napi_add()
2807 * ice_vsi_release - Delete a VSI and free its resources
2816 if (!vsi->back) in ice_vsi_release()
2817 return -ENODEV; in ice_vsi_release()
2818 pf = vsi->back; in ice_vsi_release()
2820 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) in ice_vsi_release()
2828 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && in ice_vsi_release()
2829 !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) in ice_vsi_release()
2838 if (!ice_is_reset_in_progress(pf->state)) in ice_vsi_release()
2845 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
2858 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_rebuild_get_coalesce()
2860 coalesce[i].itr_tx = q_vector->tx.itr_settings; in ice_vsi_rebuild_get_coalesce()
2861 coalesce[i].itr_rx = q_vector->rx.itr_settings; in ice_vsi_rebuild_get_coalesce()
2862 coalesce[i].intrl = q_vector->intrl; in ice_vsi_rebuild_get_coalesce()
2864 if (i < vsi->num_txq) in ice_vsi_rebuild_get_coalesce()
2866 if (i < vsi->num_rxq) in ice_vsi_rebuild_get_coalesce()
2870 return vsi->num_q_vectors; in ice_vsi_rebuild_get_coalesce()
2874 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
2899 for (i = 0; i < size && i < vsi->num_q_vectors; i++) { in ice_vsi_rebuild_set_coalesce()
2915 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { in ice_vsi_rebuild_set_coalesce()
2916 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
2917 rc->itr_settings = coalesce[i].itr_rx; in ice_vsi_rebuild_set_coalesce()
2918 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
2919 } else if (i < vsi->alloc_rxq) { in ice_vsi_rebuild_set_coalesce()
2920 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
2921 rc->itr_settings = coalesce[0].itr_rx; in ice_vsi_rebuild_set_coalesce()
2922 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
2925 if (i < vsi->alloc_txq && coalesce[i].tx_valid) { in ice_vsi_rebuild_set_coalesce()
2926 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
2927 rc->itr_settings = coalesce[i].itr_tx; in ice_vsi_rebuild_set_coalesce()
2928 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
2929 } else if (i < vsi->alloc_txq) { in ice_vsi_rebuild_set_coalesce()
2930 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
2931 rc->itr_settings = coalesce[0].itr_tx; in ice_vsi_rebuild_set_coalesce()
2932 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
2935 vsi->q_vectors[i]->intrl = coalesce[i].intrl; in ice_vsi_rebuild_set_coalesce()
2936 ice_set_q_vector_intrl(vsi->q_vectors[i]); in ice_vsi_rebuild_set_coalesce()
2942 for (; i < vsi->num_q_vectors; i++) { in ice_vsi_rebuild_set_coalesce()
2944 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
2945 rc->itr_settings = coalesce[0].itr_tx; in ice_vsi_rebuild_set_coalesce()
2946 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
2949 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
2950 rc->itr_settings = coalesce[0].itr_rx; in ice_vsi_rebuild_set_coalesce()
2951 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
2953 vsi->q_vectors[i]->intrl = coalesce[0].intrl; in ice_vsi_rebuild_set_coalesce()
2954 ice_set_q_vector_intrl(vsi->q_vectors[i]); in ice_vsi_rebuild_set_coalesce()
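
Together these two helpers let ice_vsi_rebuild() (further down) carry user ITR/INTRL tuning across a reset: snapshot every vector before teardown, restore after the vectors come back, and seed any newly added vectors from vector 0's settings. A usage sketch matching the rebuild flow shown later in this listing:

struct ice_coalesce_stored *coalesce;
int prev_num_q_vectors;

coalesce = kcalloc(vsi->num_q_vectors, sizeof(*coalesce), GFP_KERNEL);
if (!coalesce)
        return -ENOMEM;

prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);

/* ... tear down and rebuild the VSI and its q_vectors ... */

ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
kfree(coalesce);
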
2959 * ice_vsi_realloc_stat_arrays - Free unused stat structures or allocate new ones
2965 u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq; in ice_vsi_realloc_stat_arrays()
2966 u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq; in ice_vsi_realloc_stat_arrays()
2970 struct ice_pf *pf = vsi->back; in ice_vsi_realloc_stat_arrays()
2971 u16 prev_txq = vsi->alloc_txq; in ice_vsi_realloc_stat_arrays()
2972 u16 prev_rxq = vsi->alloc_rxq; in ice_vsi_realloc_stat_arrays()
2975 vsi_stat = pf->vsi_stats[vsi->idx]; in ice_vsi_realloc_stat_arrays()
2979 if (vsi_stat->tx_ring_stats[i]) { in ice_vsi_realloc_stat_arrays()
2980 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); in ice_vsi_realloc_stat_arrays()
2981 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); in ice_vsi_realloc_stat_arrays()
2986 tx_ring_stats = vsi_stat->tx_ring_stats; in ice_vsi_realloc_stat_arrays()
2987 vsi_stat->tx_ring_stats = in ice_vsi_realloc_stat_arrays()
2988 krealloc_array(vsi_stat->tx_ring_stats, req_txq, in ice_vsi_realloc_stat_arrays()
2989 sizeof(*vsi_stat->tx_ring_stats), in ice_vsi_realloc_stat_arrays()
2991 if (!vsi_stat->tx_ring_stats) { in ice_vsi_realloc_stat_arrays()
2992 vsi_stat->tx_ring_stats = tx_ring_stats; in ice_vsi_realloc_stat_arrays()
2993 return -ENOMEM; in ice_vsi_realloc_stat_arrays()
2998 if (vsi_stat->rx_ring_stats[i]) { in ice_vsi_realloc_stat_arrays()
2999 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); in ice_vsi_realloc_stat_arrays()
3000 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); in ice_vsi_realloc_stat_arrays()
3005 rx_ring_stats = vsi_stat->rx_ring_stats; in ice_vsi_realloc_stat_arrays()
3006 vsi_stat->rx_ring_stats = in ice_vsi_realloc_stat_arrays()
3007 krealloc_array(vsi_stat->rx_ring_stats, req_rxq, in ice_vsi_realloc_stat_arrays()
3008 sizeof(*vsi_stat->rx_ring_stats), in ice_vsi_realloc_stat_arrays()
3010 if (!vsi_stat->rx_ring_stats) { in ice_vsi_realloc_stat_arrays()
3011 vsi_stat->rx_ring_stats = rx_ring_stats; in ice_vsi_realloc_stat_arrays()
3012 return -ENOMEM; in ice_vsi_realloc_stat_arrays()
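
Both the Tx and Rx branches above use the same safe-grow idiom: stash the old pointer, krealloc_array() into the struct field, and put the old pointer back if the reallocation fails, since krealloc leaves the original buffer intact on failure. Distilled, with comments:

tx_ring_stats = vsi_stat->tx_ring_stats;        /* keep the old array */
vsi_stat->tx_ring_stats =
        krealloc_array(vsi_stat->tx_ring_stats, req_txq,
                       sizeof(*vsi_stat->tx_ring_stats), GFP_KERNEL);
if (!vsi_stat->tx_ring_stats) {
        /* failed: the old array is still valid, restore and report */
        vsi_stat->tx_ring_stats = tx_ring_stats;
        return -ENOMEM;
}
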
3019 * ice_vsi_rebuild - Rebuild VSI after reset
3036 return -EINVAL; in ice_vsi_rebuild()
3038 vsi->flags = vsi_flags; in ice_vsi_rebuild()
3039 pf = vsi->back; in ice_vsi_rebuild()
3040 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) in ice_vsi_rebuild()
3041 return -EINVAL; in ice_vsi_rebuild()
3043 mutex_lock(&vsi->xdp_state_lock); in ice_vsi_rebuild()
3054 coalesce = kcalloc(vsi->num_q_vectors, in ice_vsi_rebuild()
3057 ret = -ENOMEM; in ice_vsi_rebuild()
3066 ret = -EIO; in ice_vsi_rebuild()
3075 clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state); in ice_vsi_rebuild()
3083 mutex_unlock(&vsi->xdp_state_lock); in ice_vsi_rebuild()
3088 * ice_is_reset_in_progress - check for a reset in progress
3100 * ice_wait_for_reset - Wait for driver to finish reset and rebuild
3109 * Returns 0 on success, -EBUSY if the reset is not finished within the
3110 * timeout, and -ERESTARTSYS if the thread was interrupted.
3116 ret = wait_event_interruptible_timeout(pf->reset_wait_queue, in ice_wait_for_reset()
3117 !ice_is_reset_in_progress(pf->state), in ice_wait_for_reset()
3122 return -EBUSY; in ice_wait_for_reset()
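
The return mapping promised by the kernel-doc above falls straight out of wait_event_interruptible_timeout() semantics: 0 means the timeout expired with the condition still false, negative means a signal interrupted the wait, positive means the reset finished. Sketch of the mapping (the timeout argument and final return are elided from the listing, so they are assumptions):

ret = wait_event_interruptible_timeout(pf->reset_wait_queue,
                                       !ice_is_reset_in_progress(pf->state),
                                       timeout);
if (!ret)
        return -EBUSY;          /* timed out, reset still in progress */
else if (ret < 0)
        return ret;             /* -ERESTARTSYS: interrupted by a signal */
return 0;                       /* reset and rebuild finished in time */
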
3128 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
3134 vsi->info.mapping_flags = ctx->info.mapping_flags; in ice_vsi_update_q_map()
3135 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, in ice_vsi_update_q_map()
3136 sizeof(vsi->info.q_mapping)); in ice_vsi_update_q_map()
3137 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, in ice_vsi_update_q_map()
3138 sizeof(vsi->info.tc_mapping)); in ice_vsi_update_q_map()
3142 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
3148 struct net_device *netdev = vsi->netdev; in ice_vsi_cfg_netdev_tc()
3149 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_netdev_tc()
3150 int numtc = vsi->tc_cfg.numtc; in ice_vsi_cfg_netdev_tc()
3159 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_cfg_netdev_tc()
3167 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf)) in ice_vsi_cfg_netdev_tc()
3168 numtc = vsi->all_numtc; in ice_vsi_cfg_netdev_tc()
3173 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; in ice_vsi_cfg_netdev_tc()
3176 if (vsi->tc_cfg.ena_tc & BIT(i)) in ice_vsi_cfg_netdev_tc()
3178 vsi->tc_cfg.tc_info[i].netdev_tc, in ice_vsi_cfg_netdev_tc()
3179 vsi->tc_cfg.tc_info[i].qcount_tx, in ice_vsi_cfg_netdev_tc()
3180 vsi->tc_cfg.tc_info[i].qoffset); in ice_vsi_cfg_netdev_tc()
3183 if (!(vsi->all_enatc & BIT(i))) in ice_vsi_cfg_netdev_tc()
3185 if (!vsi->mqprio_qopt.qopt.count[i]) in ice_vsi_cfg_netdev_tc()
3188 vsi->mqprio_qopt.qopt.count[i], in ice_vsi_cfg_netdev_tc()
3189 vsi->mqprio_qopt.qopt.offset[i]); in ice_vsi_cfg_netdev_tc()
3192 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_vsi_cfg_netdev_tc()
3196 u8 ets_tc = dcbcfg->etscfg.prio_table[i]; in ice_vsi_cfg_netdev_tc()
3199 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; in ice_vsi_cfg_netdev_tc()
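
The fragments above drive the stock netdev TC API in three steps: declare how many TCs exist, describe each TC's queue range, then map the user priorities onto TCs using the DCB ETS table. A condensed sketch; the guard conditions and exact loop bounds (e.g. the eight-priority limit) are assumptions:

if (netdev_set_num_tc(netdev, numtc))
        return;

for (i = 0; i < numtc; i++)
        netdev_set_tc_queue(netdev,
                            vsi->tc_cfg.tc_info[i].netdev_tc,
                            vsi->tc_cfg.tc_info[i].qcount_tx,
                            vsi->tc_cfg.tc_info[i].qoffset);

for (i = 0; i < 8; i++) {       /* one entry per user priority */
        u8 ets_tc = dcbcfg->etscfg.prio_table[i];

        netdev_set_prio_tc_map(netdev, i,
                               vsi->tc_cfg.tc_info[ets_tc].netdev_tc);
}
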
3205 * ice_vsi_setup_q_map_mqprio - Prepares mqprio-based tc_config
3217 u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; in ice_vsi_setup_q_map_mqprio()
3218 int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; in ice_vsi_setup_q_map_mqprio()
3223 vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1; in ice_vsi_setup_q_map_mqprio()
3230 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { in ice_vsi_setup_q_map_mqprio()
3232 vsi->tc_cfg.tc_info[i].qoffset = 0; in ice_vsi_setup_q_map_mqprio()
3233 vsi->tc_cfg.tc_info[i].qcount_rx = 1; in ice_vsi_setup_q_map_mqprio()
3234 vsi->tc_cfg.tc_info[i].qcount_tx = 1; in ice_vsi_setup_q_map_mqprio()
3235 vsi->tc_cfg.tc_info[i].netdev_tc = 0; in ice_vsi_setup_q_map_mqprio()
3236 ctxt->info.tc_mapping[i] = 0; in ice_vsi_setup_q_map_mqprio()
3240 offset = vsi->mqprio_qopt.qopt.offset[i]; in ice_vsi_setup_q_map_mqprio()
3241 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3242 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3243 vsi->tc_cfg.tc_info[i].qoffset = offset; in ice_vsi_setup_q_map_mqprio()
3244 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; in ice_vsi_setup_q_map_mqprio()
3245 vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx; in ice_vsi_setup_q_map_mqprio()
3246 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; in ice_vsi_setup_q_map_mqprio()
3249 if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) { in ice_vsi_setup_q_map_mqprio()
3251 if (!(vsi->all_enatc & BIT(i))) in ice_vsi_setup_q_map_mqprio()
3253 offset = vsi->mqprio_qopt.qopt.offset[i]; in ice_vsi_setup_q_map_mqprio()
3254 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3255 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3260 if (new_txq > vsi->alloc_txq) { in ice_vsi_setup_q_map_mqprio()
3261 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map_mqprio()
3262 new_txq, vsi->alloc_txq); in ice_vsi_setup_q_map_mqprio()
3263 return -EINVAL; in ice_vsi_setup_q_map_mqprio()
3267 if (new_rxq > vsi->alloc_rxq) { in ice_vsi_setup_q_map_mqprio()
3268 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map_mqprio()
3269 new_rxq, vsi->alloc_rxq); in ice_vsi_setup_q_map_mqprio()
3270 return -EINVAL; in ice_vsi_setup_q_map_mqprio()
3274 vsi->num_txq = new_txq; in ice_vsi_setup_q_map_mqprio()
3275 vsi->num_rxq = new_rxq; in ice_vsi_setup_q_map_mqprio()
3278 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); in ice_vsi_setup_q_map_mqprio()
3279 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); in ice_vsi_setup_q_map_mqprio()
3280 ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount); in ice_vsi_setup_q_map_mqprio()
3285 if (tc0_qcount && tc0_qcount < vsi->num_rxq) { in ice_vsi_setup_q_map_mqprio()
3286 vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount; in ice_vsi_setup_q_map_mqprio()
3287 vsi->next_base_q = tc0_qcount; in ice_vsi_setup_q_map_mqprio()
3289 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq); in ice_vsi_setup_q_map_mqprio()
3290 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); in ice_vsi_setup_q_map_mqprio()
3291 dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", in ice_vsi_setup_q_map_mqprio()
3292 vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc); in ice_vsi_setup_q_map_mqprio()
3298 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
3307 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_tc()
3315 if (vsi->tc_cfg.ena_tc == ena_tc && in ice_vsi_cfg_tc()
3316 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) in ice_vsi_cfg_tc()
3324 max_txqs[i] = vsi->alloc_txq; in ice_vsi_cfg_tc()
3328 if (vsi->type == ICE_VSI_CHNL && in ice_vsi_cfg_tc()
3329 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_vsi_cfg_tc()
3330 max_txqs[i] = vsi->num_txq; in ice_vsi_cfg_tc()
3333 memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg)); in ice_vsi_cfg_tc()
3334 vsi->tc_cfg.ena_tc = ena_tc; in ice_vsi_cfg_tc()
3335 vsi->tc_cfg.numtc = num_tc; in ice_vsi_cfg_tc()
3339 return -ENOMEM; in ice_vsi_cfg_tc()
3341 ctx->vf_num = 0; in ice_vsi_cfg_tc()
3342 ctx->info = vsi->info; in ice_vsi_cfg_tc()
3344 if (vsi->type == ICE_VSI_PF && in ice_vsi_cfg_tc()
3345 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_vsi_cfg_tc()
3351 memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg)); in ice_vsi_cfg_tc()
3356 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); in ice_vsi_cfg_tc()
3357 ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); in ice_vsi_cfg_tc()
3363 if (vsi->type == ICE_VSI_PF && in ice_vsi_cfg_tc()
3364 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_vsi_cfg_tc()
3365 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); in ice_vsi_cfg_tc()
3367 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, in ice_vsi_cfg_tc()
3368 vsi->tc_cfg.ena_tc, max_txqs); in ice_vsi_cfg_tc()
3372 vsi->vsi_num, ret); in ice_vsi_cfg_tc()
3376 vsi->info.valid_sections = 0; in ice_vsi_cfg_tc()
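/* Editor's sketch: ice_vsi_cfg_tc() above uses a snapshot-and-rollback
 * pattern -- the old TC config is copied aside before the update is
 * attempted and restored if the context allocation or VSI update fails.
 * Generic userspace illustration; struct and helper names hypothetical.
 */
#include <stdio.h>
#include <string.h>

struct tc_cfg { unsigned int ena_tc, numtc; };

static int try_update(struct tc_cfg *cfg)
{
	(void)cfg;
	return -1;	/* simulate a failed VSI update */
}

int main(void)
{
	struct tc_cfg cfg = { .ena_tc = 0x1, .numtc = 1 };
	struct tc_cfg old_cfg;

	memcpy(&old_cfg, &cfg, sizeof(old_cfg));	/* snapshot */
	cfg.ena_tc = 0x3;
	cfg.numtc = 2;

	if (try_update(&cfg)) {
		memcpy(&cfg, &old_cfg, sizeof(cfg));	/* roll back */
		printf("update failed, restored numtc=%u\n", cfg.numtc);
	}
	return 0;
}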
3385 * ice_update_ring_stats - Update ring statistics
3394 stats->bytes += bytes; in ice_update_ring_stats()
3395 stats->pkts += pkts; in ice_update_ring_stats()
3399 * ice_update_tx_ring_stats - Update Tx ring specific counters
3406 u64_stats_update_begin(&tx_ring->ring_stats->syncp); in ice_update_tx_ring_stats()
3407 ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes); in ice_update_tx_ring_stats()
3408 u64_stats_update_end(&tx_ring->ring_stats->syncp); in ice_update_tx_ring_stats()
3412 * ice_update_rx_ring_stats - Update Rx ring specific counters
3419 u64_stats_update_begin(&rx_ring->ring_stats->syncp); in ice_update_rx_ring_stats()
3420 ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes); in ice_update_rx_ring_stats()
3421 u64_stats_update_end(&rx_ring->ring_stats->syncp); in ice_update_rx_ring_stats()
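/* Editor's sketch (kernel context, not in ice_lib.c): the writers above
 * bracket the 64-bit counter updates with u64_stats_update_begin()/
 * u64_stats_update_end() so 32-bit readers cannot observe a torn value.
 * A reader samples under the same syncp and retries if a writer raced
 * with it; function name here is hypothetical.
 */
static void ice_fetch_ring_stats(struct ice_ring_stats *ring_stats,
				 u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&ring_stats->syncp);
		*pkts = ring_stats->stats.pkts;
		*bytes = ring_stats->stats.bytes;
	} while (u64_stats_fetch_retry(&ring_stats->syncp, start));
}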
3425 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
3439 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
3447 return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL); in ice_is_vsi_dflt_vsi()
3451 * ice_set_dflt_vsi - set the default forwarding VSI
3466 return -EINVAL; in ice_set_dflt_vsi()
3468 dev = ice_pf_to_dev(vsi->back); in ice_set_dflt_vsi()
3470 if (ice_lag_is_switchdev_running(vsi->back)) { in ice_set_dflt_vsi()
3472 vsi->vsi_num); in ice_set_dflt_vsi()
3479 vsi->vsi_num); in ice_set_dflt_vsi()
3483 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX); in ice_set_dflt_vsi()
3486 vsi->vsi_num, status); in ice_set_dflt_vsi()
3494 * ice_clear_dflt_vsi - clear the default forwarding VSI
3507 return -EINVAL; in ice_clear_dflt_vsi()
3509 dev = ice_pf_to_dev(vsi->back); in ice_clear_dflt_vsi()
3512 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) in ice_clear_dflt_vsi()
3513 return -ENODEV; in ice_clear_dflt_vsi()
3515 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false, in ice_clear_dflt_vsi()
3519 vsi->vsi_num, status); in ice_clear_dflt_vsi()
3520 return -EIO; in ice_clear_dflt_vsi()
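/* Editor's sketch: the set/clear pair above guards against redundant
 * switch updates -- setting is a no-op when the VSI is already the
 * default Rx VSI, and clearing fails fast (-ENODEV) when no default VSI
 * is in use.  Minimal userspace model of that state machine; names and
 * return values here are stand-ins.
 */
#include <stdio.h>

static int dflt_vsi = -1;	/* -1: no default VSI in use */

static int set_dflt_vsi(int idx)
{
	if (dflt_vsi == idx)
		return 0;	/* already default: nothing to do */
	dflt_vsi = idx;
	printf("VSI %d is now the default Rx VSI\n", idx);
	return 0;
}

static int clear_dflt_vsi(void)
{
	if (dflt_vsi < 0)
		return -1;	/* stands in for -ENODEV */
	printf("VSI %d no longer default\n", dflt_vsi);
	dflt_vsi = -1;
	return 0;
}

int main(void)
{
	set_dflt_vsi(3);
	set_dflt_vsi(3);	/* silently skipped */
	clear_dflt_vsi();
	return clear_dflt_vsi() ? 1 : 0;	/* fails: none in use */
}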
3527 * ice_get_link_speed_mbps - get link speed in Mbps
3536 link_speed = vsi->port_info->phy.link_info.link_speed; in ice_get_link_speed_mbps()
3538 return (int)ice_get_link_speed(fls(link_speed) - 1); in ice_get_link_speed_mbps()
3542 * ice_get_link_speed_kbps - get link speed in Kbps
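/* Editor's sketch: the AQ reports link_speed as an effectively one-hot
 * bitmask (one ICE_AQ_LINK_SPEED_* bit set), so fls(link_speed) - 1
 * above is simply "index of the set bit", used to look the speed up in
 * a table; the Kbps variant then multiplies by 1000.  Userspace demo
 * with a stand-in fls(); the table values are illustrative only.
 */
#include <stdio.h>

static int fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	static const unsigned int speed_mbps[] = { 10, 100, 1000, 2500 };
	unsigned int link_speed = 1U << 2;	/* bit 2 set -> 1000 Mbps */
	unsigned int mbps = speed_mbps[fls(link_speed) - 1];

	printf("%u Mbps = %u Kbps\n", mbps, mbps * 1000);
	return 0;
}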
3557 * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
3562 * profile, otherwise a non-zero value will force a minimum BW limit for the VSI
3567 struct ice_pf *pf = vsi->back; in ice_set_min_bw_limit()
3573 if (!vsi->port_info) { in ice_set_min_bw_limit()
3575 vsi->idx, vsi->type); in ice_set_min_bw_limit()
3576 return -EINVAL; in ice_set_min_bw_limit()
3582 min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, in ice_set_min_bw_limit()
3584 return -EINVAL; in ice_set_min_bw_limit()
3589 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, in ice_set_min_bw_limit()
3593 min_tx_rate, ice_vsi_type_str(vsi->type), in ice_set_min_bw_limit()
3594 vsi->idx); in ice_set_min_bw_limit()
3599 min_tx_rate, ice_vsi_type_str(vsi->type)); in ice_set_min_bw_limit()
3601 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, in ice_set_min_bw_limit()
3602 vsi->idx, 0, in ice_set_min_bw_limit()
3606 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_min_bw_limit()
3611 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_min_bw_limit()
3618 * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
3623 * profile, otherwise a non-zero value will force a maximum BW limit for the VSI
3628 struct ice_pf *pf = vsi->back; in ice_set_max_bw_limit()
3634 if (!vsi->port_info) { in ice_set_max_bw_limit()
3636 vsi->idx, vsi->type); in ice_set_max_bw_limit()
3637 return -EINVAL; in ice_set_max_bw_limit()
3643 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, in ice_set_max_bw_limit()
3645 return -EINVAL; in ice_set_max_bw_limit()
3650 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, in ice_set_max_bw_limit()
3654 max_tx_rate, ice_vsi_type_str(vsi->type), in ice_set_max_bw_limit()
3655 vsi->idx); in ice_set_max_bw_limit()
3660 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_max_bw_limit()
3662 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, in ice_set_max_bw_limit()
3663 vsi->idx, 0, in ice_set_max_bw_limit()
3667 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_max_bw_limit()
3672 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_max_bw_limit()
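/* Editor's sketch: both limiters above share one control flow -- a rate
 * of 0 removes the limit (back to the default scheduler profile), while
 * a non-zero rate is validated and then programmed per TC via
 * ice_cfg_vsi_bw_lmt_per_tc().  Userspace illustration of that flow;
 * the helper and the link-speed bound below are hypothetical.
 */
#include <stdio.h>

static int set_bw_limit(unsigned long long rate_kbps,
			unsigned long long link_kbps)
{
	if (!rate_kbps) {
		printf("clearing limit (default profile)\n");
		return 0;
	}
	if (rate_kbps > link_kbps) {
		fprintf(stderr, "rate %llu Kbps exceeds link %llu Kbps\n",
			rate_kbps, link_kbps);
		return -1;
	}
	printf("limit TC 0 to %llu Kbps\n", rate_kbps);
	return 0;
}

int main(void)
{
	unsigned long long link_kbps = 10000000ULL;	/* 10 Gbps */

	set_bw_limit(0, link_kbps);
	set_bw_limit(2500000ULL, link_kbps);
	set_bw_limit(25000000ULL, link_kbps);	/* rejected */
	return 0;
}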
3679 * ice_set_link - turn on/off physical link
3685 struct device *dev = ice_pf_to_dev(vsi->back); in ice_set_link()
3686 struct ice_port_info *pi = vsi->port_info; in ice_set_link()
3687 struct ice_hw *hw = pi->hw; in ice_set_link()
3690 if (vsi->type != ICE_VSI_PF) in ice_set_link()
3691 return -EINVAL; in ice_set_link()
3700 if (status == -EIO) { in ice_set_link()
3701 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) in ice_set_link()
3704 ice_aq_str(hw->adminq.sq_last_status)); in ice_set_link()
3708 ice_aq_str(hw->adminq.sq_last_status)); in ice_set_link()
3716 * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
3739 err = vlan_ops->add_vlan(vsi, &vlan); in ice_vsi_add_vlan_zero()
3740 if (err && err != -EEXIST) in ice_vsi_add_vlan_zero()
3744 if (!ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_add_vlan_zero()
3748 err = vlan_ops->add_vlan(vsi, &vlan); in ice_vsi_add_vlan_zero()
3749 if (err && err != -EEXIST) in ice_vsi_add_vlan_zero()
3756 * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
3769 err = vlan_ops->del_vlan(vsi, &vlan); in ice_vsi_del_vlan_zero()
3770 if (err && err != -EEXIST) in ice_vsi_del_vlan_zero()
3774 if (!ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_del_vlan_zero()
3778 err = vlan_ops->del_vlan(vsi, &vlan); in ice_vsi_del_vlan_zero()
3779 if (err && err != -EEXIST) in ice_vsi_del_vlan_zero()
3785 return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vsi_del_vlan_zero()
3790 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
3801 if (vsi->type == ICE_VSI_VF) { in ice_vsi_num_zero_vlans()
3802 if (WARN_ON(!vsi->vf)) in ice_vsi_num_zero_vlans()
3805 if (ice_vf_is_port_vlan_ena(vsi->vf)) in ice_vsi_num_zero_vlans()
3809 if (ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_num_zero_vlans()
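/* Editor's sketch of the counting rules above: a VF VSI with an active
 * port VLAN has no VLAN 0 filters; otherwise double VLAN mode (DVM)
 * means two (802.1Q + 802.1ad) and single VLAN mode (SVM) means one.
 * Runnable userspace restatement.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int num_zero_vlans(bool vf_port_vlan, bool dvm)
{
	if (vf_port_vlan)
		return 0;
	return dvm ? 2 : 1;
}

int main(void)
{
	printf("SVM: %u, DVM: %u, port-VLAN VF: %u\n",
	       num_zero_vlans(false, false),
	       num_zero_vlans(false, true),
	       num_zero_vlans(true, true));
	return 0;
}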
3816 * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
3817 * @vsi: VSI used to determine if any non-zero VLANs have been added
3821 return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi)); in ice_vsi_has_non_zero_vlans()
3825 * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
3826 * @vsi: VSI used to get the number of non-zero VLANs added
3830 return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi)); in ice_vsi_num_non_zero_vlans()
3845 return test_bit(f, pf->features); in ice_is_feature_supported()
3858 set_bit(f, pf->features); in ice_set_feature_support()
3871 clear_bit(f, pf->features); in ice_clear_feature_support()
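/* Editor's sketch: the three helpers above are thin wrappers around
 * test_bit()/set_bit()/clear_bit() on the pf->features bitmap.
 * Userspace analogue with a plain bitmask; feature IDs hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

enum feature { FEAT_RSS, FEAT_PTP, FEAT_MAX };

static unsigned long features;

static bool is_feature_supported(enum feature f)
{
	return features & (1UL << f);
}

static void set_feature_support(enum feature f)
{
	features |= 1UL << f;
}

static void clear_feature_support(enum feature f)
{
	features &= ~(1UL << f);
}

int main(void)
{
	set_feature_support(FEAT_PTP);
	printf("PTP: %d, RSS: %d\n",
	       is_feature_supported(FEAT_PTP),
	       is_feature_supported(FEAT_RSS));
	clear_feature_support(FEAT_PTP);
	printf("PTP after clear: %d\n", is_feature_supported(FEAT_PTP));
	return 0;
}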
3882 switch (pf->hw.device_id) { in ice_init_feature_support()
3890 if (ice_is_phy_rclk_in_netlist(&pf->hw)) in ice_init_feature_support()
3892 /* If we don't own the timer - don't enable other caps */ in ice_init_feature_support()
3895 if (ice_is_cgu_in_netlist(&pf->hw)) in ice_init_feature_support()
3897 if (ice_is_clock_mux_in_netlist(&pf->hw)) in ice_init_feature_support()
3899 if (ice_gnss_is_module_present(&pf->hw)) in ice_init_feature_support()
3906 if (pf->hw.mac_type == ICE_MAC_E830) { in ice_init_feature_support()
3913 * ice_vsi_update_security - update security block in VSI
3922 ctx.info = vsi->info; in ice_vsi_update_security()
3926 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) in ice_vsi_update_security()
3927 return -ENODEV; in ice_vsi_update_security()
3929 vsi->info = ctx.info; in ice_vsi_update_security()
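/* Editor's sketch (kernel context): ice_vsi_update_security() above and
 * ice_vsi_update_local_lb() below share one pattern -- snapshot
 * vsi->info into a context, mark the section being changed valid, push
 * it with ice_update_vsi(), and commit the local copy only on success.
 * Generalized form; this helper does not exist in the driver:
 */
static int ice_vsi_push_ctx_update(struct ice_vsi *vsi,
				   struct ice_vsi_ctx *ctx)
{
	if (ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL))
		return -ENODEV;

	vsi->info = ctx->info;	/* commit only after HW accepted it */
	return 0;
}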
3934 * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
3939 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF | in ice_vsi_ctx_set_antispoof()
3945 * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
3950 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF & in ice_vsi_ctx_clear_antispoof()
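/* Editor's note as code: the set/clear pair above toggles the MAC
 * anti-spoof enable together with a second security bit (continuation
 * elided in this listing).  The clear path's "x &= ~A & ~B" form is
 * exactly equivalent to "x &= ~(A | B)"; runnable demonstration with
 * stand-in flag values:
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned char A = 1 << 2, B = 1 << 4;	/* stand-in flag bits */
	unsigned char x = 0xff, y = 0xff;

	x &= ~A & ~B;
	y &= ~(A | B);
	assert(x == y);
	printf("0x%02x == 0x%02x\n", x, y);
	return 0;
}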
3956 * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
3964 .info = vsi->info, in ice_vsi_update_local_lb()
3973 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) in ice_vsi_update_local_lb()
3974 return -ENODEV; in ice_vsi_update_local_lb()
3976 vsi->info = ctx.info; in ice_vsi_update_local_lb()