xref: /linux/drivers/net/ethernet/intel/ice/ice_lib.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_base.h"
6 #include "ice_flow.h"
7 #include "ice_lib.h"
8 #include "ice_fltr.h"
9 #include "ice_dcb_lib.h"
10 #include "ice_type.h"
11 #include "ice_vsi_vlan_ops.h"
12 
13 /**
14  * ice_vsi_type_str - maps VSI type enum to string equivalents
15  * @vsi_type: VSI type enum
16  */
17 const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
18 {
19 	switch (vsi_type) {
20 	case ICE_VSI_PF:
21 		return "ICE_VSI_PF";
22 	case ICE_VSI_VF:
23 		return "ICE_VSI_VF";
24 	case ICE_VSI_SF:
25 		return "ICE_VSI_SF";
26 	case ICE_VSI_CTRL:
27 		return "ICE_VSI_CTRL";
28 	case ICE_VSI_CHNL:
29 		return "ICE_VSI_CHNL";
30 	case ICE_VSI_LB:
31 		return "ICE_VSI_LB";
32 	default:
33 		return "unknown";
34 	}
35 }
36 
37 /**
38  * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
39  * @vsi: the VSI being configured
40  * @ena: start or stop the Rx rings
41  *
42  * First enable/disable all of the Rx rings, flush any remaining writes, and
43  * then verify that they have all been enabled/disabled successfully. Batching
44  * the register writes this way lets them all post before we start waiting
45  * for the hardware changes to complete.
46  */
47 static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
48 {
49 	int ret = 0;
50 	u16 i;
51 
52 	ice_for_each_rxq(vsi, i)
53 		ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
54 
55 	ice_flush(&vsi->back->hw);
56 
57 	ice_for_each_rxq(vsi, i) {
58 		ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
59 		if (ret)
60 			break;
61 	}
62 
63 	return ret;
64 }
65 
66 /**
67  * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
68  * @vsi: VSI pointer
69  *
70  * On error: returns error code (negative)
71  * On success: returns 0
72  */
73 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
74 {
75 	struct ice_pf *pf = vsi->back;
76 	struct device *dev;
77 
78 	dev = ice_pf_to_dev(pf);
79 	if (vsi->type == ICE_VSI_CHNL)
80 		return 0;
81 
82 	/* allocate memory for both Tx and Rx ring pointers */
83 	vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
84 				     sizeof(*vsi->tx_rings), GFP_KERNEL);
85 	if (!vsi->tx_rings)
86 		return -ENOMEM;
87 
88 	vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
89 				     sizeof(*vsi->rx_rings), GFP_KERNEL);
90 	if (!vsi->rx_rings)
91 		goto err_rings;
92 
93 	/* txq_map needs to have enough space to track both Tx (stack) rings
94 	 * and XDP rings; at this point vsi->num_xdp_txq might not be set,
95 	 * so use num_possible_cpus() as we always want to provide one XDP
96 	 * ring per CPU, regardless of the queue count the user may have
97 	 * set via ethtool's set_channels() callback.
98 	 */
99 	vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
100 				    sizeof(*vsi->txq_map), GFP_KERNEL);
101 
102 	if (!vsi->txq_map)
103 		goto err_txq_map;
104 
105 	vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
106 				    sizeof(*vsi->rxq_map), GFP_KERNEL);
107 	if (!vsi->rxq_map)
108 		goto err_rxq_map;
109 
110 	/* There is no need to allocate q_vectors for a loopback VSI. */
111 	if (vsi->type == ICE_VSI_LB)
112 		return 0;
113 
114 	/* allocate memory for q_vector pointers */
115 	vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
116 				      sizeof(*vsi->q_vectors), GFP_KERNEL);
117 	if (!vsi->q_vectors)
118 		goto err_vectors;
119 
120 	return 0;
121 
122 err_vectors:
123 	devm_kfree(dev, vsi->rxq_map);
124 err_rxq_map:
125 	devm_kfree(dev, vsi->txq_map);
126 err_txq_map:
127 	devm_kfree(dev, vsi->rx_rings);
128 err_rings:
129 	devm_kfree(dev, vsi->tx_rings);
130 	return -ENOMEM;
131 }
132 
133 /**
134  * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
135  * @vsi: the VSI being configured
136  */
137 static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
138 {
139 	switch (vsi->type) {
140 	case ICE_VSI_PF:
141 	case ICE_VSI_SF:
142 	case ICE_VSI_CTRL:
143 	case ICE_VSI_LB:
144 		/* a user could change the values of num_[tr]x_desc using
145 		 * ethtool -G so we should keep those values instead of
146 		 * overwriting them with the defaults.
147 		 */
148 		if (!vsi->num_rx_desc)
149 			vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
150 		if (!vsi->num_tx_desc)
151 			vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
152 		break;
153 	default:
154 		dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
155 			vsi->type);
156 		break;
157 	}
158 }
159 
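/**
 * ice_get_rxq_count - get the default Rx queue count for the PF VSI
 * @pf: board private structure
 *
 * Bounded by the number of Rx queues still available on the device and the
 * number of online CPUs.
 */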
160 static u16 ice_get_rxq_count(struct ice_pf *pf)
161 {
162 	return min(ice_get_avail_rxq_count(pf), num_online_cpus());
163 }
164 
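/**
 * ice_get_txq_count - get the default Tx queue count for the PF VSI
 * @pf: board private structure
 *
 * Bounded by the number of Tx queues still available on the device and the
 * number of online CPUs.
 */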
165 static u16 ice_get_txq_count(struct ice_pf *pf)
166 {
167 	return min(ice_get_avail_txq_count(pf), num_online_cpus());
168 }
169 
170 /**
171  * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
172  * @vsi: the VSI being configured
173  *
174  * Sets the queue, vector and descriptor counts based on the VSI type.
175  */
176 static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
177 {
178 	enum ice_vsi_type vsi_type = vsi->type;
179 	struct ice_pf *pf = vsi->back;
180 	struct ice_vf *vf = vsi->vf;
181 
182 	if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
183 		return;
184 
185 	switch (vsi_type) {
186 	case ICE_VSI_PF:
187 		if (vsi->req_txq) {
188 			vsi->alloc_txq = vsi->req_txq;
189 			vsi->num_txq = vsi->req_txq;
190 		} else {
191 			vsi->alloc_txq = ice_get_txq_count(pf);
192 		}
193 
194 		pf->num_lan_tx = vsi->alloc_txq;
195 
196 		/* only 1 Rx queue unless RSS is enabled */
197 		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
198 			vsi->alloc_rxq = 1;
199 		} else {
200 			if (vsi->req_rxq) {
201 				vsi->alloc_rxq = vsi->req_rxq;
202 				vsi->num_rxq = vsi->req_rxq;
203 			} else {
204 				vsi->alloc_rxq = ice_get_rxq_count(pf);
205 			}
206 		}
207 
208 		pf->num_lan_rx = vsi->alloc_rxq;
209 
210 		vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq);
211 		break;
212 	case ICE_VSI_SF:
213 		vsi->alloc_txq = 1;
214 		vsi->alloc_rxq = 1;
215 		vsi->num_q_vectors = 1;
216 		vsi->irq_dyn_alloc = true;
217 		break;
218 	case ICE_VSI_VF:
219 		if (vf->num_req_qs)
220 			vf->num_vf_qs = vf->num_req_qs;
221 		vsi->alloc_txq = vf->num_vf_qs;
222 		vsi->alloc_rxq = vf->num_vf_qs;
223 		/* vf->num_msix includes the VF miscellaneous vector and the
224 		 * data queue interrupts. Since vsi->num_q_vectors is the
225 		 * number of queue vectors, subtract 1 (ICE_NONQ_VECS_VF)
226 		 * from the original vector count.
227 		 */
228 		vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF;
229 		break;
230 	case ICE_VSI_CTRL:
231 		vsi->alloc_txq = 1;
232 		vsi->alloc_rxq = 1;
233 		vsi->num_q_vectors = 1;
234 		break;
235 	case ICE_VSI_CHNL:
236 		vsi->alloc_txq = 0;
237 		vsi->alloc_rxq = 0;
238 		break;
239 	case ICE_VSI_LB:
240 		vsi->alloc_txq = 1;
241 		vsi->alloc_rxq = 1;
242 		break;
243 	default:
244 		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
245 		break;
246 	}
247 
248 	ice_vsi_set_num_desc(vsi);
249 }
250 
251 /**
252  * ice_get_free_slot - get the next free (NULL) location index in array
253  * @array: array to search
254  * @size: size of the array
255  * @curr: last known occupied index to be used as a search hint
256  *
257  * void * is being used to keep the functionality generic. This lets us use this
258  * function on any array of pointers.
259  */
260 static int ice_get_free_slot(void *array, int size, int curr)
261 {
262 	int **tmp_array = (int **)array;
263 	int next;
264 
265 	if (curr < (size - 1) && !tmp_array[curr + 1]) {
266 		next = curr + 1;
267 	} else {
268 		int i = 0;
269 
270 		while ((i < size) && (tmp_array[i]))
271 			i++;
272 		if (i == size)
273 			next = ICE_NO_VSI;
274 		else
275 			next = i;
276 	}
277 	return next;
278 }
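
/* For illustration: with an array of four slots { A, B, NULL, C },
 * ice_get_free_slot(array, 4, 1) returns 2 directly via the curr + 1 hint,
 * while ice_get_free_slot(array, 4, 3) falls back to a linear scan and also
 * returns 2. If every slot is occupied, ICE_NO_VSI is returned.
 */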
279 
280 /**
281  * ice_vsi_delete_from_hw - delete a VSI from the switch
282  * @vsi: pointer to VSI being removed
283  */
284 static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
285 {
286 	struct ice_pf *pf = vsi->back;
287 	struct ice_vsi_ctx *ctxt;
288 	int status;
289 
290 	ice_fltr_remove_all(vsi);
291 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
292 	if (!ctxt)
293 		return;
294 
295 	if (vsi->type == ICE_VSI_VF)
296 		ctxt->vf_num = vsi->vf->vf_id;
297 	ctxt->vsi_num = vsi->vsi_num;
298 
299 	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
300 
301 	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
302 	if (status)
303 		dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
304 			vsi->vsi_num, status);
305 
306 	kfree(ctxt);
307 }
308 
309 /**
310  * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
311  * @vsi: pointer to VSI being cleared
312  */
313 static void ice_vsi_free_arrays(struct ice_vsi *vsi)
314 {
315 	struct ice_pf *pf = vsi->back;
316 	struct device *dev;
317 
318 	dev = ice_pf_to_dev(pf);
319 
320 	/* free the ring and vector containers */
321 	devm_kfree(dev, vsi->q_vectors);
322 	vsi->q_vectors = NULL;
323 	devm_kfree(dev, vsi->tx_rings);
324 	vsi->tx_rings = NULL;
325 	devm_kfree(dev, vsi->rx_rings);
326 	vsi->rx_rings = NULL;
327 	devm_kfree(dev, vsi->txq_map);
328 	vsi->txq_map = NULL;
329 	devm_kfree(dev, vsi->rxq_map);
330 	vsi->rxq_map = NULL;
331 }
332 
333 /**
334  * ice_vsi_free_stats - Free the ring statistics structures
335  * @vsi: VSI pointer
336  */
337 static void ice_vsi_free_stats(struct ice_vsi *vsi)
338 {
339 	struct ice_vsi_stats *vsi_stat;
340 	struct ice_pf *pf = vsi->back;
341 	int i;
342 
343 	if (vsi->type == ICE_VSI_CHNL)
344 		return;
345 	if (!pf->vsi_stats)
346 		return;
347 
348 	vsi_stat = pf->vsi_stats[vsi->idx];
349 	if (!vsi_stat)
350 		return;
351 
352 	ice_for_each_alloc_txq(vsi, i) {
353 		if (vsi_stat->tx_ring_stats[i]) {
354 			kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
355 			WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
356 		}
357 	}
358 
359 	ice_for_each_alloc_rxq(vsi, i) {
360 		if (vsi_stat->rx_ring_stats[i]) {
361 			kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
362 			WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
363 		}
364 	}
365 
366 	kfree(vsi_stat->tx_ring_stats);
367 	kfree(vsi_stat->rx_ring_stats);
368 	kfree(vsi_stat);
369 	pf->vsi_stats[vsi->idx] = NULL;
370 }
371 
372 /**
373  * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
374  * @vsi: VSI which is having stats allocated
375  */
376 static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
377 {
378 	struct ice_ring_stats **tx_ring_stats;
379 	struct ice_ring_stats **rx_ring_stats;
380 	struct ice_vsi_stats *vsi_stats;
381 	struct ice_pf *pf = vsi->back;
382 	u16 i;
383 
384 	vsi_stats = pf->vsi_stats[vsi->idx];
385 	tx_ring_stats = vsi_stats->tx_ring_stats;
386 	rx_ring_stats = vsi_stats->rx_ring_stats;
387 
388 	/* Allocate Tx ring stats */
389 	ice_for_each_alloc_txq(vsi, i) {
390 		struct ice_ring_stats *ring_stats;
391 		struct ice_tx_ring *ring;
392 
393 		ring = vsi->tx_rings[i];
394 		ring_stats = tx_ring_stats[i];
395 
396 		if (!ring_stats) {
397 			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
398 			if (!ring_stats)
399 				goto err_out;
400 
401 			WRITE_ONCE(tx_ring_stats[i], ring_stats);
402 		}
403 
404 		ring->ring_stats = ring_stats;
405 	}
406 
407 	/* Allocate Rx ring stats */
408 	ice_for_each_alloc_rxq(vsi, i) {
409 		struct ice_ring_stats *ring_stats;
410 		struct ice_rx_ring *ring;
411 
412 		ring = vsi->rx_rings[i];
413 		ring_stats = rx_ring_stats[i];
414 
415 		if (!ring_stats) {
416 			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
417 			if (!ring_stats)
418 				goto err_out;
419 
420 			WRITE_ONCE(rx_ring_stats[i], ring_stats);
421 		}
422 
423 		ring->ring_stats = ring_stats;
424 	}
425 
426 	return 0;
427 
428 err_out:
429 	ice_vsi_free_stats(vsi);
430 	return -ENOMEM;
431 }
432 
433 /**
434  * ice_vsi_free - clean up and deallocate the provided VSI
435  * @vsi: pointer to VSI being cleared
436  *
437  * This deallocates the VSI's queue resources, removes it from the PF's
438  * VSI array if necessary, and deallocates the VSI
439  */
440 void ice_vsi_free(struct ice_vsi *vsi)
441 {
442 	struct ice_pf *pf = NULL;
443 	struct device *dev;
444 
445 	if (!vsi || !vsi->back)
446 		return;
447 
448 	pf = vsi->back;
449 	dev = ice_pf_to_dev(pf);
450 
451 	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
452 		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
453 		return;
454 	}
455 
456 	mutex_lock(&pf->sw_mutex);
457 	/* updates the PF for this cleared VSI */
458 
459 	pf->vsi[vsi->idx] = NULL;
460 	pf->next_vsi = vsi->idx;
461 
462 	ice_vsi_free_stats(vsi);
463 	ice_vsi_free_arrays(vsi);
464 	mutex_destroy(&vsi->xdp_state_lock);
465 	mutex_unlock(&pf->sw_mutex);
466 	devm_kfree(dev, vsi);
467 }
468 
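/**
 * ice_vsi_delete - delete the VSI from hardware and free its resources
 * @vsi: pointer to VSI being removed
 */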
469 void ice_vsi_delete(struct ice_vsi *vsi)
470 {
471 	ice_vsi_delete_from_hw(vsi);
472 	ice_vsi_free(vsi);
473 }
474 
475 /**
476  * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
477  * @irq: interrupt number
478  * @data: pointer to a q_vector
479  */
480 static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
481 {
482 	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
483 
484 	if (!q_vector->tx.tx_ring)
485 		return IRQ_HANDLED;
486 
487 	ice_clean_ctrl_rx_irq(q_vector->rx.rx_ring);
488 	ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
489 
490 	return IRQ_HANDLED;
491 }
492 
493 /**
494  * ice_msix_clean_rings - MSIX mode Interrupt Handler
495  * @irq: interrupt number
496  * @data: pointer to a q_vector
497  */
498 static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
499 {
500 	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
501 
502 	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
503 		return IRQ_HANDLED;
504 
505 	q_vector->total_events++;
506 
507 	napi_schedule(&q_vector->napi);
508 
509 	return IRQ_HANDLED;
510 }
511 
512 /**
513  * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
514  * @vsi: VSI pointer
515  */
516 static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
517 {
518 	struct ice_vsi_stats *vsi_stat;
519 	struct ice_pf *pf = vsi->back;
520 
521 	if (vsi->type == ICE_VSI_CHNL)
522 		return 0;
523 	if (!pf->vsi_stats)
524 		return -ENOENT;
525 
526 	/* realloc will happen in rebuild path */
527 	if (pf->vsi_stats[vsi->idx])
528 		return 0;
529 
530 	vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
531 	if (!vsi_stat)
532 		return -ENOMEM;
533 
534 	vsi_stat->tx_ring_stats =
535 		kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
536 			GFP_KERNEL);
537 	if (!vsi_stat->tx_ring_stats)
538 		goto err_alloc_tx;
539 
540 	vsi_stat->rx_ring_stats =
541 		kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
542 			GFP_KERNEL);
543 	if (!vsi_stat->rx_ring_stats)
544 		goto err_alloc_rx;
545 
546 	pf->vsi_stats[vsi->idx] = vsi_stat;
547 
548 	return 0;
549 
550 err_alloc_rx:
551 	kfree(vsi_stat->rx_ring_stats);
552 err_alloc_tx:
553 	kfree(vsi_stat->tx_ring_stats);
554 	kfree(vsi_stat);
555 	pf->vsi_stats[vsi->idx] = NULL;
556 	return -ENOMEM;
557 }
558 
559 /**
560  * ice_vsi_alloc_def - set default values for already allocated VSI
561  * @vsi: ptr to VSI
562  * @ch: ptr to channel
563  */
564 static int
565 ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
566 {
567 	if (vsi->type != ICE_VSI_CHNL) {
568 		ice_vsi_set_num_qs(vsi);
569 		if (ice_vsi_alloc_arrays(vsi))
570 			return -ENOMEM;
571 	}
572 
573 	vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev);
574 
575 	switch (vsi->type) {
576 	case ICE_VSI_PF:
577 	case ICE_VSI_SF:
578 		/* Setup default MSIX irq handler for VSI */
579 		vsi->irq_handler = ice_msix_clean_rings;
580 		break;
581 	case ICE_VSI_CTRL:
582 		/* Setup ctrl VSI MSIX irq handler */
583 		vsi->irq_handler = ice_msix_clean_ctrl_vsi;
584 		break;
585 	case ICE_VSI_CHNL:
586 		if (!ch)
587 			return -EINVAL;
588 
589 		vsi->num_rxq = ch->num_rxq;
590 		vsi->num_txq = ch->num_txq;
591 		vsi->next_base_q = ch->base_q;
592 		break;
593 	case ICE_VSI_VF:
594 	case ICE_VSI_LB:
595 		break;
596 	default:
597 		ice_vsi_free_arrays(vsi);
598 		return -EINVAL;
599 	}
600 
601 	return 0;
602 }
603 
604 /**
605  * ice_vsi_alloc - Allocates the next available struct VSI in the PF
606  * @pf: board private structure
607  *
608  * Reserves a VSI index from the PF and allocates an empty VSI structure
609  * without a type. The VSI structure must later be initialized by calling
610  * ice_vsi_cfg().
611  *
612  * returns a pointer to a VSI on success, NULL on failure.
613  */
614 struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
615 {
616 	struct device *dev = ice_pf_to_dev(pf);
617 	struct ice_vsi *vsi = NULL;
618 
619 	/* Need to protect the allocation of the VSIs at the PF level */
620 	mutex_lock(&pf->sw_mutex);
621 
622 	/* If we have already allocated our maximum number of VSIs,
623 	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
624 	 * is available to be populated
625 	 */
626 	if (pf->next_vsi == ICE_NO_VSI) {
627 		dev_dbg(dev, "out of VSI slots!\n");
628 		goto unlock_pf;
629 	}
630 
631 	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
632 	if (!vsi)
633 		goto unlock_pf;
634 
635 	vsi->back = pf;
636 	set_bit(ICE_VSI_DOWN, vsi->state);
637 
638 	/* fill slot and make note of the index */
639 	vsi->idx = pf->next_vsi;
640 	pf->vsi[pf->next_vsi] = vsi;
641 
642 	/* prepare pf->next_vsi for next use */
643 	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
644 					 pf->next_vsi);
645 
646 	mutex_init(&vsi->xdp_state_lock);
647 
648 unlock_pf:
649 	mutex_unlock(&pf->sw_mutex);
650 	return vsi;
651 }
652 
653 /**
654  * ice_alloc_fd_res - Allocate FD resource for a VSI
655  * @vsi: pointer to the ice_vsi
656  *
657  * This allocates the FD resources
658  *
659  * Returns 0 on success, -EPERM on no-op and -EINVAL on invalid configuration
660  */
661 static int ice_alloc_fd_res(struct ice_vsi *vsi)
662 {
663 	struct ice_pf *pf = vsi->back;
664 	u32 g_val, b_val;
665 
666 	/* Flow Director filters are only allocated/assigned to the PF VSI or
667 	 * CHNL VSI which passes the traffic. The CTRL VSI is only used to
668 	 * add/delete filters so resources are not allocated to it
669 	 */
670 	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
671 		return -EPERM;
672 
673 	if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
674 	      vsi->type == ICE_VSI_CHNL))
675 		return -EPERM;
676 
677 	/* FD filters from guaranteed pool per VSI */
678 	g_val = pf->hw.func_caps.fd_fltr_guar;
679 	if (!g_val)
680 		return -EPERM;
681 
682 	/* FD filters from best effort pool */
683 	b_val = pf->hw.func_caps.fd_fltr_best_effort;
684 	if (!b_val)
685 		return -EPERM;
686 
687 	/* PF main VSI gets only 64 FD resources from guaranteed pool
688 	 * when ADQ is configured.
689 	 */
690 #define ICE_PF_VSI_GFLTR	64
691 
692 	/* determine FD filter resources per VSI from shared(best effort) and
693 	 * dedicated pool
694 	 */
695 	if (vsi->type == ICE_VSI_PF) {
696 		vsi->num_gfltr = g_val;
697 		/* if MQPRIO is configured, main VSI doesn't get all FD
698 		 * resources from guaranteed pool. PF VSI gets 64 FD resources
699 		 */
700 		if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
701 			if (g_val < ICE_PF_VSI_GFLTR)
702 				return -EPERM;
703 			/* allow bare minimum entries for PF VSI */
704 			vsi->num_gfltr = ICE_PF_VSI_GFLTR;
705 		}
706 
707 		/* each VSI gets same "best_effort" quota */
708 		vsi->num_bfltr = b_val;
709 	} else if (vsi->type == ICE_VSI_VF) {
710 		vsi->num_gfltr = 0;
711 
712 		/* each VSI gets same "best_effort" quota */
713 		vsi->num_bfltr = b_val;
714 	} else {
715 		struct ice_vsi *main_vsi;
716 		int numtc;
717 
718 		main_vsi = ice_get_main_vsi(pf);
719 		if (!main_vsi)
720 			return -EPERM;
721 
722 		if (!main_vsi->all_numtc)
723 			return -EINVAL;
724 
725 		/* figure out ADQ numtc */
726 		numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;
727 
728 		/* only one TC but still asking resources for channels,
729 		 * invalid config
730 		 */
731 		if (numtc < ICE_CHNL_START_TC)
732 			return -EPERM;
733 
734 		g_val -= ICE_PF_VSI_GFLTR;
735 		/* each channel VSI gets an equal share from the guaranteed pool */
736 		vsi->num_gfltr = g_val / numtc;
737 
738 		/* each VSI gets same "best_effort" quota */
739 		vsi->num_bfltr = b_val;
740 	}
741 
742 	return 0;
743 }
744 
745 /**
746  * ice_vsi_get_qs - Assign queues from PF to VSI
747  * @vsi: the VSI to assign queues to
748  *
749  * Returns 0 on success and a negative value on error
750  */
751 static int ice_vsi_get_qs(struct ice_vsi *vsi)
752 {
753 	struct ice_pf *pf = vsi->back;
754 	struct ice_qs_cfg tx_qs_cfg = {
755 		.qs_mutex = &pf->avail_q_mutex,
756 		.pf_map = pf->avail_txqs,
757 		.pf_map_size = pf->max_pf_txqs,
758 		.q_count = vsi->alloc_txq,
759 		.scatter_count = ICE_MAX_SCATTER_TXQS,
760 		.vsi_map = vsi->txq_map,
761 		.vsi_map_offset = 0,
762 		.mapping_mode = ICE_VSI_MAP_CONTIG
763 	};
764 	struct ice_qs_cfg rx_qs_cfg = {
765 		.qs_mutex = &pf->avail_q_mutex,
766 		.pf_map = pf->avail_rxqs,
767 		.pf_map_size = pf->max_pf_rxqs,
768 		.q_count = vsi->alloc_rxq,
769 		.scatter_count = ICE_MAX_SCATTER_RXQS,
770 		.vsi_map = vsi->rxq_map,
771 		.vsi_map_offset = 0,
772 		.mapping_mode = ICE_VSI_MAP_CONTIG
773 	};
774 	int ret;
775 
776 	if (vsi->type == ICE_VSI_CHNL)
777 		return 0;
778 
779 	ret = __ice_vsi_get_qs(&tx_qs_cfg);
780 	if (ret)
781 		return ret;
782 	vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;
783 
784 	ret = __ice_vsi_get_qs(&rx_qs_cfg);
785 	if (ret)
786 		return ret;
787 	vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;
788 
789 	return 0;
790 }
791 
792 /**
793  * ice_vsi_put_qs - Release queues from VSI to PF
794  * @vsi: the VSI that is going to release queues
795  */
796 static void ice_vsi_put_qs(struct ice_vsi *vsi)
797 {
798 	struct ice_pf *pf = vsi->back;
799 	int i;
800 
801 	mutex_lock(&pf->avail_q_mutex);
802 
803 	ice_for_each_alloc_txq(vsi, i) {
804 		clear_bit(vsi->txq_map[i], pf->avail_txqs);
805 		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
806 	}
807 
808 	ice_for_each_alloc_rxq(vsi, i) {
809 		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
810 		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
811 	}
812 
813 	mutex_unlock(&pf->avail_q_mutex);
814 }
815 
816 /**
817  * ice_is_safe_mode - check if the driver is in safe mode
818  * @pf: pointer to the PF struct
819  *
820  * returns true if driver is in safe mode, false otherwise
821  */
822 bool ice_is_safe_mode(struct ice_pf *pf)
823 {
824 	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
825 }
826 
827 /**
828  * ice_is_rdma_ena - check if RDMA support is enabled
829  * @pf: pointer to the PF struct
830  *
831  * returns true if RDMA is currently supported, false otherwise
832  */
833 bool ice_is_rdma_ena(struct ice_pf *pf)
834 {
835 	union devlink_param_value value;
836 	int err;
837 
838 	err = devl_param_driverinit_value_get(priv_to_devlink(pf),
839 					      DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
840 					      &value);
841 	return err ? test_bit(ICE_FLAG_RDMA_ENA, pf->flags) : value.vbool;
842 }
843 
844 /**
845  * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
846  * @vsi: the VSI being cleaned up
847  *
848  * This function deletes RSS input set for all flows that were configured
849  * for this VSI
850  */
851 static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
852 {
853 	struct ice_pf *pf = vsi->back;
854 	int status;
855 
856 	if (ice_is_safe_mode(pf))
857 		return;
858 
859 	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
860 	if (status)
861 		dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
862 			vsi->vsi_num, status);
863 }
864 
865 /**
866  * ice_rss_clean - Delete RSS related VSI structures and configuration
867  * @vsi: the VSI being removed
868  */
869 static void ice_rss_clean(struct ice_vsi *vsi)
870 {
871 	struct ice_pf *pf = vsi->back;
872 	struct device *dev;
873 
874 	dev = ice_pf_to_dev(pf);
875 
876 	devm_kfree(dev, vsi->rss_hkey_user);
877 	devm_kfree(dev, vsi->rss_lut_user);
878 
879 	ice_vsi_clean_rss_flow_fld(vsi);
880 	/* remove RSS replay list */
881 	if (!ice_is_safe_mode(pf))
882 		ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
883 }
884 
885 /**
886  * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
887  * @vsi: the VSI being configured
888  */
889 static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
890 {
891 	struct ice_hw_common_caps *cap;
892 	struct ice_pf *pf = vsi->back;
893 	u16 max_rss_size;
894 
895 	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
896 		vsi->rss_size = 1;
897 		return;
898 	}
899 
900 	cap = &pf->hw.func_caps.common_cap;
901 	max_rss_size = BIT(cap->rss_table_entry_width);
902 	switch (vsi->type) {
903 	case ICE_VSI_CHNL:
904 	case ICE_VSI_PF:
905 		/* PF VSI will inherit RSS instance of PF */
906 		vsi->rss_table_size = (u16)cap->rss_table_size;
907 		if (vsi->type == ICE_VSI_CHNL)
908 			vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
909 		else
910 			vsi->rss_size = min_t(u16, num_online_cpus(),
911 					      max_rss_size);
912 		vsi->rss_lut_type = ICE_LUT_PF;
913 		break;
914 	case ICE_VSI_SF:
915 		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
916 		vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
917 		vsi->rss_lut_type = ICE_LUT_VSI;
918 		break;
919 	case ICE_VSI_VF:
920 		/* VF VSI will get a small RSS table.
921 		 * For VSI_LUT, LUT size should be set to 64 bytes.
922 		 */
923 		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
924 		vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
925 		vsi->rss_lut_type = ICE_LUT_VSI;
926 		break;
927 	case ICE_VSI_LB:
928 		break;
929 	default:
930 		dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
931 			ice_vsi_type_str(vsi->type));
932 		break;
933 	}
934 }
935 
936 /**
937  * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
938  * @hw: HW structure used to determine the VLAN mode of the device
939  * @ctxt: the VSI context being set
940  *
941  * This initializes a default VSI context for all sections except the Queues.
942  */
943 static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
944 {
945 	u32 table = 0;
946 
947 	memset(&ctxt->info, 0, sizeof(ctxt->info));
948 	/* VSIs should be allocated from the shared pool */
949 	ctxt->alloc_from_pool = true;
950 	/* Src pruning enabled by default */
951 	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
952 	/* Traffic from VSI can be sent to LAN */
953 	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
954 	/* allow all untagged/tagged packets by default on Tx */
955 	ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M,
956 						 ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL);
957 	/* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
958 	 * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
959 	 *
960 	 * DVM - leave inner VLAN in packet by default
961 	 */
962 	if (ice_is_dvm_ena(hw)) {
963 		ctxt->info.inner_vlan_flags |=
964 			FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
965 				   ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
966 		ctxt->info.outer_vlan_flags =
967 			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
968 				   ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
969 		ctxt->info.outer_vlan_flags |=
970 			FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M,
971 				   ICE_AQ_VSI_OUTER_TAG_VLAN_8100);
972 		ctxt->info.outer_vlan_flags |=
973 			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
974 				   ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
975 	}
976 	/* Have 1:1 UP mapping for both ingress/egress tables */
977 	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
978 	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
979 	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
980 	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
981 	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
982 	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
983 	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
984 	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
985 	ctxt->info.ingress_table = cpu_to_le32(table);
986 	ctxt->info.egress_table = cpu_to_le32(table);
987 	/* Have 1:1 UP mapping for outer to inner UP table */
988 	ctxt->info.outer_up_table = cpu_to_le32(table);
989 	/* No outer tag support; outer_tag_flags remains zero */
990 }
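
/* For illustration (assuming each UP field in the table is 3 bits wide,
 * i.e. ICE_AQ_VSI_UP_TABLE_UPn_S == 3 * n), the 1:1 mapping built above
 * works out to:
 *
 *	table = 0 | (1 << 3) | (2 << 6) | (3 << 9) | (4 << 12) |
 *		(5 << 15) | (6 << 18) | (7 << 21) = 0x00FAC688
 *
 * so user priority n is translated back to user priority n in both the
 * ingress and egress directions.
 */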
991 
992 /**
993  * ice_vsi_setup_q_map - Setup a VSI queue map
994  * @vsi: the VSI being configured
995  * @ctxt: VSI context structure
996  */
997 static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
998 {
999 	u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
1000 	u16 num_txq_per_tc, num_rxq_per_tc;
1001 	u16 qcount_tx = vsi->alloc_txq;
1002 	u16 qcount_rx = vsi->alloc_rxq;
1003 	u8 netdev_tc = 0;
1004 	int i;
1005 
1006 	if (!vsi->tc_cfg.numtc) {
1007 		/* at least TC0 should be enabled by default */
1008 		vsi->tc_cfg.numtc = 1;
1009 		vsi->tc_cfg.ena_tc = 1;
1010 	}
1011 
1012 	num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
1013 	if (!num_rxq_per_tc)
1014 		num_rxq_per_tc = 1;
1015 	num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
1016 	if (!num_txq_per_tc)
1017 		num_txq_per_tc = 1;
1018 
1019 	/* find the (rounded up) power-of-2 of qcount */
1020 	pow = (u16)order_base_2(num_rxq_per_tc);
1021 
1022 	/* TC mapping is a function of the number of Rx queues assigned to the
1023 	 * VSI for each traffic class and the offset of these queues.
1024 	 * The first 10 bits are the queue offset for TC0; the next 4 bits hold
1025 	 * the number of queues allocated to TC0, stored as a power-of-2.
1026 	 *
1027 	 * If a TC is not enabled, the queue offset is set to 0 and one queue is
1028 	 * allocated; this way, traffic for the given TC will be sent to the
1029 	 * default queue.
1030 	 *
1031 	 * Setup number and offset of Rx queues for all TCs for the VSI
1032 	 */
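	/* For illustration, using the layout just described: a TC with 16 Rx
	 * queues starting at queue 32 would be encoded as
	 * qmap = 32 | (order_base_2(16) << 10) = 32 | (4 << 10), i.e. queues
	 * 32-47 with the count stored as a power-of-2 exponent.
	 */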
1033 	ice_for_each_traffic_class(i) {
1034 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
1035 			/* TC is not enabled */
1036 			vsi->tc_cfg.tc_info[i].qoffset = 0;
1037 			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
1038 			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
1039 			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
1040 			ctxt->info.tc_mapping[i] = 0;
1041 			continue;
1042 		}
1043 
1044 		/* TC is enabled */
1045 		vsi->tc_cfg.tc_info[i].qoffset = offset;
1046 		vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
1047 		vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
1048 		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
1049 
1050 		qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
1051 		qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
1052 		offset += num_rxq_per_tc;
1053 		tx_count += num_txq_per_tc;
1054 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1055 	}
1056 
1057 	/* If offset is non-zero, it was accumulated across the enabled TCs and
1058 	 * is the total Rx queue count for this VSI. Otherwise fall back to
1059 	 * num_rxq_per_tc, which is always correct and non-zero because it is
1060 	 * derived from the VSI's allocated Rx queues, which is at least 1
1061 	 * (hence tx_count will be at least 1 as well).
1062 	 */
1063 	if (offset)
1064 		rx_count = offset;
1065 	else
1066 		rx_count = num_rxq_per_tc;
1067 
1068 	if (rx_count > vsi->alloc_rxq) {
1069 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
1070 			rx_count, vsi->alloc_rxq);
1071 		return -EINVAL;
1072 	}
1073 
1074 	if (tx_count > vsi->alloc_txq) {
1075 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
1076 			tx_count, vsi->alloc_txq);
1077 		return -EINVAL;
1078 	}
1079 
1080 	vsi->num_txq = tx_count;
1081 	vsi->num_rxq = rx_count;
1082 
1083 	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
1084 		dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
1085 		/* since there is a chance that num_rxq could have been changed
1086 		 * in the above for loop, make num_txq equal to num_rxq.
1087 		 */
1088 		vsi->num_txq = vsi->num_rxq;
1089 	}
1090 
1091 	/* Rx queue mapping */
1092 	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
1093 	/* q_mapping buffer holds the info for the first queue allocated for
1094 	 * this VSI in the PF space and also the number of queues associated
1095 	 * with this VSI.
1096 	 */
1097 	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
1098 	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
1099 
1100 	return 0;
1101 }
1102 
1103 /**
1104  * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
1105  * @ctxt: the VSI context being set
1106  * @vsi: the VSI being configured
1107  */
1108 static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
1109 {
1110 	u8 dflt_q_group, dflt_q_prio;
1111 	u16 dflt_q, report_q, val;
1112 
1113 	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
1114 	    vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
1115 		return;
1116 
1117 	val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1118 	ctxt->info.valid_sections |= cpu_to_le16(val);
1119 	dflt_q = 0;
1120 	dflt_q_group = 0;
1121 	report_q = 0;
1122 	dflt_q_prio = 0;
1123 
1124 	/* enable flow director filtering/programming */
1125 	val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
1126 	ctxt->info.fd_options = cpu_to_le16(val);
1127 	/* max of allocated flow director filters */
1128 	ctxt->info.max_fd_fltr_dedicated =
1129 			cpu_to_le16(vsi->num_gfltr);
1130 	/* max of shared flow director filters any VSI may program */
1131 	ctxt->info.max_fd_fltr_shared =
1132 			cpu_to_le16(vsi->num_bfltr);
1133 	/* default queue index within the VSI of the default FD */
1134 	val = FIELD_PREP(ICE_AQ_VSI_FD_DEF_Q_M, dflt_q);
1135 	/* target queue or queue group to the FD filter */
1136 	val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_GRP_M, dflt_q_group);
1137 	ctxt->info.fd_def_q = cpu_to_le16(val);
1138 	/* queue index on which FD filter completion is reported */
1139 	val = FIELD_PREP(ICE_AQ_VSI_FD_REPORT_Q_M, report_q);
1140 	/* priority of the default qindex action */
1141 	val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_PRIORITY_M, dflt_q_prio);
1142 	ctxt->info.fd_report_opt = cpu_to_le16(val);
1143 }
1144 
1145 /**
1146  * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
1147  * @ctxt: the VSI context being set
1148  * @vsi: the VSI being configured
1149  */
1150 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
1151 {
1152 	u8 lut_type, hash_type;
1153 	struct device *dev;
1154 	struct ice_pf *pf;
1155 
1156 	pf = vsi->back;
1157 	dev = ice_pf_to_dev(pf);
1158 
1159 	switch (vsi->type) {
1160 	case ICE_VSI_CHNL:
1161 	case ICE_VSI_PF:
1162 		/* PF VSI will inherit RSS instance of PF */
1163 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
1164 		break;
1165 	case ICE_VSI_VF:
1166 	case ICE_VSI_SF:
1167 		/* VF and SF VSIs get a small RSS table which is a VSI LUT type */
1168 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
1169 		break;
1170 	default:
1171 		dev_dbg(dev, "Unsupported VSI type %s\n",
1172 			ice_vsi_type_str(vsi->type));
1173 		return;
1174 	}
1175 
1176 	hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
1177 	vsi->rss_hfunc = hash_type;
1178 
1179 	ctxt->info.q_opt_rss =
1180 		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
1181 		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
1182 }
1183 
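/**
 * ice_chnl_vsi_setup_q_map - Setup the queue map for a channel VSI
 * @vsi: the channel VSI being configured
 * @ctxt: VSI context structure
 */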
1184 static void
1185 ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
1186 {
1187 	u16 qcount, qmap;
1188 	u8 offset = 0;
1189 	int pow;
1190 
1191 	qcount = vsi->num_rxq;
1192 
1193 	pow = order_base_2(qcount);
1194 	qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
1195 	qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
1196 
1197 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1198 	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
1199 	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
1200 	ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
1201 }
1202 
1203 /**
1204  * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
1205  * @vsi: VSI to check whether or not VLAN pruning is enabled.
1206  *
1207  * returns true if Rx VLAN pruning is enabled and false otherwise.
1208  */
1209 static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
1210 {
1211 	return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
1212 }
1213 
1214 /**
1215  * ice_vsi_init - Create and initialize a VSI
1216  * @vsi: the VSI being configured
1217  * @vsi_flags: VSI configuration flags
1218  *
1219  * Set ICE_FLAG_VSI_INIT to initialize a new VSI context, clear it to
1220  * reconfigure an existing context.
1221  *
1222  * This initializes a VSI context depending on the VSI type to be added and
1223  * passes it down to the add_vsi aq command to create a new VSI.
1224  */
1225 static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
1226 {
1227 	struct ice_pf *pf = vsi->back;
1228 	struct ice_hw *hw = &pf->hw;
1229 	struct ice_vsi_ctx *ctxt;
1230 	struct device *dev;
1231 	int ret = 0;
1232 
1233 	dev = ice_pf_to_dev(pf);
1234 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
1235 	if (!ctxt)
1236 		return -ENOMEM;
1237 
1238 	switch (vsi->type) {
1239 	case ICE_VSI_CTRL:
1240 	case ICE_VSI_LB:
1241 	case ICE_VSI_PF:
1242 		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
1243 		break;
1244 	case ICE_VSI_SF:
1245 	case ICE_VSI_CHNL:
1246 		ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
1247 		break;
1248 	case ICE_VSI_VF:
1249 		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
1250 		/* VF number here is the absolute VF number (0-255) */
1251 		ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
1252 		break;
1253 	default:
1254 		ret = -ENODEV;
1255 		goto out;
1256 	}
1257 
1258 	/* Handle VLAN pruning for channel VSI if main VSI has VLAN
1259 	 * prune enabled
1260 	 */
1261 	if (vsi->type == ICE_VSI_CHNL) {
1262 		struct ice_vsi *main_vsi;
1263 
1264 		main_vsi = ice_get_main_vsi(pf);
1265 		if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
1266 			ctxt->info.sw_flags2 |=
1267 				ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
1268 		else
1269 			ctxt->info.sw_flags2 &=
1270 				~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
1271 	}
1272 
1273 	ice_set_dflt_vsi_ctx(hw, ctxt);
1274 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
1275 		ice_set_fd_vsi_ctx(ctxt, vsi);
1276 	/* if the switch is in VEB mode, allow VSI loopback */
1277 	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
1278 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
1279 
1280 	/* Set LUT type and HASH type if RSS is enabled */
1281 	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
1282 	    vsi->type != ICE_VSI_CTRL) {
1283 		ice_set_rss_vsi_ctx(ctxt, vsi);
1284 		/* if updating VSI context, make sure to set valid_section:
1285 		 * to indicate which section of VSI context being updated
1286 		 */
1287 		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
1288 			ctxt->info.valid_sections |=
1289 				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
1290 	}
1291 
1292 	ctxt->info.sw_id = vsi->port_info->sw_id;
1293 	if (vsi->type == ICE_VSI_CHNL) {
1294 		ice_chnl_vsi_setup_q_map(vsi, ctxt);
1295 	} else {
1296 		ret = ice_vsi_setup_q_map(vsi, ctxt);
1297 		if (ret)
1298 			goto out;
1299 
1300 		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
1301 			/* the VSI is being updated, so we must indicate
1302 			 * which sections of the VSI context are being
1303 			 * modified
1304 			 */
1305 			ctxt->info.valid_sections |=
1306 				cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
1307 	}
1308 
1309 	/* Allow control frames out of main VSI */
1310 	if (vsi->type == ICE_VSI_PF) {
1311 		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
1312 		ctxt->info.valid_sections |=
1313 			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1314 	}
1315 
1316 	if (vsi_flags & ICE_VSI_FLAG_INIT) {
1317 		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
1318 		if (ret) {
1319 			dev_err(dev, "Add VSI failed, err %d\n", ret);
1320 			ret = -EIO;
1321 			goto out;
1322 		}
1323 	} else {
1324 		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
1325 		if (ret) {
1326 			dev_err(dev, "Update VSI failed, err %d\n", ret);
1327 			ret = -EIO;
1328 			goto out;
1329 		}
1330 	}
1331 
1332 	/* keep context for update VSI operations */
1333 	vsi->info = ctxt->info;
1334 
1335 	/* record VSI number returned */
1336 	vsi->vsi_num = ctxt->vsi_num;
1337 
1338 out:
1339 	kfree(ctxt);
1340 	return ret;
1341 }
1342 
1343 /**
1344  * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
1345  * @vsi: the VSI having rings deallocated
1346  */
1347 static void ice_vsi_clear_rings(struct ice_vsi *vsi)
1348 {
1349 	int i;
1350 
1351 	/* Avoid stale references by clearing map from vector to ring */
1352 	if (vsi->q_vectors) {
1353 		ice_for_each_q_vector(vsi, i) {
1354 			struct ice_q_vector *q_vector = vsi->q_vectors[i];
1355 
1356 			if (q_vector) {
1357 				q_vector->tx.tx_ring = NULL;
1358 				q_vector->rx.rx_ring = NULL;
1359 			}
1360 		}
1361 	}
1362 
1363 	if (vsi->tx_rings) {
1364 		ice_for_each_alloc_txq(vsi, i) {
1365 			if (vsi->tx_rings[i]) {
1366 				kfree_rcu(vsi->tx_rings[i], rcu);
1367 				WRITE_ONCE(vsi->tx_rings[i], NULL);
1368 			}
1369 		}
1370 	}
1371 	if (vsi->rx_rings) {
1372 		ice_for_each_alloc_rxq(vsi, i) {
1373 			if (vsi->rx_rings[i]) {
1374 				kfree_rcu(vsi->rx_rings[i], rcu);
1375 				WRITE_ONCE(vsi->rx_rings[i], NULL);
1376 			}
1377 		}
1378 	}
1379 }
1380 
1381 /**
1382  * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
1383  * @vsi: VSI which is having rings allocated
1384  */
1385 static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
1386 {
1387 	bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
1388 	struct ice_pf *pf = vsi->back;
1389 	struct device *dev;
1390 	u16 i;
1391 
1392 	dev = ice_pf_to_dev(pf);
1393 	/* Allocate Tx rings */
1394 	ice_for_each_alloc_txq(vsi, i) {
1395 		struct ice_tx_ring *ring;
1396 
1397 		/* allocate with kzalloc(), free with kfree_rcu() */
1398 		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1399 
1400 		if (!ring)
1401 			goto err_out;
1402 
1403 		ring->q_index = i;
1404 		ring->reg_idx = vsi->txq_map[i];
1405 		ring->vsi = vsi;
1406 		ring->tx_tstamps = &pf->ptp.port.tx;
1407 		ring->dev = dev;
1408 		ring->count = vsi->num_tx_desc;
1409 		ring->txq_teid = ICE_INVAL_TEID;
1410 		if (dvm_ena)
1411 			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
1412 		else
1413 			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
1414 		WRITE_ONCE(vsi->tx_rings[i], ring);
1415 	}
1416 
1417 	/* Allocate Rx rings */
1418 	ice_for_each_alloc_rxq(vsi, i) {
1419 		struct ice_rx_ring *ring;
1420 
1421 		/* allocate with kzalloc(), free with kfree_rcu() */
1422 		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1423 		if (!ring)
1424 			goto err_out;
1425 
1426 		ring->q_index = i;
1427 		ring->reg_idx = vsi->rxq_map[i];
1428 		ring->vsi = vsi;
1429 		ring->netdev = vsi->netdev;
1430 		ring->count = vsi->num_rx_desc;
1431 		ring->cached_phctime = pf->ptp.cached_phc_time;
1432 
1433 		if (ice_is_feature_supported(pf, ICE_F_GCS))
1434 			ring->flags |= ICE_RX_FLAGS_RING_GCS;
1435 
1436 		WRITE_ONCE(vsi->rx_rings[i], ring);
1437 	}
1438 
1439 	return 0;
1440 
1441 err_out:
1442 	ice_vsi_clear_rings(vsi);
1443 	return -ENOMEM;
1444 }
1445 
1446 /**
1447  * ice_vsi_manage_rss_lut - disable/enable RSS
1448  * @vsi: the VSI being changed
1449  * @ena: boolean value indicating if this is an enable or disable request
1450  *
1451  * In the event of a disable request for RSS, this function zeroes out the
1452  * RSS LUT, while in the event of an enable request, it reconfigures the RSS
1453  * LUT from the user-supplied or default values.
1454  */
1455 void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
1456 {
1457 	u8 *lut;
1458 
1459 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1460 	if (!lut)
1461 		return;
1462 
1463 	if (ena) {
1464 		if (vsi->rss_lut_user)
1465 			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1466 		else
1467 			ice_fill_rss_lut(lut, vsi->rss_table_size,
1468 					 vsi->rss_size);
1469 	}
1470 
1471 	ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
1472 	kfree(lut);
1473 }
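
/* For illustration: ice_fill_rss_lut() spreads queue indices round-robin
 * (lut[i] = i % rss_size), so with rss_table_size = 8 and rss_size = 3 the
 * enable path programs { 0, 1, 2, 0, 1, 2, 0, 1 }, while the disable path
 * leaves the zero-filled LUT and steers all traffic to queue 0.
 */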
1474 
1475 /**
1476  * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
1477  * @vsi: VSI to be configured
1478  * @disable: set to true to have FCS / CRC in the frame data
1479  */
1480 void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
1481 {
1482 	int i;
1483 
1484 	ice_for_each_rxq(vsi, i)
1485 		if (disable)
1486 			vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
1487 		else
1488 			vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
1489 }
1490 
1491 /**
1492  * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
1493  * @vsi: VSI to be configured
1494  */
1495 int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
1496 {
1497 	struct ice_pf *pf = vsi->back;
1498 	struct device *dev;
1499 	u8 *lut, *key;
1500 	int err;
1501 
1502 	dev = ice_pf_to_dev(pf);
1503 	if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
1504 	    (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
1505 		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
1506 	} else {
1507 		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
1508 
1509 		/* If orig_rss_size is valid and the determined rss_size is
1510 		 * less than it, restore the main VSI's rss_size to
1511 		 * orig_rss_size so that when the tc-qdisc is deleted, the
1512 		 * main VSI RSS table gets programmed back to whatever it was
1513 		 * prior to setup-tc for the ADQ config.
1514 		 */
1515 		if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
1516 		    vsi->orig_rss_size <= vsi->num_rxq) {
1517 			vsi->rss_size = vsi->orig_rss_size;
1518 			/* now orig_rss_size is used, reset it to zero */
1519 			vsi->orig_rss_size = 0;
1520 		}
1521 	}
1522 
1523 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1524 	if (!lut)
1525 		return -ENOMEM;
1526 
1527 	if (vsi->rss_lut_user)
1528 		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1529 	else
1530 		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
1531 
1532 	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
1533 	if (err) {
1534 		dev_err(dev, "set_rss_lut failed, error %d\n", err);
1535 		goto ice_vsi_cfg_rss_exit;
1536 	}
1537 
1538 	key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL);
1539 	if (!key) {
1540 		err = -ENOMEM;
1541 		goto ice_vsi_cfg_rss_exit;
1542 	}
1543 
1544 	if (vsi->rss_hkey_user)
1545 		memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1546 	else
1547 		netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1548 
1549 	err = ice_set_rss_key(vsi, key);
1550 	if (err)
1551 		dev_err(dev, "set_rss_key failed, error %d\n", err);
1552 
1553 	kfree(key);
1554 ice_vsi_cfg_rss_exit:
1555 	kfree(lut);
1556 	return err;
1557 }
1558 
1559 /**
1560  * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
1561  * @vsi: VSI to be configured
1562  *
1563  * This function will only be called during the VF VSI setup. Upon successful
1564  * completion of package download, this function will configure default RSS
1565  * input sets for VF VSI.
1566  */
1567 static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
1568 {
1569 	struct ice_pf *pf = vsi->back;
1570 	struct device *dev;
1571 	int status;
1572 
1573 	dev = ice_pf_to_dev(pf);
1574 	if (ice_is_safe_mode(pf)) {
1575 		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
1576 			vsi->vsi_num);
1577 		return;
1578 	}
1579 
1580 	status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HASHCFG);
1581 	if (status)
1582 		dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
1583 			vsi->vsi_num, status);
1584 }
1585 
1586 static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
1587 	/* configure RSS for IPv4 with input set IP src/dst */
1588 	{ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false},
1589 	/* configure RSS for IPv6 with input set IPv6 src/dst */
1590 	{ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false},
1591 	/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
1592 	{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
1593 				ICE_HASH_TCP_IPV4,  ICE_RSS_ANY_HEADERS, false},
1594 	/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
1595 	{ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4,
1596 				ICE_HASH_UDP_IPV4,  ICE_RSS_ANY_HEADERS, false},
1597 	/* configure RSS for sctp4 with input set IP src/dst - only support
1598 	 * RSS on SCTPv4 on outer headers (non-tunneled)
1599 	 */
1600 	{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
1601 		ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false},
1602 	/* configure RSS for gtpc4 with input set IPv4 src/dst */
1603 	{ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4,
1604 		ICE_FLOW_HASH_IPV4, ICE_RSS_OUTER_HEADERS, false},
1605 	/* configure RSS for gtpc4t with input set IPv4 src/dst */
1606 	{ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4,
1607 		ICE_FLOW_HASH_GTP_C_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
1608 	/* configure RSS for gtpu4 with input set IPv4 src/dst */
1609 	{ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4,
1610 		ICE_FLOW_HASH_GTP_U_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
1611 	/* configure RSS for gtpu4e with input set IPv4 src/dst */
1612 	{ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4,
1613 		ICE_FLOW_HASH_GTP_U_IPV4_EH, ICE_RSS_OUTER_HEADERS, false},
1614 	/* configure RSS for gtpu4u with input set IPv4 src/dst */
1615 	{ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4,
1616 		ICE_FLOW_HASH_GTP_U_IPV4_UP, ICE_RSS_OUTER_HEADERS, false},
1617 	/* configure RSS for gtpu4d with input set IPv4 src/dst */
1618 	{ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4,
1619 		ICE_FLOW_HASH_GTP_U_IPV4_DWN, ICE_RSS_OUTER_HEADERS, false},
1620 
1621 	/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
1622 	{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
1623 				ICE_HASH_TCP_IPV6,  ICE_RSS_ANY_HEADERS, false},
1624 	/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
1625 	{ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6,
1626 				ICE_HASH_UDP_IPV6,  ICE_RSS_ANY_HEADERS, false},
1627 	/* configure RSS for sctp6 with input set IPv6 src/dst - only support
1628 	 * RSS on SCTPv6 on outer headers (non-tunneled)
1629 	 */
1630 	{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6,
1631 		ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false},
1632 	/* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
1633 	{ICE_FLOW_SEG_HDR_ESP,
1634 		ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},
1635 	/* configure RSS for gtpc6 with input set IPv6 src/dst */
1636 	{ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6,
1637 		ICE_FLOW_HASH_IPV6, ICE_RSS_OUTER_HEADERS, false},
1638 	/* configure RSS for gtpc6t with input set IPv6 src/dst */
1639 	{ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6,
1640 		ICE_FLOW_HASH_GTP_C_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
1641 	/* configure RSS for gtpu6 with input set IPv6 src/dst */
1642 	{ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6,
1643 		ICE_FLOW_HASH_GTP_U_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
1644 	/* configure RSS for gtpu6e with input set IPv6 src/dst */
1645 	{ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6,
1646 		ICE_FLOW_HASH_GTP_U_IPV6_EH, ICE_RSS_OUTER_HEADERS, false},
1647 	/* configure RSS for gtpu6u with input set IPv6 src/dst */
1648 	{ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6,
1649 		ICE_FLOW_HASH_GTP_U_IPV6_UP, ICE_RSS_OUTER_HEADERS, false},
1650 	/* configure RSS for gtpu6d with input set IPv6 src/dst */
1651 	{ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6,
1652 		ICE_FLOW_HASH_GTP_U_IPV6_DWN, ICE_RSS_OUTER_HEADERS, false},
1653 };
1654 
1655 /**
1656  * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
1657  * @vsi: VSI to be configured
1658  *
1659  * This function will only be called after successful download package call
1660  * during initialization of PF. Since the downloaded package will erase the
1661  * RSS section, this function will configure RSS input sets for different
1662  * flow types. The last profile added has the highest priority, therefore 2
1663  * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles
1664  * (i.e. IPv4 src/dst TCP src/dst port).
1665  */
1666 static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
1667 {
1668 	u16 vsi_num = vsi->vsi_num;
1669 	struct ice_pf *pf = vsi->back;
1670 	struct ice_hw *hw = &pf->hw;
1671 	struct device *dev;
1672 	int status;
1673 	u32 i;
1674 
1675 	dev = ice_pf_to_dev(pf);
1676 	if (ice_is_safe_mode(pf)) {
1677 		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
1678 			vsi_num);
1679 		return;
1680 	}
1681 	for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) {
1682 		const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i];
1683 
1684 		status = ice_add_rss_cfg(hw, vsi, cfg);
1685 		if (status)
1686 			dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n",
1687 				cfg->addl_hdrs, cfg->hash_flds,
1688 				cfg->hdr_type, cfg->symm);
1689 	}
1690 }
1691 
1692 /**
1693  * ice_pf_state_is_nominal - checks the PF for nominal state
1694  * @pf: pointer to PF to check
1695  *
1696  * Check the PF's state for a collection of bits that would indicate
1697  * the PF is in a state that would inhibit normal operation for
1698  * driver functionality.
1699  *
1700  * Returns true if PF is in a nominal state, false otherwise
1701  */
1702 bool ice_pf_state_is_nominal(struct ice_pf *pf)
1703 {
1704 	DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };
1705 
1706 	if (!pf)
1707 		return false;
1708 
1709 	bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
1710 	if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
1711 		return false;
1712 
1713 	return true;
1714 }
1715 
1716 #define ICE_FW_MODE_REC_M BIT(1)
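/**
 * ice_is_recovery_mode - check if the firmware is in recovery mode
 * @hw: pointer to the HW struct
 */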
1717 bool ice_is_recovery_mode(struct ice_hw *hw)
1718 {
1719 	return rd32(hw, GL_MNG_FWSM) & ICE_FW_MODE_REC_M;
1720 }
1721 
1722 /**
1723  * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
1724  * @vsi: the VSI to be updated
1725  */
1726 void ice_update_eth_stats(struct ice_vsi *vsi)
1727 {
1728 	struct ice_eth_stats *prev_es, *cur_es;
1729 	struct ice_hw *hw = &vsi->back->hw;
1730 	struct ice_pf *pf = vsi->back;
1731 	u16 vsi_num = vsi->vsi_num;    /* HW absolute index of a VSI */
1732 
1733 	prev_es = &vsi->eth_stats_prev;
1734 	cur_es = &vsi->eth_stats;
1735 
1736 	if (ice_is_reset_in_progress(pf->state))
1737 		vsi->stat_offsets_loaded = false;
1738 
1739 	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
1740 			  &prev_es->rx_bytes, &cur_es->rx_bytes);
1741 
1742 	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
1743 			  &prev_es->rx_unicast, &cur_es->rx_unicast);
1744 
1745 	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
1746 			  &prev_es->rx_multicast, &cur_es->rx_multicast);
1747 
1748 	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
1749 			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);
1750 
1751 	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
1752 			  &prev_es->rx_discards, &cur_es->rx_discards);
1753 
1754 	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
1755 			  &prev_es->tx_bytes, &cur_es->tx_bytes);
1756 
1757 	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
1758 			  &prev_es->tx_unicast, &cur_es->tx_unicast);
1759 
1760 	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
1761 			  &prev_es->tx_multicast, &cur_es->tx_multicast);
1762 
1763 	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
1764 			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);
1765 
1766 	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
1767 			  &prev_es->tx_errors, &cur_es->tx_errors);
1768 
1769 	vsi->stat_offsets_loaded = true;
1770 }
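
/* Note on the pattern above: when stat_offsets_loaded is false (first read,
 * or after a reset), the ice_stat_update40()/ice_stat_update32() helpers
 * latch the current register value as a baseline, so later reads report
 * rollover-safe deltas relative to that baseline rather than raw counter
 * values.
 */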
1771 
1772 /**
1773  * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
1774  * @hw: HW pointer
1775  * @pf_q: index of the Rx queue in the PF's queue space
1776  * @rxdid: flexible descriptor RXDID
1777  * @prio: priority for the RXDID for this queue
1778  * @ena_ts: true to enable timestamp and false to disable timestamp
1779  */
1780 void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
1781 			     bool ena_ts)
1782 {
1783 	int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
1784 
1785 	/* clear any previous values */
1786 	regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
1787 		    QRXFLXP_CNTXT_RXDID_PRIO_M |
1788 		    QRXFLXP_CNTXT_TS_M);
1789 
1790 	regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_IDX_M, rxdid);
1791 	regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_PRIO_M, prio);
1792 
1793 	if (ena_ts)
1794 		/* Enable TimeSync on this queue */
1795 		regval |= QRXFLXP_CNTXT_TS_M;
1796 
1797 	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
1798 }
1799 
1800 /**
1801  * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
1802  * @intrl: interrupt rate limit in usecs
1803  * @gran: interrupt rate limit granularity in usecs
1804  *
1805  * This function converts a decimal interrupt rate limit in usecs to the format
1806  * expected by firmware.
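 *
 * A worked example, assuming gran is the 4 usec ICE_INTRL_GRAN_ABOVE_25
 * granularity used by ice_write_intrl(): intrl = 10 yields val = 10 / 4 = 2,
 * so the returned value encodes an 8 usec limit (rounded down) with
 * GLINT_RATE_INTRL_ENA_M set, while intrl = 0 disables rate limiting.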
1807  */
1808 static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
1809 {
1810 	u32 val = intrl / gran;
1811 
1812 	if (val)
1813 		return val | GLINT_RATE_INTRL_ENA_M;
1814 	return 0;
1815 }
1816 
1817 /**
1818  * ice_write_intrl - write throttle rate limit to interrupt specific register
1819  * @q_vector: pointer to interrupt specific structure
1820  * @intrl: throttle rate limit in microseconds to write
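 *
 * Note that INTRL is a hard per-vector rate limit; it is distinct from the
 * per-ring-container ITR moderation written by ice_write_itr().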
1821  */
1822 void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
1823 {
1824 	struct ice_hw *hw = &q_vector->vsi->back->hw;
1825 
1826 	wr32(hw, GLINT_RATE(q_vector->reg_idx),
1827 	     ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
1828 }
1829 
1830 static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
1831 {
1832 	switch (rc->type) {
1833 	case ICE_RX_CONTAINER:
1834 		if (rc->rx_ring)
1835 			return rc->rx_ring->q_vector;
1836 		break;
1837 	case ICE_TX_CONTAINER:
1838 		if (rc->tx_ring)
1839 			return rc->tx_ring->q_vector;
1840 		break;
1841 	default:
1842 		break;
1843 	}
1844 
1845 	return NULL;
1846 }
1847 
1848 /**
1849  * __ice_write_itr - write throttle rate to register
1850  * @q_vector: pointer to interrupt data structure
1851  * @rc: pointer to ring container
1852  * @itr: throttle rate in microseconds to write
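 *
 * A worked example, assuming the driver's 2 usec ITR granularity
 * (ICE_ITR_GRAN_S == 1): itr = 50 stays 50 after ITR_REG_ALIGN() and is
 * then shifted, so 50 >> 1 = 25 is written to GLINT_ITR.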
1853  */
1854 static void __ice_write_itr(struct ice_q_vector *q_vector,
1855 			    struct ice_ring_container *rc, u16 itr)
1856 {
1857 	struct ice_hw *hw = &q_vector->vsi->back->hw;
1858 
1859 	wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
1860 	     ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S);
1861 }
1862 
1863 /**
1864  * ice_write_itr - write throttle rate to queue specific register
1865  * @rc: pointer to ring container
1866  * @itr: throttle rate in microseconds to write
1867  */
1868 void ice_write_itr(struct ice_ring_container *rc, u16 itr)
1869 {
1870 	struct ice_q_vector *q_vector;
1871 
1872 	q_vector = ice_pull_qvec_from_rc(rc);
1873 	if (!q_vector)
1874 		return;
1875 
1876 	__ice_write_itr(q_vector, rc, itr);
1877 }
1878 
1879 /**
1880  * ice_set_q_vector_intrl - set up interrupt rate limiting
1881  * @q_vector: the vector to be configured
1882  *
1883  * Interrupt rate limiting is local to the vector, not per-queue, so we must
1884  * detect if either ring container has dynamic moderation enabled to decide
1885  * what interrupt rate limit to set via the INTRL settings. If dynamic
1886  * moderation is disabled on both, write the cached setting so that the
1887  * INTRL register matches the user-visible value.
1888  */
1889 void ice_set_q_vector_intrl(struct ice_q_vector *q_vector)
1890 {
1891 	if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {
1892 		/* When dynamic moderation is enabled, cap each vector at a
1893 		 * 4 usec rate limit, i.e. no more than 250,000 ints/sec.
1894 		 * This keeps latency low while costing less CPU than the
1895 		 * 500,000 ints/sec that the lowest latency ITR setting
1896 		 * would otherwise allow.
1897 		 */
1898 		ice_write_intrl(q_vector, 4);
1899 	} else {
1900 		ice_write_intrl(q_vector, q_vector->intrl);
1901 	}
1902 }
1903 
1904 /**
1905  * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
1906  * @vsi: the VSI being configured
1907  *
1908  * This configures MSIX mode interrupts for the PF VSI, and should not be used
1909  * for the VF VSI.
1910  */
1911 void ice_vsi_cfg_msix(struct ice_vsi *vsi)
1912 {
1913 	struct ice_pf *pf = vsi->back;
1914 	struct ice_hw *hw = &pf->hw;
1915 	u16 txq = 0, rxq = 0;
1916 	int i, q;
1917 
1918 	ice_for_each_q_vector(vsi, i) {
1919 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
1920 		u16 reg_idx = q_vector->reg_idx;
1921 
1922 		ice_cfg_itr(hw, q_vector);
1923 
1924 		/* Both the Transmit Queue Interrupt Cause Control register
1925 		 * and the Receive Queue Interrupt Cause Control register
1926 		 * expect the MSIX_INDX field to be the vector index within
1927 		 * the function space, not the absolute vector index across
1928 		 * the PF or across the device.
1929 		 * For SR-IOV VF VSIs the queue vector index always starts
1930 		 * at 1, since the first vector index (0) is used for OICR
1931 		 * in the VF space. Since VMDq and other PF VSIs are within
1932 		 * the PF function space, use the vector index that is
1933 		 * tracked for this PF.
1934 		 */
1935 		for (q = 0; q < q_vector->num_ring_tx; q++) {
1936 			ice_cfg_txq_interrupt(vsi, txq, reg_idx,
1937 					      q_vector->tx.itr_idx);
1938 			txq++;
1939 		}
1940 
1941 		for (q = 0; q < q_vector->num_ring_rx; q++) {
1942 			ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
1943 					      q_vector->rx.itr_idx);
1944 			rxq++;
1945 		}
1946 	}
1947 }
1948 
1949 /**
1950  * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
1951  * @vsi: the VSI whose rings are to be enabled
1952  *
1953  * Returns 0 on success and a negative value on error
1954  */
1955 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
1956 {
1957 	return ice_vsi_ctrl_all_rx_rings(vsi, true);
1958 }
1959 
1960 /**
1961  * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
1962  * @vsi: the VSI whose rings are to be disabled
1963  *
1964  * Returns 0 on success and a negative value on error
1965  */
1966 int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
1967 {
1968 	return ice_vsi_ctrl_all_rx_rings(vsi, false);
1969 }
1970 
1971 /**
1972  * ice_vsi_stop_tx_rings - Disable Tx rings
1973  * @vsi: the VSI being configured
1974  * @rst_src: reset source
1975  * @rel_vmvf_num: Relative ID of VF/VM
1976  * @rings: Tx ring array to be stopped
1977  * @count: number of Tx ring array elements
1978  */
1979 static int
1980 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
1981 		      u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)
1982 {
1983 	u16 q_idx;
1984 
1985 	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
1986 		return -EINVAL;
1987 
1988 	for (q_idx = 0; q_idx < count; q_idx++) {
1989 		struct ice_txq_meta txq_meta = { };
1990 		int status;
1991 
1992 		if (!rings || !rings[q_idx])
1993 			return -EINVAL;
1994 
1995 		ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
1996 		status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
1997 					      rings[q_idx], &txq_meta);
1998 
1999 		if (status)
2000 			return status;
2001 	}
2002 
2003 	return 0;
2004 }
2005 
2006 /**
2007  * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
2008  * @vsi: the VSI being configured
2009  * @rst_src: reset source
2010  * @rel_vmvf_num: Relative ID of VF/VM
2011  */
2012 int
2013 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2014 			  u16 rel_vmvf_num)
2015 {
2016 	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
2017 }
2018 
2019 /**
2020  * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
2021  * @vsi: the VSI being configured
2022  */
2023 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
2024 {
2025 	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
2026 }
2027 
2028 /**
2029  * ice_vsi_is_rx_queue_active - check if any Rx queue of a VSI is active
2030  * @vsi: the VSI being checked
2031  *
2032  * Return true if at least one queue is active.
2033  */
2034 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
2035 {
2036 	struct ice_pf *pf = vsi->back;
2037 	struct ice_hw *hw = &pf->hw;
2038 	int i;
2039 
2040 	ice_for_each_rxq(vsi, i) {
2041 		u32 rx_reg;
2042 		int pf_q;
2043 
2044 		pf_q = vsi->rxq_map[i];
2045 		rx_reg = rd32(hw, QRX_CTRL(pf_q));
2046 		if (rx_reg & QRX_CTRL_QENA_STAT_M)
2047 			return true;
2048 	}
2049 
2050 	return false;
2051 }
2052 
2053 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
2054 {
2055 	if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
2056 		vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
2057 		vsi->tc_cfg.numtc = 1;
2058 		return;
2059 	}
2060 
2061 	/* set VSI TC information based on DCB config */
2062 	ice_vsi_set_dcb_tc_cfg(vsi);
2063 }
2064 
2065 /**
2066  * ice_vsi_cfg_sw_lldp - Config switch rules for LLDP packet handling
2067  * @vsi: the VSI being configured
2068  * @tx: bool to determine Tx or Rx rule
2069  * @create: bool to determine create or remove Rule
2070  *
2071  * Adding an ethtype Tx rule to the uplink VSI results in it being applied
2072  * to the whole port, so LLDP transmission for VFs will be blocked too.
2073  */
2074 void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
2075 {
2076 	int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
2077 			enum ice_sw_fwd_act_type act);
2078 	struct ice_pf *pf = vsi->back;
2079 	struct device *dev;
2080 	int status;
2081 
2082 	dev = ice_pf_to_dev(pf);
2083 	eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
2084 
2085 	if (tx) {
2086 		status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
2087 				  ICE_DROP_PACKET);
2088 	} else {
2089 		if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) {
2090 			status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
2091 					  ICE_FWD_TO_VSI);
2092 			if (!status || !create)
2093 				goto report;
2094 
2095 			dev_info(dev,
2096 				 "Failed to add generic LLDP Rx filter on VSI %i error: %d, falling back to specialized AQ control\n",
2097 				 vsi->vsi_num, status);
2098 		}
2099 
2100 		status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create);
2101 		if (!status)
2102 			set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags);
2104 	}
2105 
2106 report:
2107 	if (status)
2108 		dev_warn(dev, "Failed to %s %s LLDP rule on VSI %i error: %d\n",
2109 			 create ? "add" : "remove", tx ? "Tx" : "Rx",
2110 			 vsi->vsi_num, status);
2111 }
2112 
2113 /**
2114  * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP
2115  * @pf: the PF being configured
2116  * @enable: enable or disable
2117  *
2118  * Configure switch rules to enable/disable LLDP handling by software
2119  * across the PF.
2120  */
2121 void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable)
2122 {
2123 	struct ice_vsi *vsi;
2124 	struct ice_vf *vf;
2125 	unsigned int bkt;
2126 
2127 	vsi = ice_get_main_vsi(pf);
2128 	ice_vsi_cfg_sw_lldp(vsi, false, enable);
2129 
2130 	if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
2131 		return;
2132 
2133 	ice_for_each_vf(pf, bkt, vf) {
2134 		vsi = ice_get_vf_vsi(vf);
2135 
2136 		if (WARN_ON(!vsi))
2137 			continue;
2138 
2139 		if (ice_vf_is_lldp_ena(vf))
2140 			ice_vsi_cfg_sw_lldp(vsi, false, enable);
2141 	}
2142 }
2143 
2144 /**
2145  * ice_set_agg_vsi - sets up scheduler aggregator node and moves VSI into it
2146  * @vsi: pointer to the VSI
2147  *
2148  * This function will allocate a new scheduler aggregator node if needed and
2149  * will move the specified VSI into it.
2150  */
2151 static void ice_set_agg_vsi(struct ice_vsi *vsi)
2152 {
2153 	struct device *dev = ice_pf_to_dev(vsi->back);
2154 	struct ice_agg_node *agg_node_iter = NULL;
2155 	u32 agg_id = ICE_INVALID_AGG_NODE_ID;
2156 	struct ice_agg_node *agg_node = NULL;
2157 	int node_offset, max_agg_nodes = 0;
2158 	struct ice_port_info *port_info;
2159 	struct ice_pf *pf = vsi->back;
2160 	u32 agg_node_id_start = 0;
2161 	int status;
2162 
2163 	/* create (as needed) scheduler aggregator node and move VSI into
2164 	 * corresponding aggregator node
2165 	 * - PF aggregator node contains VSIs of type _PF and _CTRL
2166 	 * - VF aggregator nodes will contain VF VSIs
2167 	 */
2168 	port_info = pf->hw.port_info;
2169 	if (!port_info)
2170 		return;
2171 
2172 	switch (vsi->type) {
2173 	case ICE_VSI_CTRL:
2174 	case ICE_VSI_CHNL:
2175 	case ICE_VSI_LB:
2176 	case ICE_VSI_PF:
2177 	case ICE_VSI_SF:
2178 		max_agg_nodes = ICE_MAX_PF_AGG_NODES;
2179 		agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
2180 		agg_node_iter = &pf->pf_agg_node[0];
2181 		break;
2182 	case ICE_VSI_VF:
2183 		/* A user can create 'n' VFs on a given PF, but an aggregator
2184 		 * node can have at most 64 children. The code below handles
2185 		 * aggregator(s) for VF VSIs: it selects an agg_node that was
2186 		 * already created, provided its num_vsis < 64; otherwise it
2187 		 * selects the next available node, which will be created.
2188 		 */
2189 		max_agg_nodes = ICE_MAX_VF_AGG_NODES;
2190 		agg_node_id_start = ICE_VF_AGG_NODE_ID_START;
2191 		agg_node_iter = &pf->vf_agg_node[0];
2192 		break;
2193 	default:
2194 		/* other VSI type, handle later if needed */
2195 		dev_dbg(dev, "unexpected VSI type %s\n",
2196 			ice_vsi_type_str(vsi->type));
2197 		return;
2198 	}
2199 
2200 	/* find the appropriate aggregator node */
2201 	for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) {
2202 		/* see if we can find space in previously created
2203 		 * node if num_vsis < 64, otherwise skip
2204 		 */
2205 		if (agg_node_iter->num_vsis &&
2206 		    agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
2207 			agg_node_iter++;
2208 			continue;
2209 		}
2210 
2211 		if (agg_node_iter->valid &&
2212 		    agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) {
2213 			agg_id = agg_node_iter->agg_id;
2214 			agg_node = agg_node_iter;
2215 			break;
2216 		}
2217 
2218 		/* find unclaimed agg_id */
2219 		if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) {
2220 			agg_id = node_offset + agg_node_id_start;
2221 			agg_node = agg_node_iter;
2222 			break;
2223 		}
2224 		/* move to next agg_node */
2225 		agg_node_iter++;
2226 	}
2227 
2228 	if (!agg_node)
2229 		return;
2230 
2231 	/* if selected aggregator node was not created, create it */
2232 	if (!agg_node->valid) {
2233 		status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG,
2234 				     (u8)vsi->tc_cfg.ena_tc);
2235 		if (status) {
2236 			dev_err(dev, "unable to create aggregator node with agg_id %u\n",
2237 				agg_id);
2238 			return;
2239 		}
2240 		/* aggregator node is created, store the needed info */
2241 		agg_node->valid = true;
2242 		agg_node->agg_id = agg_id;
2243 	}
2244 
2245 	/* move VSI to corresponding aggregator node */
2246 	status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
2247 				     (u8)vsi->tc_cfg.ena_tc);
2248 	if (status) {
2249 		dev_err(dev, "unable to move VSI idx %u into aggregator node %u\n",
2250 			vsi->idx, agg_id);
2251 		return;
2252 	}
2253 
2254 	/* keep active children count for aggregator node */
2255 	agg_node->num_vsis++;
2256 
2257 	/* cache the 'agg_id' in VSI, so that after reset - VSI will be moved
2258 	 * to aggregator node
2259 	 */
2260 	vsi->agg_node = agg_node;
2261 	dev_dbg(dev, "successfully moved VSI idx %u (tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n",
2262 		vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
2263 		vsi->agg_node->num_vsis);
2264 }
2265 
2266 static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
2267 {
2268 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2269 	struct device *dev = ice_pf_to_dev(pf);
2270 	int ret, i;
2271 
2272 	/* configure VSI nodes based on number of queues and TC's */
2273 	ice_for_each_traffic_class(i) {
2274 		if (!(vsi->tc_cfg.ena_tc & BIT(i)))
2275 			continue;
2276 
2277 		if (vsi->type == ICE_VSI_CHNL) {
2278 			if (!vsi->alloc_txq && vsi->num_txq)
2279 				max_txqs[i] = vsi->num_txq;
2280 			else
2281 				max_txqs[i] = pf->num_lan_tx;
2282 		} else {
2283 			max_txqs[i] = vsi->alloc_txq;
2284 		}
2285 
2286 		if (vsi->type == ICE_VSI_PF)
2287 			max_txqs[i] += vsi->num_xdp_txq;
2288 	}
2289 
2290 	dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
2291 	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2292 			      max_txqs);
2293 	if (ret) {
2294 		dev_err(dev, "VSI %d failed lan queue config, error %d\n",
2295 			vsi->vsi_num, ret);
2296 		return ret;
2297 	}
2298 
2299 	return 0;
2300 }
2301 
2302 /**
2303  * ice_vsi_cfg_def - configure default VSI based on the type
2304  * @vsi: pointer to VSI
2305  */
2306 static int ice_vsi_cfg_def(struct ice_vsi *vsi)
2307 {
2308 	struct device *dev = ice_pf_to_dev(vsi->back);
2309 	struct ice_pf *pf = vsi->back;
2310 	int ret;
2311 
2312 	vsi->vsw = pf->first_sw;
2313 
2314 	ret = ice_vsi_alloc_def(vsi, vsi->ch);
2315 	if (ret)
2316 		return ret;
2317 
2318 	/* allocate memory for Tx/Rx ring stat pointers */
2319 	ret = ice_vsi_alloc_stat_arrays(vsi);
2320 	if (ret)
2321 		goto unroll_vsi_alloc;
2322 
2323 	ice_alloc_fd_res(vsi);
2324 
2325 	ret = ice_vsi_get_qs(vsi);
2326 	if (ret) {
2327 		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
2328 			vsi->idx);
2329 		goto unroll_vsi_alloc_stat;
2330 	}
2331 
2332 	/* set RSS capabilities */
2333 	ice_vsi_set_rss_params(vsi);
2334 
2335 	/* set TC configuration */
2336 	ice_vsi_set_tc_cfg(vsi);
2337 
2338 	/* create the VSI */
2339 	ret = ice_vsi_init(vsi, vsi->flags);
2340 	if (ret)
2341 		goto unroll_get_qs;
2342 
2343 	ice_vsi_init_vlan_ops(vsi);
2344 
2345 	switch (vsi->type) {
2346 	case ICE_VSI_CTRL:
2347 	case ICE_VSI_SF:
2348 	case ICE_VSI_PF:
2349 		ret = ice_vsi_alloc_q_vectors(vsi);
2350 		if (ret)
2351 			goto unroll_vsi_init;
2352 
2353 		ret = ice_vsi_alloc_rings(vsi);
2354 		if (ret)
2355 			goto unroll_vector_base;
2356 
2357 		ret = ice_vsi_alloc_ring_stats(vsi);
2358 		if (ret)
2359 			goto unroll_vector_base;
2360 
2361 		if (ice_is_xdp_ena_vsi(vsi)) {
2362 			ret = ice_vsi_determine_xdp_res(vsi);
2363 			if (ret)
2364 				goto unroll_vector_base;
2365 			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
2366 						    ICE_XDP_CFG_PART);
2367 			if (ret)
2368 				goto unroll_vector_base;
2369 		}
2370 
2371 		ice_vsi_map_rings_to_vectors(vsi);
2372 
2373 		vsi->stat_offsets_loaded = false;
2374 
2375 		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
2376 		if (vsi->type != ICE_VSI_CTRL)
2377 			/* Do not exit if configuring RSS had an issue, at
2378 			 * least receive traffic on first queue. Hence no
2379 			 * need to capture return value
2380 			 */
2381 			if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2382 				ice_vsi_cfg_rss_lut_key(vsi);
2383 				ice_vsi_set_rss_flow_fld(vsi);
2384 			}
2385 		ice_init_arfs(vsi);
2386 		break;
2387 	case ICE_VSI_CHNL:
2388 		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2389 			ice_vsi_cfg_rss_lut_key(vsi);
2390 			ice_vsi_set_rss_flow_fld(vsi);
2391 		}
2392 		break;
2393 	case ICE_VSI_VF:
2394 		/* The VF driver will take care of creating a netdev for this
2395 		 * type and of mapping queues to vectors through virtchnl; the
2396 		 * PF driver only creates a VSI and the corresponding
2397 		 * structures for bookkeeping purposes
2398 		 */
2399 		ret = ice_vsi_alloc_q_vectors(vsi);
2400 		if (ret)
2401 			goto unroll_vsi_init;
2402 
2403 		ret = ice_vsi_alloc_rings(vsi);
2404 		if (ret)
2405 			goto unroll_alloc_q_vector;
2406 
2407 		ret = ice_vsi_alloc_ring_stats(vsi);
2408 		if (ret)
2409 			goto unroll_vector_base;
2410 
2411 		vsi->stat_offsets_loaded = false;
2412 
2413 		/* Do not exit if configuring RSS had an issue, at least
2414 		 * receive traffic on first queue. Hence no need to capture
2415 		 * return value
2416 		 */
2417 		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2418 			ice_vsi_cfg_rss_lut_key(vsi);
2419 			ice_vsi_set_vf_rss_flow_fld(vsi);
2420 		}
2421 		break;
2422 	case ICE_VSI_LB:
2423 		ret = ice_vsi_alloc_rings(vsi);
2424 		if (ret)
2425 			goto unroll_vsi_init;
2426 
2427 		ret = ice_vsi_alloc_ring_stats(vsi);
2428 		if (ret)
2429 			goto unroll_vector_base;
2430 
2431 		break;
2432 	default:
2433 		/* clean up the resources and exit */
2434 		ret = -EINVAL;
2435 		goto unroll_vsi_init;
2436 	}
2437 
2438 	return 0;
2439 
2440 unroll_vector_base:
2441 	/* reclaim SW interrupts back to the common pool */
2442 unroll_alloc_q_vector:
2443 	ice_vsi_free_q_vectors(vsi);
2444 unroll_vsi_init:
2445 	ice_vsi_delete_from_hw(vsi);
2446 unroll_get_qs:
2447 	ice_vsi_put_qs(vsi);
2448 unroll_vsi_alloc_stat:
2449 	ice_vsi_free_stats(vsi);
2450 unroll_vsi_alloc:
2451 	ice_vsi_free_arrays(vsi);
2452 	return ret;
2453 }
2454 
2455 /**
2456  * ice_vsi_cfg - configure a previously allocated VSI
2457  * @vsi: pointer to VSI
2458  */
2459 int ice_vsi_cfg(struct ice_vsi *vsi)
2460 {
2461 	struct ice_pf *pf = vsi->back;
2462 	int ret;
2463 
2464 	if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
2465 		return -EINVAL;
2466 
2467 	ret = ice_vsi_cfg_def(vsi);
2468 	if (ret)
2469 		return ret;
2470 
2471 	ret = ice_vsi_cfg_tc_lan(vsi->back, vsi);
2472 	if (ret)
2473 		ice_vsi_decfg(vsi);
2474 
2475 	if (vsi->type == ICE_VSI_CTRL) {
2476 		if (vsi->vf) {
2477 			WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI);
2478 			vsi->vf->ctrl_vsi_idx = vsi->idx;
2479 		} else {
2480 			WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI);
2481 			pf->ctrl_vsi_idx = vsi->idx;
2482 		}
2483 	}
2484 
2485 	return ret;
2486 }
2487 
2488 /**
2489  * ice_vsi_decfg - remove all VSI configuration
2490  * @vsi: pointer to VSI
2491  */
2492 void ice_vsi_decfg(struct ice_vsi *vsi)
2493 {
2494 	struct ice_pf *pf = vsi->back;
2495 	int err;
2496 
2497 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
2498 	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
2499 	if (err)
2500 		dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
2501 			vsi->vsi_num, err);
2502 
2503 	if (vsi->xdp_rings)
2504 		/* return value check can be skipped here, it always returns
2505 		 * 0 if reset is in progress
2506 		 */
2507 		ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
2508 
2509 	ice_vsi_clear_rings(vsi);
2510 	ice_vsi_free_q_vectors(vsi);
2511 	ice_vsi_put_qs(vsi);
2512 	ice_vsi_free_arrays(vsi);
2513 
2514 	/* SR-IOV determines needed MSIX resources all at once instead of per
2515 	 * VSI since when VFs are spawned we know how many VFs there are and how
2516 	 * many interrupts each VF needs. SR-IOV MSIX resources are also
2517 	 * cleared in the same manner.
2518 	 */
2519 
2520 	if (vsi->type == ICE_VSI_VF &&
2521 	    vsi->agg_node && vsi->agg_node->valid)
2522 		vsi->agg_node->num_vsis--;
2523 }
2524 
2525 /**
2526  * ice_vsi_setup - Set up a VSI by a given type
2527  * @pf: board private structure
2528  * @params: parameters to use when creating the VSI
2529  *
2530  * This allocates the sw VSI structure and its queue resources.
2531  *
2532  * Returns pointer to the successfully allocated and configured VSI sw struct on
2533  * success, NULL on failure.
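 *
 * A minimal usage sketch (hypothetical caller; the exact set of fields in
 * struct ice_vsi_cfg_params beyond port_info and flags is an assumption):
 *
 *	struct ice_vsi_cfg_params params = {};
 *
 *	params.type = ICE_VSI_PF;
 *	params.port_info = pf->hw.port_info;
 *	params.flags = ICE_VSI_FLAG_INIT;
 *	vsi = ice_vsi_setup(pf, &params);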
2534  */
2535 struct ice_vsi *
2536 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
2537 {
2538 	struct device *dev = ice_pf_to_dev(pf);
2539 	struct ice_vsi *vsi;
2540 	int ret;
2541 
2542 	/* ice_vsi_setup can only initialize a new VSI, and we must have
2543 	 * a port_info structure for it.
2544 	 */
2545 	if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
2546 	    WARN_ON(!params->port_info))
2547 		return NULL;
2548 
2549 	vsi = ice_vsi_alloc(pf);
2550 	if (!vsi) {
2551 		dev_err(dev, "could not allocate VSI\n");
2552 		return NULL;
2553 	}
2554 
2555 	vsi->params = *params;
2556 	ret = ice_vsi_cfg(vsi);
2557 	if (ret)
2558 		goto err_vsi_cfg;
2559 
2560 	/* Add a switch rule to drop all Tx Flow Control Frames, with lookup
2561 	 * type ETHERTYPE, from VSIs, and restrict a malicious VF from sending
2562 	 * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
2563 	 * The rule is added once for the PF VSI in order to create the
2564 	 * appropriate recipe, since the VSI/VSI list is ignored with a drop
2565 	 * action...
2566 	 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to
2567 	 * be dropped so that VFs cannot send LLDP packets to reconfigure DCB
2568 	 * settings in the HW.
2569 	 */
2569 	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
2570 		ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
2571 				 ICE_DROP_PACKET);
2572 		ice_vsi_cfg_sw_lldp(vsi, true, true);
2573 	}
2574 
2575 	if (!vsi->agg_node)
2576 		ice_set_agg_vsi(vsi);
2577 
2578 	return vsi;
2579 
2580 err_vsi_cfg:
2581 	ice_vsi_free(vsi);
2582 
2583 	return NULL;
2584 }
2585 
2586 /**
2587  * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
2588  * @vsi: the VSI being cleaned up
2589  */
2590 static void ice_vsi_release_msix(struct ice_vsi *vsi)
2591 {
2592 	struct ice_pf *pf = vsi->back;
2593 	struct ice_hw *hw = &pf->hw;
2594 	u32 txq = 0;
2595 	u32 rxq = 0;
2596 	int i, q;
2597 
2598 	ice_for_each_q_vector(vsi, i) {
2599 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
2600 
2601 		ice_write_intrl(q_vector, 0);
2602 		for (q = 0; q < q_vector->num_ring_tx; q++) {
2603 			ice_write_itr(&q_vector->tx, 0);
2604 			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
2605 			if (vsi->xdp_rings) {
2606 				u32 xdp_txq = txq + vsi->num_xdp_txq;
2607 
2608 				wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
2609 			}
2610 			txq++;
2611 		}
2612 
2613 		for (q = 0; q < q_vector->num_ring_rx; q++) {
2614 			ice_write_itr(&q_vector->rx, 0);
2615 			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
2616 			rxq++;
2617 		}
2618 	}
2619 
2620 	ice_flush(hw);
2621 }
2622 
2623 /**
2624  * ice_vsi_free_irq - Free the IRQ association with the OS
2625  * @vsi: the VSI being configured
2626  */
2627 void ice_vsi_free_irq(struct ice_vsi *vsi)
2628 {
2629 	struct ice_pf *pf = vsi->back;
2630 	int i;
2631 
2632 	if (!vsi->q_vectors || !vsi->irqs_ready)
2633 		return;
2634 
2635 	ice_vsi_release_msix(vsi);
2636 	if (vsi->type == ICE_VSI_VF)
2637 		return;
2638 
2639 	vsi->irqs_ready = false;
2640 
2641 	ice_for_each_q_vector(vsi, i) {
2642 		int irq_num;
2643 
2644 		/* free only the irqs that were actually requested */
2645 		if (!vsi->q_vectors[i] ||
2646 		    !(vsi->q_vectors[i]->num_ring_tx ||
2647 		      vsi->q_vectors[i]->num_ring_rx))
2648 			continue;
2649 
2650 		irq_num = vsi->q_vectors[i]->irq.virq;
2651 
2652 		synchronize_irq(irq_num);
2653 		devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
2654 	}
2655 }
2656 
2657 /**
2658  * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
2659  * @vsi: the VSI having resources freed
2660  */
2661 void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
2662 {
2663 	int i;
2664 
2665 	if (!vsi->tx_rings)
2666 		return;
2667 
2668 	ice_for_each_txq(vsi, i)
2669 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2670 			ice_free_tx_ring(vsi->tx_rings[i]);
2671 }
2672 
2673 /**
2674  * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
2675  * @vsi: the VSI having resources freed
2676  */
2677 void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
2678 {
2679 	int i;
2680 
2681 	if (!vsi->rx_rings)
2682 		return;
2683 
2684 	ice_for_each_rxq(vsi, i)
2685 		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2686 			ice_free_rx_ring(vsi->rx_rings[i]);
2687 }
2688 
2689 /**
2690  * ice_vsi_close - Shut down a VSI
2691  * @vsi: the VSI being shut down
2692  */
2693 void ice_vsi_close(struct ice_vsi *vsi)
2694 {
2695 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
2696 		ice_down(vsi);
2697 
2698 	ice_vsi_clear_napi_queues(vsi);
2699 	ice_vsi_free_irq(vsi);
2700 	ice_vsi_free_tx_rings(vsi);
2701 	ice_vsi_free_rx_rings(vsi);
2702 }
2703 
2704 /**
2705  * ice_ena_vsi - resume a VSI
2706  * @vsi: the VSI being resumed
2707  * @locked: is the rtnl_lock already held
2708  */
2709 int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
2710 {
2711 	int err = 0;
2712 
2713 	if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state))
2714 		return 0;
2715 
2716 	clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
2717 
2718 	if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
2719 			    vsi->type == ICE_VSI_SF)) {
2720 		if (netif_running(vsi->netdev)) {
2721 			if (!locked)
2722 				rtnl_lock();
2723 
2724 			err = ice_open_internal(vsi->netdev);
2725 
2726 			if (!locked)
2727 				rtnl_unlock();
2728 		}
2729 	} else if (vsi->type == ICE_VSI_CTRL) {
2730 		err = ice_vsi_open_ctrl(vsi);
2731 	}
2732 
2733 	return err;
2734 }
2735 
2736 /**
2737  * ice_dis_vsi - pause a VSI
2738  * @vsi: the VSI being paused
2739  * @locked: is the rtnl_lock already held
2740  */
2741 void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
2742 {
2743 	bool already_down = test_bit(ICE_VSI_DOWN, vsi->state);
2744 
2745 	set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
2746 
2747 	if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
2748 			    vsi->type == ICE_VSI_SF)) {
2749 		if (netif_running(vsi->netdev)) {
2750 			if (!locked)
2751 				rtnl_lock();
2752 			already_down = test_bit(ICE_VSI_DOWN, vsi->state);
2753 			if (!already_down)
2754 				ice_vsi_close(vsi);
2755 
2756 			if (!locked)
2757 				rtnl_unlock();
2758 		} else if (!already_down) {
2759 			ice_vsi_close(vsi);
2760 		}
2761 	} else if (vsi->type == ICE_VSI_CTRL && !already_down) {
2762 		ice_vsi_close(vsi);
2763 	}
2764 }
2765 
2766 /**
2767  * ice_vsi_set_napi_queues - associate netdev queues with napi
2768  * @vsi: VSI pointer
2769  *
2770  * Associate queue[s] with napi for all vectors.
2771  */
2772 void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
2773 {
2774 	struct net_device *netdev = vsi->netdev;
2775 	int q_idx, v_idx;
2776 
2777 	if (!netdev)
2778 		return;
2779 
2780 	ASSERT_RTNL();
2781 	ice_for_each_rxq(vsi, q_idx)
2782 		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
2783 				     &vsi->rx_rings[q_idx]->q_vector->napi);
2784 
2785 	ice_for_each_txq(vsi, q_idx)
2786 		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX,
2787 				     &vsi->tx_rings[q_idx]->q_vector->napi);
2788 	/* Also set the interrupt number for the NAPI */
2789 	ice_for_each_q_vector(vsi, v_idx) {
2790 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2791 
2792 		netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
2793 	}
2794 }
2795 
2796 /**
2797  * ice_vsi_clear_napi_queues - dissociate netdev queues from napi
2798  * @vsi: VSI pointer
2799  *
2800  * Clear the association between all VSI queues and napi.
2801  */
2802 void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
2803 {
2804 	struct net_device *netdev = vsi->netdev;
2805 	int q_idx, v_idx;
2806 
2807 	if (!netdev)
2808 		return;
2809 
2810 	ASSERT_RTNL();
2811 	/* Clear the NAPI's interrupt number */
2812 	ice_for_each_q_vector(vsi, v_idx) {
2813 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2814 
2815 		netif_napi_set_irq(&q_vector->napi, -1);
2816 	}
2817 
2818 	ice_for_each_txq(vsi, q_idx)
2819 		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);
2820 
2821 	ice_for_each_rxq(vsi, q_idx)
2822 		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, NULL);
2823 }
2824 
2825 /**
2826  * ice_napi_add - register NAPI handler for the VSI
2827  * @vsi: VSI for which NAPI handler is to be registered
2828  *
2829  * This function is only called in the driver's load path. Registering the NAPI
2830  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
2831  * reset/rebuild, etc.)
2832  */
2833 void ice_napi_add(struct ice_vsi *vsi)
2834 {
2835 	int v_idx;
2836 
2837 	if (!vsi->netdev)
2838 		return;
2839 
2840 	ice_for_each_q_vector(vsi, v_idx)
2841 		netif_napi_add_config(vsi->netdev,
2842 				      &vsi->q_vectors[v_idx]->napi,
2843 				      ice_napi_poll,
2844 				      v_idx);
2845 }
2846 
2847 /**
2848  * ice_vsi_release - Delete a VSI and free its resources
2849  * @vsi: the VSI being removed
2850  *
2851  * Returns 0 on success or < 0 on error
2852  */
2853 int ice_vsi_release(struct ice_vsi *vsi)
2854 {
2855 	struct ice_pf *pf;
2856 
2857 	if (!vsi->back)
2858 		return -ENODEV;
2859 	pf = vsi->back;
2860 
2861 	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2862 		ice_rss_clean(vsi);
2863 
2864 	ice_vsi_close(vsi);
2865 
2866 	/* The Rx rule will only exist to be removed if the LLDP FW
2867 	 * engine is currently stopped
2868 	 */
2869 	if (!ice_is_safe_mode(pf) &&
2870 	    !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) &&
2871 	    (vsi->type == ICE_VSI_PF || (vsi->type == ICE_VSI_VF &&
2872 	     ice_vf_is_lldp_ena(vsi->vf))))
2873 		ice_vsi_cfg_sw_lldp(vsi, false, false);
2874 
2875 	ice_vsi_decfg(vsi);
2876 
2877 	/* retain SW VSI data structure since it is needed to unregister and
2878 	 * free VSI netdev when PF is not in reset recovery pending state,
2879 	 * for example during rmmod.
2880 	 */
2881 	if (!ice_is_reset_in_progress(pf->state))
2882 		ice_vsi_delete(vsi);
2883 
2884 	return 0;
2885 }
2886 
2887 /**
2888  * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
2889  * @vsi: VSI connected with q_vectors
2890  * @coalesce: array of struct with stored coalesce
2891  *
2892  * Returns array size.
2893  */
2894 static int
2895 ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
2896 			     struct ice_coalesce_stored *coalesce)
2897 {
2898 	int i;
2899 
2900 	ice_for_each_q_vector(vsi, i) {
2901 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
2902 
2903 		coalesce[i].itr_tx = q_vector->tx.itr_settings;
2904 		coalesce[i].itr_rx = q_vector->rx.itr_settings;
2905 		coalesce[i].intrl = q_vector->intrl;
2906 
2907 		if (i < vsi->num_txq)
2908 			coalesce[i].tx_valid = true;
2909 		if (i < vsi->num_rxq)
2910 			coalesce[i].rx_valid = true;
2911 	}
2912 
2913 	return vsi->num_q_vectors;
2914 }
2915 
2916 /**
2917  * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
2918  * @vsi: VSI connected with q_vectors
2919  * @coalesce: pointer to array of struct with stored coalesce
2920  * @size: size of coalesce array
2921  *
2922  * ice_vsi_rebuild_get_coalesce should be called beforehand to save the ITR
2923  * params in the array. If size is 0 or a coalesce entry wasn't stored, the
2924  * coalesce settings are reset to their default values.
2925  */
2926 static void
2927 ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
2928 			     struct ice_coalesce_stored *coalesce, int size)
2929 {
2930 	struct ice_ring_container *rc;
2931 	int i;
2932 
2933 	if ((size && !coalesce) || !vsi)
2934 		return;
2935 
2936 	/* There are a couple of cases that have to be handled here:
2937 	 *   1. The case where the number of queue vectors stays the same, but
2938 	 *      the number of Tx or Rx rings changes (the first for loop)
2939 	 *   2. The case where the number of queue vectors increased (the
2940 	 *      second for loop)
2941 	 */
2942 	for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
2943 		/* There are 2 cases to handle here and they are the same for
2944 		 * both Tx and Rx:
2945 		 *   if the entry was valid previously (coalesce[i].[tr]x_valid)
2946 		 *   and the loop variable is less than the number of rings
2947 		 *   allocated, then write the previous values
2948 		 *
2949 		 *   if the entry was not valid previously, but the number of
2950 		 *   rings is less than are allocated (this means the number of
2951 		 *   rings increased from previously), then write out the
2952 		 *   values in the first element
2953 		 *
2954 		 *   Also, always write the ITR, even when ITR_IS_DYNAMIC,
2955 		 *   as there is no harm: the dynamic algorithm will just
2956 		 *   overwrite it.
2957 		 */
2958 		if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
2959 			rc = &vsi->q_vectors[i]->rx;
2960 			rc->itr_settings = coalesce[i].itr_rx;
2961 			ice_write_itr(rc, rc->itr_setting);
2962 		} else if (i < vsi->alloc_rxq) {
2963 			rc = &vsi->q_vectors[i]->rx;
2964 			rc->itr_settings = coalesce[0].itr_rx;
2965 			ice_write_itr(rc, rc->itr_setting);
2966 		}
2967 
2968 		if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
2969 			rc = &vsi->q_vectors[i]->tx;
2970 			rc->itr_settings = coalesce[i].itr_tx;
2971 			ice_write_itr(rc, rc->itr_setting);
2972 		} else if (i < vsi->alloc_txq) {
2973 			rc = &vsi->q_vectors[i]->tx;
2974 			rc->itr_settings = coalesce[0].itr_tx;
2975 			ice_write_itr(rc, rc->itr_setting);
2976 		}
2977 
2978 		vsi->q_vectors[i]->intrl = coalesce[i].intrl;
2979 		ice_set_q_vector_intrl(vsi->q_vectors[i]);
2980 	}
2981 
2982 	/* the number of queue vectors increased so write whatever is in
2983 	 * the first element
2984 	 */
2985 	for (; i < vsi->num_q_vectors; i++) {
2986 		/* transmit */
2987 		rc = &vsi->q_vectors[i]->tx;
2988 		rc->itr_settings = coalesce[0].itr_tx;
2989 		ice_write_itr(rc, rc->itr_setting);
2990 
2991 		/* receive */
2992 		rc = &vsi->q_vectors[i]->rx;
2993 		rc->itr_settings = coalesce[0].itr_rx;
2994 		ice_write_itr(rc, rc->itr_setting);
2995 
2996 		vsi->q_vectors[i]->intrl = coalesce[0].intrl;
2997 		ice_set_q_vector_intrl(vsi->q_vectors[i]);
2998 	}
2999 }
3000 
3001 /**
3002  * ice_vsi_realloc_stat_arrays - Frees unused stat structures or allocates new ones
3003  * @vsi: VSI pointer
3004  */
3005 static int
3006 ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
3007 {
3008 	u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq;
3009 	u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq;
3010 	struct ice_ring_stats **tx_ring_stats;
3011 	struct ice_ring_stats **rx_ring_stats;
3012 	struct ice_vsi_stats *vsi_stat;
3013 	struct ice_pf *pf = vsi->back;
3014 	u16 prev_txq = vsi->alloc_txq;
3015 	u16 prev_rxq = vsi->alloc_rxq;
3016 	int i;
3017 
3018 	vsi_stat = pf->vsi_stats[vsi->idx];
3019 
3020 	if (req_txq < prev_txq) {
3021 		for (i = req_txq; i < prev_txq; i++) {
3022 			if (vsi_stat->tx_ring_stats[i]) {
3023 				kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
3024 				WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
3025 			}
3026 		}
3027 	}
3028 
3029 	tx_ring_stats = vsi_stat->tx_ring_stats;
3030 	vsi_stat->tx_ring_stats =
3031 		krealloc_array(vsi_stat->tx_ring_stats, req_txq,
3032 			       sizeof(*vsi_stat->tx_ring_stats),
3033 			       GFP_KERNEL | __GFP_ZERO);
3034 	if (!vsi_stat->tx_ring_stats) {
3035 		vsi_stat->tx_ring_stats = tx_ring_stats;
3036 		return -ENOMEM;
3037 	}
3038 
3039 	if (req_rxq < prev_rxq) {
3040 		for (i = req_rxq; i < prev_rxq; i++) {
3041 			if (vsi_stat->rx_ring_stats[i]) {
3042 				kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
3043 				WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
3044 			}
3045 		}
3046 	}
3047 
3048 	rx_ring_stats = vsi_stat->rx_ring_stats;
3049 	vsi_stat->rx_ring_stats =
3050 		krealloc_array(vsi_stat->rx_ring_stats, req_rxq,
3051 			       sizeof(*vsi_stat->rx_ring_stats),
3052 			       GFP_KERNEL | __GFP_ZERO);
3053 	if (!vsi_stat->rx_ring_stats) {
3054 		vsi_stat->rx_ring_stats = rx_ring_stats;
3055 		return -ENOMEM;
3056 	}
3057 
3058 	return 0;
3059 }
3060 
3061 /**
3062  * ice_vsi_rebuild - Rebuild VSI after reset
3063  * @vsi: VSI to be rebuilt
3064  * @vsi_flags: flags used for VSI rebuild flow
3065  *
3066  * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or
3067  * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
3068  *
3069  * Returns 0 on success and negative value on failure
3070  */
3071 int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
3072 {
3073 	struct ice_coalesce_stored *coalesce;
3074 	int prev_num_q_vectors;
3075 	struct ice_pf *pf;
3076 	int ret;
3077 
3078 	if (!vsi)
3079 		return -EINVAL;
3080 
3081 	vsi->flags = vsi_flags;
3082 	pf = vsi->back;
3083 	if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
3084 		return -EINVAL;
3085 
3086 	mutex_lock(&vsi->xdp_state_lock);
3087 
3088 	ret = ice_vsi_realloc_stat_arrays(vsi);
3089 	if (ret)
3090 		goto unlock;
3091 
3092 	ice_vsi_decfg(vsi);
3093 	ret = ice_vsi_cfg_def(vsi);
3094 	if (ret)
3095 		goto unlock;
3096 
3097 	coalesce = kcalloc(vsi->num_q_vectors,
3098 			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);
3099 	if (!coalesce) {
3100 		ret = -ENOMEM;
3101 		goto decfg;
3102 	}
3103 
3104 	prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
3105 
3106 	ret = ice_vsi_cfg_tc_lan(pf, vsi);
3107 	if (ret) {
3108 		if (vsi_flags & ICE_VSI_FLAG_INIT) {
3109 			ret = -EIO;
3110 			goto free_coalesce;
3111 		}
3112 
3113 		ret = ice_schedule_reset(pf, ICE_RESET_PFR);
3114 		goto free_coalesce;
3115 	}
3116 
3117 	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
3118 	clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state);
3119 
3120 free_coalesce:
3121 	kfree(coalesce);
3122 decfg:
3123 	if (ret)
3124 		ice_vsi_decfg(vsi);
3125 unlock:
3126 	mutex_unlock(&vsi->xdp_state_lock);
3127 	return ret;
3128 }
3129 
3130 /**
3131  * ice_is_reset_in_progress - check for a reset in progress
3132  * @state: PF state field
3133  */
3134 bool ice_is_reset_in_progress(unsigned long *state)
3135 {
3136 	return test_bit(ICE_RESET_OICR_RECV, state) ||
3137 	       test_bit(ICE_PFR_REQ, state) ||
3138 	       test_bit(ICE_CORER_REQ, state) ||
3139 	       test_bit(ICE_GLOBR_REQ, state);
3140 }
3141 
3142 /**
3143  * ice_wait_for_reset - Wait for driver to finish reset and rebuild
3144  * @pf: pointer to the PF structure
3145  * @timeout: length of time to wait, in jiffies
3146  *
3147  * Wait (sleep) for a short time until the driver finishes cleaning up from
3148  * a device reset. The caller must be able to sleep. Use this to delay
3149  * operations that could fail while the driver is cleaning up after a device
3150  * reset.
3151  *
3152  * Returns 0 on success, -EBUSY if the reset is not finished within the
3153  * timeout, and -ERESTARTSYS if the thread was interrupted.
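 *
 * A typical (hypothetical) caller bounds the wait, for example:
 *
 *	err = ice_wait_for_reset(pf, 5 * HZ);
 *	if (err)
 *		return err;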
3154  */
3155 int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout)
3156 {
3157 	long ret;
3158 
3159 	ret = wait_event_interruptible_timeout(pf->reset_wait_queue,
3160 					       !ice_is_reset_in_progress(pf->state),
3161 					       timeout);
3162 	if (ret < 0)
3163 		return ret;
3164 	else if (!ret)
3165 		return -EBUSY;
3166 	else
3167 		return 0;
3168 }
3169 
3170 /**
3171  * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
3172  * @vsi: VSI being configured
3173  * @ctx: the context buffer returned from AQ VSI update command
3174  */
3175 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
3176 {
3177 	vsi->info.mapping_flags = ctx->info.mapping_flags;
3178 	memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
3179 	       sizeof(vsi->info.q_mapping));
3180 	memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
3181 	       sizeof(vsi->info.tc_mapping));
3182 }
3183 
3184 /**
3185  * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
3186  * @vsi: the VSI being configured
3187  * @ena_tc: TC map to be enabled
3188  */
3189 void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
3190 {
3191 	struct net_device *netdev = vsi->netdev;
3192 	struct ice_pf *pf = vsi->back;
3193 	int numtc = vsi->tc_cfg.numtc;
3194 	struct ice_dcbx_cfg *dcbcfg;
3195 	u8 netdev_tc;
3196 	int i;
3197 
3198 	if (!netdev)
3199 		return;
3200 
3201 	/* CHNL VSI doesn't have its own netdev, hence, no netdev_tc */
3202 	if (vsi->type == ICE_VSI_CHNL)
3203 		return;
3204 
3205 	if (!ena_tc) {
3206 		netdev_reset_tc(netdev);
3207 		return;
3208 	}
3209 
3210 	if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf))
3211 		numtc = vsi->all_numtc;
3212 
3213 	if (netdev_set_num_tc(netdev, numtc))
3214 		return;
3215 
3216 	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
3217 
3218 	ice_for_each_traffic_class(i)
3219 		if (vsi->tc_cfg.ena_tc & BIT(i))
3220 			netdev_set_tc_queue(netdev,
3221 					    vsi->tc_cfg.tc_info[i].netdev_tc,
3222 					    vsi->tc_cfg.tc_info[i].qcount_tx,
3223 					    vsi->tc_cfg.tc_info[i].qoffset);
3224 	/* setup TC queue map for CHNL TCs */
3225 	ice_for_each_chnl_tc(i) {
3226 		if (!(vsi->all_enatc & BIT(i)))
3227 			break;
3228 		if (!vsi->mqprio_qopt.qopt.count[i])
3229 			break;
3230 		netdev_set_tc_queue(netdev, i,
3231 				    vsi->mqprio_qopt.qopt.count[i],
3232 				    vsi->mqprio_qopt.qopt.offset[i]);
3233 	}
3234 
3235 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3236 		return;
3237 
3238 	for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
3239 		u8 ets_tc = dcbcfg->etscfg.prio_table[i];
3240 
3241 		/* Get the mapped netdev TC# for the UP */
3242 		netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
3243 		netdev_set_prio_tc_map(netdev, i, netdev_tc);
3244 	}
3245 }
3246 
3247 /**
3248  * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
3249  * @vsi: the VSI being configured,
3250  * @ctxt: VSI context structure
3251  * @ena_tc: number of traffic classes to enable
3252  *
3253  * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
3254  */
3255 static int
3256 ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
3257 			   u8 ena_tc)
3258 {
3259 	u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
3260 	u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
3261 	int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
3262 	u16 new_txq, new_rxq;
3263 	u8 netdev_tc = 0;
3264 	int i;
3265 
3266 	vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
3267 
3268 	pow = order_base_2(tc0_qcount);
3269 	qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, tc0_offset);
3270 	qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
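
	/* Worked example: with tc0_offset = 0 and tc0_qcount = 4,
	 * pow = order_base_2(4) = 2, so TC0's qmap encodes "start at
	 * queue 0, use 2^2 = 4 contiguous queues".
	 */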
3271 
3272 	ice_for_each_traffic_class(i) {
3273 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
3274 			/* TC is not enabled */
3275 			vsi->tc_cfg.tc_info[i].qoffset = 0;
3276 			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
3277 			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
3278 			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
3279 			ctxt->info.tc_mapping[i] = 0;
3280 			continue;
3281 		}
3282 
3283 		offset = vsi->mqprio_qopt.qopt.offset[i];
3284 		qcount_rx = vsi->mqprio_qopt.qopt.count[i];
3285 		qcount_tx = vsi->mqprio_qopt.qopt.count[i];
3286 		vsi->tc_cfg.tc_info[i].qoffset = offset;
3287 		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
3288 		vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
3289 		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
3290 	}
3291 
3292 	if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
3293 		ice_for_each_chnl_tc(i) {
3294 			if (!(vsi->all_enatc & BIT(i)))
3295 				continue;
3296 			offset = vsi->mqprio_qopt.qopt.offset[i];
3297 			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
3298 			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
3299 		}
3300 	}
3301 
3302 	new_txq = offset + qcount_tx;
3303 	if (new_txq > vsi->alloc_txq) {
3304 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u) than were allocated (%u)!\n",
3305 			new_txq, vsi->alloc_txq);
3306 		return -EINVAL;
3307 	}
3308 
3309 	new_rxq = offset + qcount_rx;
3310 	if (new_rxq > vsi->alloc_rxq) {
3311 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u) than were allocated (%u)!\n",
3312 			new_rxq, vsi->alloc_rxq);
3313 		return -EINVAL;
3314 	}
3315 
3316 	/* Set actual Tx/Rx queue pairs */
3317 	vsi->num_txq = new_txq;
3318 	vsi->num_rxq = new_rxq;
3319 
3320 	/* Setup queue TC[0].qmap for given VSI context */
3321 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
3322 	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
3323 	ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);
3324 
3325 	/* Find the queue count available for channel VSIs and the
3326 	 * starting queue offset for channel VSIs
3327 	 */
3328 	if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
3329 		vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
3330 		vsi->next_base_q = tc0_qcount;
3331 	}
3332 	dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n",  vsi->num_txq);
3333 	dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n",  vsi->num_rxq);
3334 	dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
3335 		vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
3336 
3337 	return 0;
3338 }
3339 
3340 /**
3341  * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
3342  * @vsi: VSI to be configured
3343  * @ena_tc: TC bitmap
3344  *
3345  * VSI queues expected to be quiesced before calling this function
3346  */
3347 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
3348 {
3349 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
3350 	struct ice_pf *pf = vsi->back;
3351 	struct ice_tc_cfg old_tc_cfg;
3352 	struct ice_vsi_ctx *ctx;
3353 	struct device *dev;
3354 	int i, ret = 0;
3355 	u8 num_tc = 0;
3356 
3357 	dev = ice_pf_to_dev(pf);
3358 	if (vsi->tc_cfg.ena_tc == ena_tc &&
3359 	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
3360 		return 0;
3361 
3362 	ice_for_each_traffic_class(i) {
3363 		/* build bitmap of enabled TCs */
3364 		if (ena_tc & BIT(i))
3365 			num_tc++;
3366 		/* populate max_txqs per TC */
3367 		max_txqs[i] = vsi->alloc_txq;
3368 		/* Update max_txqs if this is a CHNL VSI, because alloc_t[r]xq
3369 		 * are zero for a CHNL VSI; use num_txq as max_txqs instead
3370 		 */
3371 		if (vsi->type == ICE_VSI_CHNL &&
3372 		    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3373 			max_txqs[i] = vsi->num_txq;
3374 	}
3375 
3376 	memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
3377 	vsi->tc_cfg.ena_tc = ena_tc;
3378 	vsi->tc_cfg.numtc = num_tc;
3379 
3380 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
3381 	if (!ctx)
3382 		return -ENOMEM;
3383 
3384 	ctx->vf_num = 0;
3385 	ctx->info = vsi->info;
3386 
3387 	if (vsi->type == ICE_VSI_PF &&
3388 	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3389 		ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
3390 	else
3391 		ret = ice_vsi_setup_q_map(vsi, ctx);
3392 
3393 	if (ret) {
3394 		memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
3395 		goto out;
3396 	}
3397 
3398 	/* must indicate which sections of the VSI context are being modified */
3399 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
3400 	ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
3401 	if (ret) {
3402 		dev_info(dev, "Failed VSI Update\n");
3403 		goto out;
3404 	}
3405 
3406 	if (vsi->type == ICE_VSI_PF &&
3407 	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3408 		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
3409 	else
3410 		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
3411 				      vsi->tc_cfg.ena_tc, max_txqs);
3412 
3413 	if (ret) {
3414 		dev_err(dev, "VSI %d failed TC config, error %d\n",
3415 			vsi->vsi_num, ret);
3416 		goto out;
3417 	}
3418 	ice_vsi_update_q_map(vsi, ctx);
3419 	vsi->info.valid_sections = 0;
3420 
3421 	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
3422 out:
3423 	kfree(ctx);
3424 	return ret;
3425 }
3426 
3427 /**
3428  * ice_update_ring_stats - Update ring statistics
3429  * @stats: stats to be updated
3430  * @pkts: number of processed packets
3431  * @bytes: number of processed bytes
3432  *
3433  * This function assumes that caller has acquired a u64_stats_sync lock.
3434  */
3435 static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
3436 {
3437 	stats->bytes += bytes;
3438 	stats->pkts += pkts;
3439 }
3440 
3441 /**
3442  * ice_update_tx_ring_stats - Update Tx ring specific counters
3443  * @tx_ring: ring to update
3444  * @pkts: number of processed packets
3445  * @bytes: number of processed bytes
3446  */
3447 void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
3448 {
3449 	u64_stats_update_begin(&tx_ring->ring_stats->syncp);
3450 	ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
3451 	u64_stats_update_end(&tx_ring->ring_stats->syncp);
3452 }
3453 
3454 /**
3455  * ice_update_rx_ring_stats - Update Rx ring specific counters
3456  * @rx_ring: ring to update
3457  * @pkts: number of processed packets
3458  * @bytes: number of processed bytes
3459  */
3460 void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
3461 {
3462 	u64_stats_update_begin(&rx_ring->ring_stats->syncp);
3463 	ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
3464 	u64_stats_update_end(&rx_ring->ring_stats->syncp);
3465 }
3466 
3467 /**
3468  * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
3469  * @pi: port info of the switch with default VSI
3470  *
3471  * Return true if there is a single VSI in the default forwarding VSI list
3472  */
3473 bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
3474 {
3475 	bool exists = false;
3476 
3477 	ice_check_if_dflt_vsi(pi, 0, &exists);
3478 	return exists;
3479 }
3480 
3481 /**
3482  * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
3483  * @vsi: VSI to compare against default forwarding VSI
3484  *
3485  * If the VSI passed in is the default forwarding VSI then return true, else
3486  * return false
3487  */
3488 bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
3489 {
3490 	return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
3491 }
3492 
3493 /**
3494  * ice_set_dflt_vsi - set the default forwarding VSI
3495  * @vsi: VSI getting set as the default forwarding VSI on the switch
3496  *
3497  * If the VSI passed in is already the default VSI and it's enabled just return
3498  * success.
3499  *
3500  * Otherwise try to set the VSI passed in as the switch's default VSI and
3501  * return the result.
3502  */
3503 int ice_set_dflt_vsi(struct ice_vsi *vsi)
3504 {
3505 	struct device *dev;
3506 	int status;
3507 
3508 	if (!vsi)
3509 		return -EINVAL;
3510 
3511 	dev = ice_pf_to_dev(vsi->back);
3512 
3513 	if (ice_lag_is_switchdev_running(vsi->back)) {
3514 		dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n",
3515 			vsi->vsi_num);
3516 		return 0;
3517 	}
3518 
3519 	/* the VSI passed in is already the default VSI */
3520 	if (ice_is_vsi_dflt_vsi(vsi)) {
3521 		dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
3522 			vsi->vsi_num);
3523 		return 0;
3524 	}
3525 
3526 	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
3527 	if (status) {
3528 		dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
3529 			vsi->vsi_num, status);
3530 		return status;
3531 	}
3532 
3533 	return 0;
3534 }
3535 
3536 /**
3537  * ice_clear_dflt_vsi - clear the default forwarding VSI
3538  * @vsi: VSI to remove from filter list
3539  *
3540  * If the switch has no default VSI or it's not enabled then return error.
3541  *
3542  * Otherwise try to clear the default VSI and return the result.
3543  */
3544 int ice_clear_dflt_vsi(struct ice_vsi *vsi)
3545 {
3546 	struct device *dev;
3547 	int status;
3548 
3549 	if (!vsi)
3550 		return -EINVAL;
3551 
3552 	dev = ice_pf_to_dev(vsi->back);
3553 
3554 	/* there is no default VSI configured */
3555 	if (!ice_is_dflt_vsi_in_use(vsi->port_info))
3556 		return -ENODEV;
3557 
3558 	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
3559 				  ICE_FLTR_RX);
3560 	if (status) {
3561 		dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
3562 			vsi->vsi_num, status);
3563 		return -EIO;
3564 	}
3565 
3566 	return 0;
3567 }
3568 
3569 /**
3570  * ice_get_link_speed_mbps - get link speed in Mbps
3571  * @vsi: the VSI whose link speed is being queried
3572  *
3573  * Return the current VSI link speed, or 0 if the speed is unknown.
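 *
 * Note: link_speed here is a one-hot ICE_AQ_LINK_SPEED_* bitmask, so
 * fls(link_speed) - 1 recovers the set bit's index, which
 * ice_get_link_speed() then translates to a speed in Mbps.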
3574  */
3575 int ice_get_link_speed_mbps(struct ice_vsi *vsi)
3576 {
3577 	unsigned int link_speed;
3578 
3579 	link_speed = vsi->port_info->phy.link_info.link_speed;
3580 
3581 	return (int)ice_get_link_speed(fls(link_speed) - 1);
3582 }
3583 
3584 /**
3585  * ice_get_link_speed_kbps - get link speed in Kbps
3586  * @vsi: the VSI whose link speed is being queried
3587  *
3588  * Return the current VSI link speed, or 0 if the speed is unknown.
3589  */
3590 int ice_get_link_speed_kbps(struct ice_vsi *vsi)
3591 {
3592 	int speed_mbps;
3593 
3594 	speed_mbps = ice_get_link_speed_mbps(vsi);
3595 
3596 	return speed_mbps * 1000;
3597 }
3598 
3599 /**
3600  * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
3601  * @vsi: VSI to be configured
3602  * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
3603  *
3604  * If the min_tx_rate is specified as 0 that means to clear the minimum BW limit
3605  * profile, otherwise a non-zero value will force a minimum BW limit for the VSI
3606  * on TC 0.
3607  */
3608 int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
3609 {
3610 	struct ice_pf *pf = vsi->back;
3611 	struct device *dev;
3612 	int status;
3613 	int speed;
3614 
3615 	dev = ice_pf_to_dev(pf);
3616 	if (!vsi->port_info) {
3617 		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
3618 			vsi->idx, vsi->type);
3619 		return -EINVAL;
3620 	}
3621 
3622 	speed = ice_get_link_speed_kbps(vsi);
3623 	if (min_tx_rate > (u64)speed) {
3624 		dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d: greater than current link speed %u Kbps\n",
3625 			min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
3626 			speed);
3627 		return -EINVAL;
3628 	}
3629 
3630 	/* Configure min BW for VSI limit */
3631 	if (min_tx_rate) {
3632 		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
3633 						   ICE_MIN_BW, min_tx_rate);
3634 		if (status) {
3635 			dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
3636 				min_tx_rate, ice_vsi_type_str(vsi->type),
3637 				vsi->idx);
3638 			return status;
3639 		}
3640 
3641 		dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
3642 			min_tx_rate, ice_vsi_type_str(vsi->type));
3643 	} else {
3644 		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
3645 							vsi->idx, 0,
3646 							ICE_MIN_BW);
3647 		if (status) {
3648 			dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
3649 				ice_vsi_type_str(vsi->type), vsi->idx);
3650 			return status;
3651 		}
3652 
3653 		dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
3654 			ice_vsi_type_str(vsi->type), vsi->idx);
3655 	}
3656 
3657 	return 0;
3658 }
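
/* Editor's note: a minimal caller sketch, not part of the driver. Rates
 * from user-facing paths (e.g. an ndo_set_vf_rate()-style callback) are in
 * Mbps, while ice_set_min_bw_limit() expects Kbps; passing 0 clears the
 * limit.
 */
static int example_apply_min_rate(struct ice_vsi *vsi, int min_rate_mbps)
{
	return ice_set_min_bw_limit(vsi, (u64)min_rate_mbps * 1000);
}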
3659 
3660 /**
3661  * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
3662  * @vsi: VSI to be configured
3663  * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
3664  *
3665  * If max_tx_rate is specified as 0, clear the maximum BW limit profile;
3666  * otherwise a non-zero value will force a maximum BW limit for the VSI
3667  * on TC 0.
3668  */
3669 int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
3670 {
3671 	struct ice_pf *pf = vsi->back;
3672 	struct device *dev;
3673 	int status;
3674 	int speed;
3675 
3676 	dev = ice_pf_to_dev(pf);
3677 	if (!vsi->port_info) {
3678 		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
3679 			vsi->idx, vsi->type);
3680 		return -EINVAL;
3681 	}
3682 
3683 	speed = ice_get_link_speed_kbps(vsi);
3684 	if (max_tx_rate > (u64)speed) {
3685 		dev_err(dev, "max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
3686 			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
3687 			speed);
3688 		return -EINVAL;
3689 	}
3690 
3691 	/* Configure max BW for VSI limit */
3692 	if (max_tx_rate) {
3693 		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
3694 						   ICE_MAX_BW, max_tx_rate);
3695 		if (status) {
3696 			dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
3697 				max_tx_rate, ice_vsi_type_str(vsi->type),
3698 				vsi->idx);
3699 			return status;
3700 		}
3701 
3702 		dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
3703 			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
3704 	} else {
3705 		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
3706 							vsi->idx, 0,
3707 							ICE_MAX_BW);
3708 		if (status) {
3709 			dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
3710 				ice_vsi_type_str(vsi->type), vsi->idx);
3711 			return status;
3712 		}
3713 
3714 		dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
3715 			ice_vsi_type_str(vsi->type), vsi->idx);
3716 	}
3717 
3718 	return 0;
3719 }
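
/* Editor's note: companion sketch to the one after ice_set_min_bw_limit(),
 * not part of the driver. Both helpers treat a rate of 0 as "clear the
 * profile", so a teardown path can simply pass 0 for each direction.
 */
static void example_clear_tx_rate_limits(struct ice_vsi *vsi)
{
	ice_set_min_bw_limit(vsi, 0);	/* clears the ICE_MIN_BW profile */
	ice_set_max_bw_limit(vsi, 0);	/* clears the ICE_MAX_BW profile */
}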
3720 
3721 /**
3722  * ice_set_link - turn on/off physical link
3723  * @vsi: VSI to modify physical link on
3724  * @ena: turn on/off physical link
3725  */
3726 int ice_set_link(struct ice_vsi *vsi, bool ena)
3727 {
3728 	struct device *dev = ice_pf_to_dev(vsi->back);
3729 	struct ice_port_info *pi = vsi->port_info;
3730 	struct ice_hw *hw = pi->hw;
3731 	int status;
3732 
3733 	if (vsi->type != ICE_VSI_PF)
3734 		return -EINVAL;
3735 
3736 	status = ice_aq_set_link_restart_an(pi, ena, NULL);
3737 
3738 	/* If the link is owned by manageability, FW will return
3739 	 * LIBIE_AQ_RC_EMODE. This is not a fatal error, so print a warning
3740 	 * message and return a success code. Return an error if FW returns an
3741 	 * error code other than LIBIE_AQ_RC_EMODE.
3742 	 */
3743 	if (status == -EIO) {
3744 		if (hw->adminq.sq_last_status == LIBIE_AQ_RC_EMODE)
3745 			dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
3746 				(ena ? "ON" : "OFF"), status,
3747 				libie_aq_str(hw->adminq.sq_last_status));
3748 	} else if (status) {
3749 		dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
3750 			(ena ? "ON" : "OFF"), status,
3751 			libie_aq_str(hw->adminq.sq_last_status));
3752 		return status;
3753 	}
3754 
3755 	return 0;
3756 }
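
/* Editor's note: a hedged usage sketch, not part of the driver. An admin
 * "link bounce" path might drop the link and bring it back up to restart
 * autonegotiation; the LIBIE_AQ_RC_EMODE case (manageability owns the
 * link) is already swallowed inside ice_set_link().
 */
static int example_bounce_phys_link(struct ice_vsi *vsi)
{
	int err;

	err = ice_set_link(vsi, false);
	if (err)
		return err;

	return ice_set_link(vsi, true);
}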
3757 
3758 /**
3759  * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
3760  * @vsi: VSI used to add VLAN filters
3761  *
3762  * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are based
3763  * on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8) doesn't
3764  * matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
3765  * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
3766  *
3767  * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
3768  * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
3769  * traffic in SVM, since the VLAN TPID isn't part of filtering.
3770  *
3771  * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
3772  * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
3773  * part of filtering.
3774  */
3775 int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
3776 {
3777 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3778 	struct ice_vlan vlan;
3779 	int err;
3780 
3781 	vlan = ICE_VLAN(0, 0, 0);
3782 	err = vlan_ops->add_vlan(vsi, &vlan);
3783 	if (err && err != -EEXIST)
3784 		return err;
3785 
3786 	/* in SVM both VLAN 0 filters are identical */
3787 	if (!ice_is_dvm_ena(&vsi->back->hw))
3788 		return 0;
3789 
3790 	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
3791 	err = vlan_ops->add_vlan(vsi, &vlan);
3792 	if (err && err != -EEXIST)
3793 		return err;
3794 
3795 	return 0;
3796 }
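
/* Editor's note: illustrative sketch, not part of the driver, of the same
 * vlan_ops pattern used above applied to a non-zero VLAN. ICE_VLAN() packs
 * (TPID, VLAN ID, priority) into a struct ice_vlan; in DVM the TPID is part
 * of the filter key, so ETH_P_8021Q is spelled out explicitly.
 */
static int example_add_tagged_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct ice_vlan vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);

	return vlan_ops->add_vlan(vsi, &vlan);
}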
3797 
3798 /**
3799  * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
3800  * @vsi: VSI used to delete VLAN filters
3801  *
3802  * Delete the VLAN 0 filters in the same manner that they were added in
3803  * ice_vsi_add_vlan_zero.
3804  */
3805 int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
3806 {
3807 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3808 	struct ice_vlan vlan;
3809 	int err;
3810 
3811 	vlan = ICE_VLAN(0, 0, 0);
3812 	err = vlan_ops->del_vlan(vsi, &vlan);
3813 	if (err && err != -EEXIST)
3814 		return err;
3815 
3816 	/* in SVM both VLAN 0 filters are identical */
3817 	if (!ice_is_dvm_ena(&vsi->back->hw))
3818 		return 0;
3819 
3820 	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
3821 	err = vlan_ops->del_vlan(vsi, &vlan);
3822 	if (err && err != -EEXIST)
3823 		return err;
3824 
3825 	/* when deleting the last VLAN filter, make sure to disable the VLAN
3826 	 * promisc mode so the filter isn't left by accident
3827 	 */
3828 	return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3829 				    ICE_MCAST_VLAN_PROMISC_BITS, 0);
3830 }
3831 
3832 /**
3833  * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
3834  * @vsi: VSI used to get the VLAN mode
3835  *
3836  * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
3837  * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
3838  */
3839 static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
3840 {
3841 #define ICE_DVM_NUM_ZERO_VLAN_FLTRS	2
3842 #define ICE_SVM_NUM_ZERO_VLAN_FLTRS	1
3843 	/* no VLAN 0 filter is created when a port VLAN is active */
3844 	if (vsi->type == ICE_VSI_VF) {
3845 		if (WARN_ON(!vsi->vf))
3846 			return 0;
3847 
3848 		if (ice_vf_is_port_vlan_ena(vsi->vf))
3849 			return 0;
3850 	}
3851 
3852 	if (ice_is_dvm_ena(&vsi->back->hw))
3853 		return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
3854 	else
3855 		return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
3856 }
3857 
3858 /**
3859  * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
3860  * @vsi: VSI used to determine if any non-zero VLANs have been added
3861  */
3862 bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
3863 {
3864 	return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
3865 }
3866 
3867 /**
3868  * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
3869  * @vsi: VSI used to get the number of non-zero VLANs added
3870  */
3871 u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
3872 {
3873 	return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
3874 }
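
/* Editor's note: worked example, not from the source. With DVM enabled a
 * VSI carries 2 implicit VLAN 0 filters, so num_vlan == 5 means 3
 * user-added VLANs; with num_vlan == 2, ice_vsi_has_non_zero_vlans()
 * returns false.
 */
static void example_vlan_accounting(struct ice_vsi *vsi)
{
	if (ice_vsi_has_non_zero_vlans(vsi))
		pr_info("VSI %u has %u user-added VLAN(s)\n", vsi->vsi_num,
			ice_vsi_num_non_zero_vlans(vsi));
}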
3875 
3876 /**
3877  * ice_is_feature_supported - check if a feature is supported
3878  * @pf: pointer to the struct ice_pf instance
3879  * @f: feature enum to be checked
3880  *
3881  * Returns true if the feature is supported, false otherwise.
3882  */
3883 bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
3884 {
3885 	if (f < 0 || f >= ICE_F_MAX)
3886 		return false;
3887 
3888 	return test_bit(f, pf->features);
3889 }
3890 
3891 /**
3892  * ice_set_feature_support - mark a feature as supported
3893  * @pf: pointer to the struct ice_pf instance
3894  * @f: feature enum to set
3895  */
3896 void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
3897 {
3898 	if (f < 0 || f >= ICE_F_MAX)
3899 		return;
3900 
3901 	set_bit(f, pf->features);
3902 }
3903 
3904 /**
3905  * ice_clear_feature_support - mark a feature as unsupported
3906  * @pf: pointer to the struct ice_pf instance
3907  * @f: feature enum to clear
3908  */
3909 void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
3910 {
3911 	if (f < 0 || f >= ICE_F_MAX)
3912 		return;
3913 
3914 	clear_bit(f, pf->features);
3915 }
3916 
3917 /**
3918  * ice_init_feature_support - initialize the set of supported features
3919  * @pf: pointer to the struct ice_pf instance
3920  *
3921  * Called during init to set up the supported features.
3922  */
3923 void ice_init_feature_support(struct ice_pf *pf)
3924 {
3925 	switch (pf->hw.device_id) {
3926 	case ICE_DEV_ID_E810C_BACKPLANE:
3927 	case ICE_DEV_ID_E810C_QSFP:
3928 	case ICE_DEV_ID_E810C_SFP:
3929 	case ICE_DEV_ID_E810_XXV_BACKPLANE:
3930 	case ICE_DEV_ID_E810_XXV_QSFP:
3931 	case ICE_DEV_ID_E810_XXV_SFP:
3932 		ice_set_feature_support(pf, ICE_F_DSCP);
3933 		if (ice_is_phy_rclk_in_netlist(&pf->hw))
3934 			ice_set_feature_support(pf, ICE_F_PHY_RCLK);
3935 		/* If we don't own the timer, don't enable other caps */
3936 		if (!ice_pf_src_tmr_owned(pf))
3937 			break;
3938 		if (ice_is_cgu_in_netlist(&pf->hw))
3939 			ice_set_feature_support(pf, ICE_F_CGU);
3940 		if (ice_is_clock_mux_in_netlist(&pf->hw))
3941 			ice_set_feature_support(pf, ICE_F_SMA_CTRL);
3942 		if (ice_gnss_is_module_present(&pf->hw))
3943 			ice_set_feature_support(pf, ICE_F_GNSS);
3944 		break;
3945 	default:
3946 		break;
3947 	}
3948 
3949 	if (pf->hw.mac_type == ICE_MAC_E830) {
3950 		ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
3951 		ice_set_feature_support(pf, ICE_F_GCS);
3952 		ice_set_feature_support(pf, ICE_F_TXTIME);
3953 	}
3954 }
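
/* Editor's note: usage sketch, not part of the driver. Feature bits set in
 * ice_init_feature_support() gate optional init paths elsewhere; the GNSS
 * body below is a placeholder, not the driver's actual init call.
 */
static void example_maybe_init_gnss(struct ice_pf *pf)
{
	if (!ice_is_feature_supported(pf, ICE_F_GNSS))
		return;

	/* ... GNSS-specific setup would go here ... */
}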
3955 
3956 /**
3957  * ice_vsi_update_security - update security block in VSI
3958  * @vsi: pointer to VSI structure
3959  * @fill: function pointer to fill ctx
3960  */
3961 int
3962 ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
3963 {
3964 	struct ice_vsi_ctx ctx = { 0 };
3965 
3966 	ctx.info = vsi->info;
3967 	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
3968 	fill(&ctx);
3969 
3970 	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
3971 		return -ENODEV;
3972 
3973 	vsi->info = ctx.info;
3974 	return 0;
3975 }
3976 
3977 /**
3978  * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
3979  * @ctx: pointer to VSI ctx structure
3980  */
3981 void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
3982 {
3983 	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
3984 			       (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3985 				ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3986 }
3987 
3988 /**
3989  * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
3990  * @ctx: pointer to VSI ctx structure
3991  */
3992 void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
3993 {
3994 	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
3995 			       ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3996 				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3997 }
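
/* Editor's note: usage sketch, not part of the driver. The fill callbacks
 * above are intended to be handed to ice_vsi_update_security(), e.g. when
 * toggling spoof checking on a VSI:
 */
static int example_set_spoofchk(struct ice_vsi *vsi, bool ena)
{
	if (ena)
		return ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);

	return ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
}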
3998 
3999 /**
4000  * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
4001  * @vsi: pointer to VSI structure
4002  * @set: set or unset the bit
4003  */
4004 int
4005 ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
4006 {
4007 	struct ice_vsi_ctx ctx = {
4008 		.info	= vsi->info,
4009 	};
4010 
4011 	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
4012 	if (set)
4013 		ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
4014 	else
4015 		ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
4016 
4017 	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
4018 		return -ENODEV;
4019 
4020 	vsi->info = ctx.info;
4021 	return 0;
4022 }
4023 
4024 /**
4025  * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
4026  * @vsi: VSI used to update l2tsel on
4027  * @l2tsel: l2tsel setting requested
4028  *
4029  * Use the l2tsel setting to update the l2tsel bit in every Rx queue context
4030  * for this VSI. This selects which Rx descriptor field the first offloaded
4031  * VLAN tag is stripped into.
4032  */
4033 void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
4034 {
4035 	struct ice_hw *hw = &vsi->back->hw;
4036 	u32 l2tsel_bit;
4037 	int i;
4038 
4039 	if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
4040 		l2tsel_bit = 0;
4041 	else
4042 		l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
4043 
4044 	for (i = 0; i < vsi->alloc_rxq; i++) {
4045 		u16 pfq = vsi->rxq_map[i];
4046 		u32 qrx_context_offset;
4047 		u32 regval;
4048 
4049 		qrx_context_offset =
4050 			QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
4051 
4052 		regval = rd32(hw, qrx_context_offset);
4053 		regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
4054 		regval |= l2tsel_bit;
4055 		wr32(hw, qrx_context_offset, regval);
4056 	}
4057 }
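
/* Editor's note: usage sketch, not part of the driver. The
 * ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1 enumerator name is assumed here as
 * the counterpart of the ..._L2TAG2_2ND value used above; flipping between
 * the two moves the first stripped VLAN tag between Rx descriptor fields.
 */
static void example_strip_first_tag_into_l2tag1(struct ice_vsi *vsi)
{
	ice_vsi_update_l2tsel(vsi, ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1);
}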
4058