xref: /linux/drivers/net/ethernet/intel/ice/ice_lib.c (revision 4003c9e78778e93188a09d6043a74f7154449d43)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_base.h"
6 #include "ice_flow.h"
7 #include "ice_lib.h"
8 #include "ice_fltr.h"
9 #include "ice_dcb_lib.h"
10 #include "ice_type.h"
11 #include "ice_vsi_vlan_ops.h"
12 
13 /**
14  * ice_vsi_type_str - maps VSI type enum to string equivalents
15  * @vsi_type: VSI type enum
16  */
17 const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
18 {
19 	switch (vsi_type) {
20 	case ICE_VSI_PF:
21 		return "ICE_VSI_PF";
22 	case ICE_VSI_VF:
23 		return "ICE_VSI_VF";
24 	case ICE_VSI_SF:
25 		return "ICE_VSI_SF";
26 	case ICE_VSI_CTRL:
27 		return "ICE_VSI_CTRL";
28 	case ICE_VSI_CHNL:
29 		return "ICE_VSI_CHNL";
30 	case ICE_VSI_LB:
31 		return "ICE_VSI_LB";
32 	default:
33 		return "unknown";
34 	}
35 }
36 
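/* Illustrative usage (a minimal sketch, not part of the original file):
 * the helper above exists mainly to make log output readable, e.g.
 *
 *	dev_dbg(ice_pf_to_dev(pf), "setting up %s VSI\n",
 *		ice_vsi_type_str(vsi->type));
 */
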
37 /**
38  * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
39  * @vsi: the VSI being configured
40  * @ena: start or stop the Rx rings
41  *
42  * First enable/disable all of the Rx rings, flush any remaining writes, and
43  * then verify that they have all been enabled/disabled successfully.
44  * Flushing first lets all of the queue-control register writes post
45  * before we poll for the change in hardware state to complete.
46  */
47 static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
48 {
49 	int ret = 0;
50 	u16 i;
51 
52 	ice_for_each_rxq(vsi, i)
53 		ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
54 
55 	ice_flush(&vsi->back->hw);
56 
57 	ice_for_each_rxq(vsi, i) {
58 		ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
59 		if (ret)
60 			break;
61 	}
62 
63 	return ret;
64 }
65 
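/* Illustrative usage (a minimal sketch; the rollback is hypothetical but
 * uses only helpers visible in this file): start all Rx rings and back
 * out if the hardware never reports the change.
 *
 *	err = ice_vsi_ctrl_all_rx_rings(vsi, true);
 *	if (err) {
 *		dev_err(ice_pf_to_dev(vsi->back),
 *			"failed to start Rx rings on VSI %d, err %d\n",
 *			vsi->vsi_num, err);
 *		ice_vsi_ctrl_all_rx_rings(vsi, false);
 *	}
 */
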
66 /**
67  * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
68  * @vsi: VSI pointer
69  *
70  * On error: returns error code (negative)
71  * On success: returns 0
72  */
73 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
74 {
75 	struct ice_pf *pf = vsi->back;
76 	struct device *dev;
77 
78 	dev = ice_pf_to_dev(pf);
79 	if (vsi->type == ICE_VSI_CHNL)
80 		return 0;
81 
82 	/* allocate memory for both Tx and Rx ring pointers */
83 	vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
84 				     sizeof(*vsi->tx_rings), GFP_KERNEL);
85 	if (!vsi->tx_rings)
86 		return -ENOMEM;
87 
88 	vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
89 				     sizeof(*vsi->rx_rings), GFP_KERNEL);
90 	if (!vsi->rx_rings)
91 		goto err_rings;
92 
93 	/* txq_map needs to have enough space to track both Tx (stack) rings
94 	 * and XDP rings; at this point vsi->num_xdp_txq might not be set,
95 	 * so use num_possible_cpus() as we want to always provide XDP ring
96 	 * per CPU, regardless of queue count settings from user that might
97 	 * have come from ethtool's set_channels() callback.
98 	 */
99 	vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
100 				    sizeof(*vsi->txq_map), GFP_KERNEL);
101 
102 	if (!vsi->txq_map)
103 		goto err_txq_map;
104 
105 	vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
106 				    sizeof(*vsi->rxq_map), GFP_KERNEL);
107 	if (!vsi->rxq_map)
108 		goto err_rxq_map;
109 
110 	/* There is no need to allocate q_vectors for a loopback VSI. */
111 	if (vsi->type == ICE_VSI_LB)
112 		return 0;
113 
114 	/* allocate memory for q_vector pointers */
115 	vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
116 				      sizeof(*vsi->q_vectors), GFP_KERNEL);
117 	if (!vsi->q_vectors)
118 		goto err_vectors;
119 
120 	return 0;
121 
122 err_vectors:
123 	devm_kfree(dev, vsi->rxq_map);
124 err_rxq_map:
125 	devm_kfree(dev, vsi->txq_map);
126 err_txq_map:
127 	devm_kfree(dev, vsi->rx_rings);
128 err_rings:
129 	devm_kfree(dev, vsi->tx_rings);
130 	return -ENOMEM;
131 }
132 
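/* Worked example for the txq_map sizing above (editorial, assuming a
 * hypothetical 16-CPU system): if the user shrank the VSI to 4 Tx queues
 * via ethtool, txq_map still gets 4 + num_possible_cpus() = 20 entries,
 * so one XDP ring per CPU can be mapped later without reallocating.
 */
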
133 /**
134  * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
135  * @vsi: the VSI being configured
136  */
137 static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
138 {
139 	switch (vsi->type) {
140 	case ICE_VSI_PF:
141 	case ICE_VSI_SF:
142 	case ICE_VSI_CTRL:
143 	case ICE_VSI_LB:
144 		/* a user could change the values of num_[tr]x_desc using
145 		 * ethtool -G so we should keep those values instead of
146 		 * overwriting them with the defaults.
147 		 */
148 		if (!vsi->num_rx_desc)
149 			vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
150 		if (!vsi->num_tx_desc)
151 			vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
152 		break;
153 	default:
154 		dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
155 			vsi->type);
156 		break;
157 	}
158 }
159 
160 /**
161  * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
162  * @vsi: the VSI being configured
163  *
164  * Sets vsi->alloc_txq, vsi->alloc_rxq and vsi->num_q_vectors for the VSI type.
165  */
166 static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
167 {
168 	enum ice_vsi_type vsi_type = vsi->type;
169 	struct ice_pf *pf = vsi->back;
170 	struct ice_vf *vf = vsi->vf;
171 
172 	if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
173 		return;
174 
175 	switch (vsi_type) {
176 	case ICE_VSI_PF:
177 		if (vsi->req_txq) {
178 			vsi->alloc_txq = vsi->req_txq;
179 			vsi->num_txq = vsi->req_txq;
180 		} else {
181 			vsi->alloc_txq = min3(pf->num_lan_msix,
182 					      ice_get_avail_txq_count(pf),
183 					      (u16)num_online_cpus());
184 		}
185 
186 		pf->num_lan_tx = vsi->alloc_txq;
187 
188 		/* only 1 Rx queue unless RSS is enabled */
189 		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
190 			vsi->alloc_rxq = 1;
191 		} else {
192 			if (vsi->req_rxq) {
193 				vsi->alloc_rxq = vsi->req_rxq;
194 				vsi->num_rxq = vsi->req_rxq;
195 			} else {
196 				vsi->alloc_rxq = min3(pf->num_lan_msix,
197 						      ice_get_avail_rxq_count(pf),
198 						      (u16)num_online_cpus());
199 			}
200 		}
201 
202 		pf->num_lan_rx = vsi->alloc_rxq;
203 
204 		vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
205 					   max_t(int, vsi->alloc_rxq,
206 						 vsi->alloc_txq));
207 		break;
208 	case ICE_VSI_SF:
209 		vsi->alloc_txq = 1;
210 		vsi->alloc_rxq = 1;
211 		vsi->num_q_vectors = 1;
212 		vsi->irq_dyn_alloc = true;
213 		break;
214 	case ICE_VSI_VF:
215 		if (vf->num_req_qs)
216 			vf->num_vf_qs = vf->num_req_qs;
217 		vsi->alloc_txq = vf->num_vf_qs;
218 		vsi->alloc_rxq = vf->num_vf_qs;
219 		/* pf->vfs.num_msix_per includes (VF miscellaneous vector +
220 		 * data queue interrupts). Since vsi->num_q_vectors is the
221 		 * number of queue vectors, subtract 1 (ICE_NONQ_VECS_VF)
222 		 * from the original vector count
223 		 */
224 		vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF;
225 		break;
226 	case ICE_VSI_CTRL:
227 		vsi->alloc_txq = 1;
228 		vsi->alloc_rxq = 1;
229 		vsi->num_q_vectors = 1;
230 		break;
231 	case ICE_VSI_CHNL:
232 		vsi->alloc_txq = 0;
233 		vsi->alloc_rxq = 0;
234 		break;
235 	case ICE_VSI_LB:
236 		vsi->alloc_txq = 1;
237 		vsi->alloc_rxq = 1;
238 		break;
239 	default:
240 		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
241 		break;
242 	}
243 
244 	ice_vsi_set_num_desc(vsi);
245 }
246 
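/* Worked example for the PF case above (editorial, with hypothetical
 * capacities): given 64 online CPUs, 96 available Tx queues and
 * pf->num_lan_msix == 16, alloc_txq = min3(16, 96, 64) = 16, and
 * num_q_vectors = min(16, max(alloc_rxq, alloc_txq)), i.e. one vector
 * per queue pair.
 */
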
247 /**
248  * ice_get_free_slot - get the next free (NULL) slot index in the array
249  * @array: array to search
250  * @size: size of the array
251  * @curr: last known occupied index to be used as a search hint
252  *
253  * void * is being used to keep the functionality generic. This lets us use this
254  * function on any array of pointers.
255  */
256 static int ice_get_free_slot(void *array, int size, int curr)
257 {
258 	int **tmp_array = (int **)array;
259 	int next;
260 
261 	if (curr < (size - 1) && !tmp_array[curr + 1]) {
262 		next = curr + 1;
263 	} else {
264 		int i = 0;
265 
266 		while ((i < size) && (tmp_array[i]))
267 			i++;
268 		if (i == size)
269 			next = ICE_NO_VSI;
270 		else
271 			next = i;
272 	}
273 	return next;
274 }
275 
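/* Illustrative usage (a sketch of how this file itself uses the helper;
 * see ice_vsi_alloc() below): keep pf->next_vsi pointing at a free slot
 * in the pf->vsi[] pointer array.
 *
 *	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
 *					 pf->next_vsi);
 */
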
276 /**
277  * ice_vsi_delete_from_hw - delete a VSI from the switch
278  * @vsi: pointer to VSI being removed
279  */
280 static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
281 {
282 	struct ice_pf *pf = vsi->back;
283 	struct ice_vsi_ctx *ctxt;
284 	int status;
285 
286 	ice_fltr_remove_all(vsi);
287 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
288 	if (!ctxt)
289 		return;
290 
291 	if (vsi->type == ICE_VSI_VF)
292 		ctxt->vf_num = vsi->vf->vf_id;
293 	ctxt->vsi_num = vsi->vsi_num;
294 
295 	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
296 
297 	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
298 	if (status)
299 		dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
300 			vsi->vsi_num, status);
301 
302 	kfree(ctxt);
303 }
304 
305 /**
306  * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
307  * @vsi: pointer to VSI being cleared
308  */
309 static void ice_vsi_free_arrays(struct ice_vsi *vsi)
310 {
311 	struct ice_pf *pf = vsi->back;
312 	struct device *dev;
313 
314 	dev = ice_pf_to_dev(pf);
315 
316 	/* free the ring and vector containers */
317 	devm_kfree(dev, vsi->q_vectors);
318 	vsi->q_vectors = NULL;
319 	devm_kfree(dev, vsi->tx_rings);
320 	vsi->tx_rings = NULL;
321 	devm_kfree(dev, vsi->rx_rings);
322 	vsi->rx_rings = NULL;
323 	devm_kfree(dev, vsi->txq_map);
324 	vsi->txq_map = NULL;
325 	devm_kfree(dev, vsi->rxq_map);
326 	vsi->rxq_map = NULL;
327 }
328 
329 /**
330  * ice_vsi_free_stats - Free the ring statistics structures
331  * @vsi: VSI pointer
332  */
333 static void ice_vsi_free_stats(struct ice_vsi *vsi)
334 {
335 	struct ice_vsi_stats *vsi_stat;
336 	struct ice_pf *pf = vsi->back;
337 	int i;
338 
339 	if (vsi->type == ICE_VSI_CHNL)
340 		return;
341 	if (!pf->vsi_stats)
342 		return;
343 
344 	vsi_stat = pf->vsi_stats[vsi->idx];
345 	if (!vsi_stat)
346 		return;
347 
348 	ice_for_each_alloc_txq(vsi, i) {
349 		if (vsi_stat->tx_ring_stats[i]) {
350 			kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
351 			WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
352 		}
353 	}
354 
355 	ice_for_each_alloc_rxq(vsi, i) {
356 		if (vsi_stat->rx_ring_stats[i]) {
357 			kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
358 			WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
359 		}
360 	}
361 
362 	kfree(vsi_stat->tx_ring_stats);
363 	kfree(vsi_stat->rx_ring_stats);
364 	kfree(vsi_stat);
365 	pf->vsi_stats[vsi->idx] = NULL;
366 }
367 
368 /**
369  * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
370  * @vsi: VSI which is having stats allocated
371  */
372 static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
373 {
374 	struct ice_ring_stats **tx_ring_stats;
375 	struct ice_ring_stats **rx_ring_stats;
376 	struct ice_vsi_stats *vsi_stats;
377 	struct ice_pf *pf = vsi->back;
378 	u16 i;
379 
380 	vsi_stats = pf->vsi_stats[vsi->idx];
381 	tx_ring_stats = vsi_stats->tx_ring_stats;
382 	rx_ring_stats = vsi_stats->rx_ring_stats;
383 
384 	/* Allocate Tx ring stats */
385 	ice_for_each_alloc_txq(vsi, i) {
386 		struct ice_ring_stats *ring_stats;
387 		struct ice_tx_ring *ring;
388 
389 		ring = vsi->tx_rings[i];
390 		ring_stats = tx_ring_stats[i];
391 
392 		if (!ring_stats) {
393 			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
394 			if (!ring_stats)
395 				goto err_out;
396 
397 			WRITE_ONCE(tx_ring_stats[i], ring_stats);
398 		}
399 
400 		ring->ring_stats = ring_stats;
401 	}
402 
403 	/* Allocate Rx ring stats */
404 	ice_for_each_alloc_rxq(vsi, i) {
405 		struct ice_ring_stats *ring_stats;
406 		struct ice_rx_ring *ring;
407 
408 		ring = vsi->rx_rings[i];
409 		ring_stats = rx_ring_stats[i];
410 
411 		if (!ring_stats) {
412 			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
413 			if (!ring_stats)
414 				goto err_out;
415 
416 			WRITE_ONCE(rx_ring_stats[i], ring_stats);
417 		}
418 
419 		ring->ring_stats = ring_stats;
420 	}
421 
422 	return 0;
423 
424 err_out:
425 	ice_vsi_free_stats(vsi);
426 	return -ENOMEM;
427 }
428 
429 /**
430  * ice_vsi_free - clean up and deallocate the provided VSI
431  * @vsi: pointer to VSI being cleared
432  *
433  * This deallocates the VSI's queue resources, removes it from the PF's
434  * VSI array if necessary, and deallocates the VSI
435  */
436 void ice_vsi_free(struct ice_vsi *vsi)
437 {
438 	struct ice_pf *pf = NULL;
439 	struct device *dev;
440 
441 	if (!vsi || !vsi->back)
442 		return;
443 
444 	pf = vsi->back;
445 	dev = ice_pf_to_dev(pf);
446 
447 	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
448 		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
449 		return;
450 	}
451 
452 	mutex_lock(&pf->sw_mutex);
453 	/* updates the PF for this cleared VSI */
454 
455 	pf->vsi[vsi->idx] = NULL;
456 	pf->next_vsi = vsi->idx;
457 
458 	ice_vsi_free_stats(vsi);
459 	ice_vsi_free_arrays(vsi);
460 	mutex_destroy(&vsi->xdp_state_lock);
461 	mutex_unlock(&pf->sw_mutex);
462 	devm_kfree(dev, vsi);
463 }
464 
465 void ice_vsi_delete(struct ice_vsi *vsi)
466 {
467 	ice_vsi_delete_from_hw(vsi);
468 	ice_vsi_free(vsi);
469 }
470 
471 /**
472  * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
473  * @irq: interrupt number
474  * @data: pointer to a q_vector
475  */
476 static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
477 {
478 	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
479 
480 	if (!q_vector->tx.tx_ring)
481 		return IRQ_HANDLED;
482 
483 #define FDIR_RX_DESC_CLEAN_BUDGET 64
484 	ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
485 	ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
486 
487 	return IRQ_HANDLED;
488 }
489 
490 /**
491  * ice_msix_clean_rings - MSIX mode Interrupt Handler
492  * @irq: interrupt number
493  * @data: pointer to a q_vector
494  */
495 static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
496 {
497 	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
498 
499 	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
500 		return IRQ_HANDLED;
501 
502 	q_vector->total_events++;
503 
504 	napi_schedule(&q_vector->napi);
505 
506 	return IRQ_HANDLED;
507 }
508 
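/* Illustrative wiring (a minimal sketch; the request path lives outside
 * this file): the handler above is stored in vsi->irq_handler by
 * ice_vsi_alloc_def() below and later registered roughly as
 *
 *	err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
 *			       q_vector->name, q_vector);
 */
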
509 /**
510  * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
511  * @vsi: VSI pointer
512  */
513 static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
514 {
515 	struct ice_vsi_stats *vsi_stat;
516 	struct ice_pf *pf = vsi->back;
517 
518 	if (vsi->type == ICE_VSI_CHNL)
519 		return 0;
520 	if (!pf->vsi_stats)
521 		return -ENOENT;
522 
523 	if (pf->vsi_stats[vsi->idx])
524 		/* realloc will happen in rebuild path */
525 		return 0;
526 
527 	vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
528 	if (!vsi_stat)
529 		return -ENOMEM;
530 
531 	vsi_stat->tx_ring_stats =
532 		kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
533 			GFP_KERNEL);
534 	if (!vsi_stat->tx_ring_stats)
535 		goto err_alloc_tx;
536 
537 	vsi_stat->rx_ring_stats =
538 		kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
539 			GFP_KERNEL);
540 	if (!vsi_stat->rx_ring_stats)
541 		goto err_alloc_rx;
542 
543 	pf->vsi_stats[vsi->idx] = vsi_stat;
544 
545 	return 0;
546 
547 err_alloc_rx:
548 	kfree(vsi_stat->rx_ring_stats);
549 err_alloc_tx:
550 	kfree(vsi_stat->tx_ring_stats);
551 	kfree(vsi_stat);
552 	pf->vsi_stats[vsi->idx] = NULL;
553 	return -ENOMEM;
554 }
555 
556 /**
557  * ice_vsi_alloc_def - set default values for already allocated VSI
558  * @vsi: ptr to VSI
559  * @ch: ptr to channel
560  */
561 static int
562 ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
563 {
564 	if (vsi->type != ICE_VSI_CHNL) {
565 		ice_vsi_set_num_qs(vsi);
566 		if (ice_vsi_alloc_arrays(vsi))
567 			return -ENOMEM;
568 	}
569 
570 	switch (vsi->type) {
571 	case ICE_VSI_PF:
572 	case ICE_VSI_SF:
573 		/* Setup default MSIX irq handler for VSI */
574 		vsi->irq_handler = ice_msix_clean_rings;
575 		break;
576 	case ICE_VSI_CTRL:
577 		/* Setup ctrl VSI MSIX irq handler */
578 		vsi->irq_handler = ice_msix_clean_ctrl_vsi;
579 		break;
580 	case ICE_VSI_CHNL:
581 		if (!ch)
582 			return -EINVAL;
583 
584 		vsi->num_rxq = ch->num_rxq;
585 		vsi->num_txq = ch->num_txq;
586 		vsi->next_base_q = ch->base_q;
587 		break;
588 	case ICE_VSI_VF:
589 	case ICE_VSI_LB:
590 		break;
591 	default:
592 		ice_vsi_free_arrays(vsi);
593 		return -EINVAL;
594 	}
595 
596 	return 0;
597 }
598 
599 /**
600  * ice_vsi_alloc - Allocates the next available struct VSI in the PF
601  * @pf: board private structure
602  *
603  * Reserves a VSI index from the PF and allocates an empty VSI structure
604  * without a type. The VSI structure must later be initialized by calling
605  * ice_vsi_cfg().
606  *
607  * returns a pointer to a VSI on success, NULL on failure.
608  */
609 struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
610 {
611 	struct device *dev = ice_pf_to_dev(pf);
612 	struct ice_vsi *vsi = NULL;
613 
614 	/* Need to protect the allocation of the VSIs at the PF level */
615 	mutex_lock(&pf->sw_mutex);
616 
617 	/* If we have already allocated our maximum number of VSIs,
618 	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
619 	 * is available to be populated
620 	 */
621 	if (pf->next_vsi == ICE_NO_VSI) {
622 		dev_dbg(dev, "out of VSI slots!\n");
623 		goto unlock_pf;
624 	}
625 
626 	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
627 	if (!vsi)
628 		goto unlock_pf;
629 
630 	vsi->back = pf;
631 	set_bit(ICE_VSI_DOWN, vsi->state);
632 
633 	/* fill slot and make note of the index */
634 	vsi->idx = pf->next_vsi;
635 	pf->vsi[pf->next_vsi] = vsi;
636 
637 	/* prepare pf->next_vsi for next use */
638 	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
639 					 pf->next_vsi);
640 
641 	mutex_init(&vsi->xdp_state_lock);
642 
643 unlock_pf:
644 	mutex_unlock(&pf->sw_mutex);
645 	return vsi;
646 }
647 
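/* Illustrative lifecycle (a minimal sketch; ice_vsi_cfg() and its
 * parameter structure are defined elsewhere and their shape is assumed
 * here): reserve a slot, configure it, and release the slot on failure.
 *
 *	vsi = ice_vsi_alloc(pf);
 *	if (!vsi)
 *		return -ENOMEM;
 *	if (ice_vsi_cfg(vsi, &params))
 *		ice_vsi_free(vsi);
 */
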
648 /**
649  * ice_alloc_fd_res - Allocate FD resource for a VSI
650  * @vsi: pointer to the ice_vsi
651  *
652  * This allocates the FD resources
653  *
654  * Returns 0 on success, -EPERM on no-op or -EIO on failure
655  */
656 static int ice_alloc_fd_res(struct ice_vsi *vsi)
657 {
658 	struct ice_pf *pf = vsi->back;
659 	u32 g_val, b_val;
660 
661 	/* Flow Director filters are only allocated/assigned to the PF VSI or
662 	 * CHNL VSI which passes the traffic. The CTRL VSI is only used to
663 	 * add/delete filters so resources are not allocated to it
664 	 */
665 	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
666 		return -EPERM;
667 
668 	if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
669 	      vsi->type == ICE_VSI_CHNL))
670 		return -EPERM;
671 
672 	/* FD filters from guaranteed pool per VSI */
673 	g_val = pf->hw.func_caps.fd_fltr_guar;
674 	if (!g_val)
675 		return -EPERM;
676 
677 	/* FD filters from best effort pool */
678 	b_val = pf->hw.func_caps.fd_fltr_best_effort;
679 	if (!b_val)
680 		return -EPERM;
681 
682 	/* PF main VSI gets only 64 FD resources from guaranteed pool
683 	 * when ADQ is configured.
684 	 */
685 #define ICE_PF_VSI_GFLTR	64
686 
687 	/* determine FD filter resources per VSI from shared(best effort) and
688 	 * dedicated pool
689 	 */
690 	if (vsi->type == ICE_VSI_PF) {
691 		vsi->num_gfltr = g_val;
692 		/* if MQPRIO is configured, main VSI doesn't get all FD
693 		 * resources from guaranteed pool. PF VSI gets 64 FD resources
694 		 */
695 		if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
696 			if (g_val < ICE_PF_VSI_GFLTR)
697 				return -EPERM;
698 			/* allow bare minimum entries for PF VSI */
699 			vsi->num_gfltr = ICE_PF_VSI_GFLTR;
700 		}
701 
702 		/* each VSI gets same "best_effort" quota */
703 		vsi->num_bfltr = b_val;
704 	} else if (vsi->type == ICE_VSI_VF) {
705 		vsi->num_gfltr = 0;
706 
707 		/* each VSI gets same "best_effort" quota */
708 		vsi->num_bfltr = b_val;
709 	} else {
710 		struct ice_vsi *main_vsi;
711 		int numtc;
712 
713 		main_vsi = ice_get_main_vsi(pf);
714 		if (!main_vsi)
715 			return -EPERM;
716 
717 		if (!main_vsi->all_numtc)
718 			return -EINVAL;
719 
720 		/* figure out ADQ numtc */
721 		numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;
722 
723 		/* only one TC but still asking resources for channels,
724 		 * invalid config
725 		 */
726 		if (numtc < ICE_CHNL_START_TC)
727 			return -EPERM;
728 
729 		g_val -= ICE_PF_VSI_GFLTR;
730 		/* each channel VSI gets an equal share of the guaranteed pool */
731 		vsi->num_gfltr = g_val / numtc;
732 
733 		/* each VSI gets same "best_effort" quota */
734 		vsi->num_bfltr = b_val;
735 	}
736 
737 	return 0;
738 }
739 
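/* Worked example for the channel branch above (editorial, with
 * hypothetical capability values, assuming ICE_CHNL_START_TC == 1):
 * with fd_fltr_guar == 1024 and an ADQ config where all_numtc == 5,
 * numtc = 5 - 1 = 4, so each channel VSI gets
 * (1024 - ICE_PF_VSI_GFLTR) / 4 = 240 guaranteed filters, while the PF
 * VSI keeps ICE_PF_VSI_GFLTR == 64.
 */
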
740 /**
741  * ice_vsi_get_qs - Assign queues from PF to VSI
742  * @vsi: the VSI to assign queues to
743  *
744  * Returns 0 on success and a negative value on error
745  */
746 static int ice_vsi_get_qs(struct ice_vsi *vsi)
747 {
748 	struct ice_pf *pf = vsi->back;
749 	struct ice_qs_cfg tx_qs_cfg = {
750 		.qs_mutex = &pf->avail_q_mutex,
751 		.pf_map = pf->avail_txqs,
752 		.pf_map_size = pf->max_pf_txqs,
753 		.q_count = vsi->alloc_txq,
754 		.scatter_count = ICE_MAX_SCATTER_TXQS,
755 		.vsi_map = vsi->txq_map,
756 		.vsi_map_offset = 0,
757 		.mapping_mode = ICE_VSI_MAP_CONTIG
758 	};
759 	struct ice_qs_cfg rx_qs_cfg = {
760 		.qs_mutex = &pf->avail_q_mutex,
761 		.pf_map = pf->avail_rxqs,
762 		.pf_map_size = pf->max_pf_rxqs,
763 		.q_count = vsi->alloc_rxq,
764 		.scatter_count = ICE_MAX_SCATTER_RXQS,
765 		.vsi_map = vsi->rxq_map,
766 		.vsi_map_offset = 0,
767 		.mapping_mode = ICE_VSI_MAP_CONTIG
768 	};
769 	int ret;
770 
771 	if (vsi->type == ICE_VSI_CHNL)
772 		return 0;
773 
774 	ret = __ice_vsi_get_qs(&tx_qs_cfg);
775 	if (ret)
776 		return ret;
777 	vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;
778 
779 	ret = __ice_vsi_get_qs(&rx_qs_cfg);
780 	if (ret)
781 		return ret;
782 	vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;
783 
784 	return 0;
785 }
786 
787 /**
788  * ice_vsi_put_qs - Release queues from VSI to PF
789  * @vsi: the VSI that is going to release queues
790  */
791 static void ice_vsi_put_qs(struct ice_vsi *vsi)
792 {
793 	struct ice_pf *pf = vsi->back;
794 	int i;
795 
796 	mutex_lock(&pf->avail_q_mutex);
797 
798 	ice_for_each_alloc_txq(vsi, i) {
799 		clear_bit(vsi->txq_map[i], pf->avail_txqs);
800 		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
801 	}
802 
803 	ice_for_each_alloc_rxq(vsi, i) {
804 		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
805 		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
806 	}
807 
808 	mutex_unlock(&pf->avail_q_mutex);
809 }
810 
811 /**
812  * ice_is_safe_mode - check if driver is in safe mode
813  * @pf: pointer to the PF struct
814  *
815  * returns true if driver is in safe mode, false otherwise
816  */
817 bool ice_is_safe_mode(struct ice_pf *pf)
818 {
819 	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
820 }
821 
822 /**
823  * ice_is_rdma_ena - check if RDMA is currently supported
824  * @pf: pointer to the PF struct
825  *
826  * returns true if RDMA is currently supported, false otherwise
827  */
828 bool ice_is_rdma_ena(struct ice_pf *pf)
829 {
830 	return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
831 }
832 
833 /**
834  * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
835  * @vsi: the VSI being cleaned up
836  *
837  * This function deletes RSS input set for all flows that were configured
838  * for this VSI
839  */
840 static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
841 {
842 	struct ice_pf *pf = vsi->back;
843 	int status;
844 
845 	if (ice_is_safe_mode(pf))
846 		return;
847 
848 	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
849 	if (status)
850 		dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
851 			vsi->vsi_num, status);
852 }
853 
854 /**
855  * ice_rss_clean - Delete RSS related VSI structures and configuration
856  * @vsi: the VSI being removed
857  */
858 static void ice_rss_clean(struct ice_vsi *vsi)
859 {
860 	struct ice_pf *pf = vsi->back;
861 	struct device *dev;
862 
863 	dev = ice_pf_to_dev(pf);
864 
865 	devm_kfree(dev, vsi->rss_hkey_user);
866 	devm_kfree(dev, vsi->rss_lut_user);
867 
868 	ice_vsi_clean_rss_flow_fld(vsi);
869 	/* remove RSS replay list */
870 	if (!ice_is_safe_mode(pf))
871 		ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
872 }
873 
874 /**
875  * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
876  * @vsi: the VSI being configured
877  */
878 static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
879 {
880 	struct ice_hw_common_caps *cap;
881 	struct ice_pf *pf = vsi->back;
882 	u16 max_rss_size;
883 
884 	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
885 		vsi->rss_size = 1;
886 		return;
887 	}
888 
889 	cap = &pf->hw.func_caps.common_cap;
890 	max_rss_size = BIT(cap->rss_table_entry_width);
891 	switch (vsi->type) {
892 	case ICE_VSI_CHNL:
893 	case ICE_VSI_PF:
894 		/* PF VSI will inherit RSS instance of PF */
895 		vsi->rss_table_size = (u16)cap->rss_table_size;
896 		if (vsi->type == ICE_VSI_CHNL)
897 			vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
898 		else
899 			vsi->rss_size = min_t(u16, num_online_cpus(),
900 					      max_rss_size);
901 		vsi->rss_lut_type = ICE_LUT_PF;
902 		break;
903 	case ICE_VSI_SF:
904 		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
905 		vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
906 		vsi->rss_lut_type = ICE_LUT_VSI;
907 		break;
908 	case ICE_VSI_VF:
909 		/* VF VSI will get a small RSS table.
910 		 * For VSI_LUT, LUT size should be set to 64 bytes.
911 		 */
912 		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
913 		vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
914 		vsi->rss_lut_type = ICE_LUT_VSI;
915 		break;
916 	case ICE_VSI_LB:
917 		break;
918 	default:
919 		dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
920 			ice_vsi_type_str(vsi->type));
921 		break;
922 	}
923 }
924 
925 /**
926  * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
927  * @hw: HW structure used to determine the VLAN mode of the device
928  * @ctxt: the VSI context being set
929  *
930  * This initializes a default VSI context for all sections except the Queues.
931  */
932 static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
933 {
934 	u32 table = 0;
935 
936 	memset(&ctxt->info, 0, sizeof(ctxt->info));
937 	/* VSIs should be allocated from the shared pool */
938 	ctxt->alloc_from_pool = true;
939 	/* Src pruning enabled by default */
940 	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
941 	/* Traffic from VSI can be sent to LAN */
942 	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
943 	/* allow all untagged/tagged packets by default on Tx */
944 	ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M,
945 						 ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL);
946 	/* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
947 	 * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
948 	 *
949 	 * DVM - leave inner VLAN in packet by default
950 	 */
951 	if (ice_is_dvm_ena(hw)) {
952 		ctxt->info.inner_vlan_flags |=
953 			FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
954 				   ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
955 		ctxt->info.outer_vlan_flags =
956 			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
957 				   ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
958 		ctxt->info.outer_vlan_flags |=
959 			FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M,
960 				   ICE_AQ_VSI_OUTER_TAG_VLAN_8100);
961 		ctxt->info.outer_vlan_flags |=
962 			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
963 				   ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
964 	}
965 	/* Have 1:1 UP mapping for both ingress/egress tables */
966 	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
967 	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
968 	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
969 	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
970 	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
971 	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
972 	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
973 	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
974 	ctxt->info.ingress_table = cpu_to_le32(table);
975 	ctxt->info.egress_table = cpu_to_le32(table);
976 	/* Have 1:1 UP mapping for outer to inner UP table */
977 	ctxt->info.outer_up_table = cpu_to_le32(table);
978 	/* No outer tag support; outer_tag_flags remains zero */
979 }
980 
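/* Editorial sketch of the 1:1 mapping built above: each user priority
 * (UP) occupies a 3-bit field in the 32-bit table, so, assuming that
 * field layout, the eight ORs are equivalent to
 *
 *	for (up = 0; up < 8; up++)
 *		table |= (u32)up << (up * 3);
 */
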
981 /**
982  * ice_vsi_setup_q_map - Setup a VSI queue map
983  * @vsi: the VSI being configured
984  * @ctxt: VSI context structure
985  */
986 static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
987 {
988 	u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
989 	u16 num_txq_per_tc, num_rxq_per_tc;
990 	u16 qcount_tx = vsi->alloc_txq;
991 	u16 qcount_rx = vsi->alloc_rxq;
992 	u8 netdev_tc = 0;
993 	int i;
994 
995 	if (!vsi->tc_cfg.numtc) {
996 		/* at least TC0 should be enabled by default */
997 		vsi->tc_cfg.numtc = 1;
998 		vsi->tc_cfg.ena_tc = 1;
999 	}
1000 
1001 	num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
1002 	if (!num_rxq_per_tc)
1003 		num_rxq_per_tc = 1;
1004 	num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
1005 	if (!num_txq_per_tc)
1006 		num_txq_per_tc = 1;
1007 
1008 	/* find the (rounded up) power-of-2 of qcount */
1009 	pow = (u16)order_base_2(num_rxq_per_tc);
1010 
1011 	/* TC mapping is a function of the number of Rx queues assigned to the
1012 	 * VSI for each traffic class and the offset of these queues.
1013 	 * Each TC entry packs the queue offset (ICE_AQ_VSI_TC_Q_OFFSET_M) and
1014 	 * the number of queues for that TC, as a power of 2 (ICE_AQ_VSI_TC_Q_NUM_M).
1015 	 *
1016 	 * If a TC is not enabled, its queue offset is set to 0 and one queue
1017 	 * is allocated, so that traffic for that TC is steered to the
1018 	 * default queue.
1019 	 *
1020 	 * Setup number and offset of Rx queues for all TCs for the VSI
1021 	 */
1022 	ice_for_each_traffic_class(i) {
1023 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
1024 			/* TC is not enabled */
1025 			vsi->tc_cfg.tc_info[i].qoffset = 0;
1026 			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
1027 			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
1028 			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
1029 			ctxt->info.tc_mapping[i] = 0;
1030 			continue;
1031 		}
1032 
1033 		/* TC is enabled */
1034 		vsi->tc_cfg.tc_info[i].qoffset = offset;
1035 		vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
1036 		vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
1037 		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
1038 
1039 		qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
1040 		qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
1041 		offset += num_rxq_per_tc;
1042 		tx_count += num_txq_per_tc;
1043 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1044 	}
1045 
1046 	/* If offset is non-zero, it was accumulated across the enabled TCs
1047 	 * and is the total Rx queue count. Otherwise fall back to
1048 	 * num_rxq_per_tc, which is always non-zero because it is derived
1049 	 * from the VSI's allocated Rx queues, of which there is at least
1050 	 * one (hence rx_count will be at least 1)
1051 	 */
1052 	if (offset)
1053 		rx_count = offset;
1054 	else
1055 		rx_count = num_rxq_per_tc;
1056 
1057 	if (rx_count > vsi->alloc_rxq) {
1058 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
1059 			rx_count, vsi->alloc_rxq);
1060 		return -EINVAL;
1061 	}
1062 
1063 	if (tx_count > vsi->alloc_txq) {
1064 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
1065 			tx_count, vsi->alloc_txq);
1066 		return -EINVAL;
1067 	}
1068 
1069 	vsi->num_txq = tx_count;
1070 	vsi->num_rxq = rx_count;
1071 
1072 	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
1073 		dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
1074 		/* since there is a chance that num_rxq could have been changed
1075 		 * in the above for loop, make num_txq equal to num_rxq.
1076 		 */
1077 		vsi->num_txq = vsi->num_rxq;
1078 	}
1079 
1080 	/* Rx queue mapping */
1081 	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
1082 	/* q_mapping buffer holds the info for the first queue allocated for
1083 	 * this VSI in the PF space and also the number of queues associated
1084 	 * with this VSI.
1085 	 */
1086 	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
1087 	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
1088 
1089 	return 0;
1090 }
1091 
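/* Worked example for the qmap encoding above (editorial, hypothetical
 * queue counts): 16 Rx queues across 2 enabled TCs gives
 * num_rxq_per_tc = 8 and pow = order_base_2(8) = 3, so the TC1 entry
 * (offset 8) is built as
 *
 *	qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, 8) |
 *	       FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, 3);
 */
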
1092 /**
1093  * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
1094  * @ctxt: the VSI context being set
1095  * @vsi: the VSI being configured
1096  */
1097 static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
1098 {
1099 	u8 dflt_q_group, dflt_q_prio;
1100 	u16 dflt_q, report_q, val;
1101 
1102 	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
1103 	    vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
1104 		return;
1105 
1106 	val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1107 	ctxt->info.valid_sections |= cpu_to_le16(val);
1108 	dflt_q = 0;
1109 	dflt_q_group = 0;
1110 	report_q = 0;
1111 	dflt_q_prio = 0;
1112 
1113 	/* enable flow director filtering/programming */
1114 	val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
1115 	ctxt->info.fd_options = cpu_to_le16(val);
1116 	/* max of allocated flow director filters */
1117 	ctxt->info.max_fd_fltr_dedicated =
1118 			cpu_to_le16(vsi->num_gfltr);
1119 	/* max of shared flow director filters any VSI may program */
1120 	ctxt->info.max_fd_fltr_shared =
1121 			cpu_to_le16(vsi->num_bfltr);
1122 	/* default queue index within the VSI of the default FD */
1123 	val = FIELD_PREP(ICE_AQ_VSI_FD_DEF_Q_M, dflt_q);
1124 	/* target queue or queue group to the FD filter */
1125 	val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_GRP_M, dflt_q_group);
1126 	ctxt->info.fd_def_q = cpu_to_le16(val);
1127 	/* queue index on which FD filter completion is reported */
1128 	val = FIELD_PREP(ICE_AQ_VSI_FD_REPORT_Q_M, report_q);
1129 	/* priority of the default qindex action */
1130 	val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_PRIORITY_M, dflt_q_prio);
1131 	ctxt->info.fd_report_opt = cpu_to_le16(val);
1132 }
1133 
1134 /**
1135  * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
1136  * @ctxt: the VSI context being set
1137  * @vsi: the VSI being configured
1138  */
1139 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
1140 {
1141 	u8 lut_type, hash_type;
1142 	struct device *dev;
1143 	struct ice_pf *pf;
1144 
1145 	pf = vsi->back;
1146 	dev = ice_pf_to_dev(pf);
1147 
1148 	switch (vsi->type) {
1149 	case ICE_VSI_CHNL:
1150 	case ICE_VSI_PF:
1151 		/* PF VSI will inherit RSS instance of PF */
1152 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
1153 		break;
1154 	case ICE_VSI_VF:
1155 	case ICE_VSI_SF:
1156 		/* VF VSI gets a small RSS table, which is a VSI LUT type */
1157 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
1158 		break;
1159 	default:
1160 		dev_dbg(dev, "Unsupported VSI type %s\n",
1161 			ice_vsi_type_str(vsi->type));
1162 		return;
1163 	}
1164 
1165 	hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
1166 	vsi->rss_hfunc = hash_type;
1167 
1168 	ctxt->info.q_opt_rss =
1169 		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
1170 		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
1171 }
1172 
1173 static void
1174 ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
1175 {
1176 	struct ice_pf *pf = vsi->back;
1177 	u16 qcount, qmap;
1178 	u8 offset = 0;
1179 	int pow;
1180 
1181 	qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
1182 
1183 	pow = order_base_2(qcount);
1184 	qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
1185 	qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
1186 
1187 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1188 	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
1189 	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
1190 	ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
1191 }
1192 
1193 /**
1194  * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
1195  * @vsi: VSI to check whether or not VLAN pruning is enabled.
1196  *
1197  * returns true if Rx VLAN pruning is enabled and false otherwise.
1198  */
1199 static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
1200 {
1201 	return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
1202 }
1203 
1204 /**
1205  * ice_vsi_init - Create and initialize a VSI
1206  * @vsi: the VSI being configured
1207  * @vsi_flags: VSI configuration flags
1208  *
1209  * Set ICE_FLAG_VSI_INIT to initialize a new VSI context, clear it to
1210  * reconfigure an existing context.
1211  *
1212  * This initializes a VSI context depending on the VSI type to be added and
1213  * passes it down to the add_vsi aq command to create a new VSI.
1214  */
1215 static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
1216 {
1217 	struct ice_pf *pf = vsi->back;
1218 	struct ice_hw *hw = &pf->hw;
1219 	struct ice_vsi_ctx *ctxt;
1220 	struct device *dev;
1221 	int ret = 0;
1222 
1223 	dev = ice_pf_to_dev(pf);
1224 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
1225 	if (!ctxt)
1226 		return -ENOMEM;
1227 
1228 	switch (vsi->type) {
1229 	case ICE_VSI_CTRL:
1230 	case ICE_VSI_LB:
1231 	case ICE_VSI_PF:
1232 		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
1233 		break;
1234 	case ICE_VSI_SF:
1235 	case ICE_VSI_CHNL:
1236 		ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
1237 		break;
1238 	case ICE_VSI_VF:
1239 		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
1240 		/* VF number here is the absolute VF number (0-255) */
1241 		ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
1242 		break;
1243 	default:
1244 		ret = -ENODEV;
1245 		goto out;
1246 	}
1247 
1248 	/* Handle VLAN pruning for channel VSI if main VSI has VLAN
1249 	 * prune enabled
1250 	 */
1251 	if (vsi->type == ICE_VSI_CHNL) {
1252 		struct ice_vsi *main_vsi;
1253 
1254 		main_vsi = ice_get_main_vsi(pf);
1255 		if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
1256 			ctxt->info.sw_flags2 |=
1257 				ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
1258 		else
1259 			ctxt->info.sw_flags2 &=
1260 				~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
1261 	}
1262 
1263 	ice_set_dflt_vsi_ctx(hw, ctxt);
1264 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
1265 		ice_set_fd_vsi_ctx(ctxt, vsi);
1266 	/* if the switch is in VEB mode, allow VSI loopback */
1267 	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
1268 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
1269 
1270 	/* Set LUT type and HASH type if RSS is enabled */
1271 	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
1272 	    vsi->type != ICE_VSI_CTRL) {
1273 		ice_set_rss_vsi_ctx(ctxt, vsi);
1274 		/* if updating VSI context, make sure to set valid_section:
1275 		 * to indicate which section of VSI context being updated
1276 		 */
1277 		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
1278 			ctxt->info.valid_sections |=
1279 				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
1280 	}
1281 
1282 	ctxt->info.sw_id = vsi->port_info->sw_id;
1283 	if (vsi->type == ICE_VSI_CHNL) {
1284 		ice_chnl_vsi_setup_q_map(vsi, ctxt);
1285 	} else {
1286 		ret = ice_vsi_setup_q_map(vsi, ctxt);
1287 		if (ret)
1288 			goto out;
1289 
1290 		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
1291 			/* the VSI is being updated; set valid_sections to
1292 			 * indicate which sections of the VSI context are
1293 			 * being modified
1294 			 */
1295 			ctxt->info.valid_sections |=
1296 				cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
1297 	}
1298 
1299 	/* Allow control frames out of main VSI */
1300 	if (vsi->type == ICE_VSI_PF) {
1301 		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
1302 		ctxt->info.valid_sections |=
1303 			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1304 	}
1305 
1306 	if (vsi_flags & ICE_VSI_FLAG_INIT) {
1307 		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
1308 		if (ret) {
1309 			dev_err(dev, "Add VSI failed, err %d\n", ret);
1310 			ret = -EIO;
1311 			goto out;
1312 		}
1313 	} else {
1314 		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
1315 		if (ret) {
1316 			dev_err(dev, "Update VSI failed, err %d\n", ret);
1317 			ret = -EIO;
1318 			goto out;
1319 		}
1320 	}
1321 
1322 	/* keep context for update VSI operations */
1323 	vsi->info = ctxt->info;
1324 
1325 	/* record VSI number returned */
1326 	vsi->vsi_num = ctxt->vsi_num;
1327 
1328 out:
1329 	kfree(ctxt);
1330 	return ret;
1331 }
1332 
1333 /**
1334  * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
1335  * @vsi: the VSI having rings deallocated
1336  */
1337 static void ice_vsi_clear_rings(struct ice_vsi *vsi)
1338 {
1339 	int i;
1340 
1341 	/* Avoid stale references by clearing map from vector to ring */
1342 	if (vsi->q_vectors) {
1343 		ice_for_each_q_vector(vsi, i) {
1344 			struct ice_q_vector *q_vector = vsi->q_vectors[i];
1345 
1346 			if (q_vector) {
1347 				q_vector->tx.tx_ring = NULL;
1348 				q_vector->rx.rx_ring = NULL;
1349 			}
1350 		}
1351 	}
1352 
1353 	if (vsi->tx_rings) {
1354 		ice_for_each_alloc_txq(vsi, i) {
1355 			if (vsi->tx_rings[i]) {
1356 				kfree_rcu(vsi->tx_rings[i], rcu);
1357 				WRITE_ONCE(vsi->tx_rings[i], NULL);
1358 			}
1359 		}
1360 	}
1361 	if (vsi->rx_rings) {
1362 		ice_for_each_alloc_rxq(vsi, i) {
1363 			if (vsi->rx_rings[i]) {
1364 				kfree_rcu(vsi->rx_rings[i], rcu);
1365 				WRITE_ONCE(vsi->rx_rings[i], NULL);
1366 			}
1367 		}
1368 	}
1369 }
1370 
1371 /**
1372  * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
1373  * @vsi: VSI which is having rings allocated
1374  */
1375 static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
1376 {
1377 	bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
1378 	struct ice_pf *pf = vsi->back;
1379 	struct device *dev;
1380 	u16 i;
1381 
1382 	dev = ice_pf_to_dev(pf);
1383 	/* Allocate Tx rings */
1384 	ice_for_each_alloc_txq(vsi, i) {
1385 		struct ice_tx_ring *ring;
1386 
1387 		/* allocate with kzalloc(), free with kfree_rcu() */
1388 		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1389 
1390 		if (!ring)
1391 			goto err_out;
1392 
1393 		ring->q_index = i;
1394 		ring->reg_idx = vsi->txq_map[i];
1395 		ring->vsi = vsi;
1396 		ring->tx_tstamps = &pf->ptp.port.tx;
1397 		ring->dev = dev;
1398 		ring->count = vsi->num_tx_desc;
1399 		ring->txq_teid = ICE_INVAL_TEID;
1400 		if (dvm_ena)
1401 			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
1402 		else
1403 			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
1404 		WRITE_ONCE(vsi->tx_rings[i], ring);
1405 	}
1406 
1407 	/* Allocate Rx rings */
1408 	ice_for_each_alloc_rxq(vsi, i) {
1409 		struct ice_rx_ring *ring;
1410 
1411 		/* allocate with kzalloc(), free with kfree_rcu() */
1412 		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1413 		if (!ring)
1414 			goto err_out;
1415 
1416 		ring->q_index = i;
1417 		ring->reg_idx = vsi->rxq_map[i];
1418 		ring->vsi = vsi;
1419 		ring->netdev = vsi->netdev;
1420 		ring->dev = dev;
1421 		ring->count = vsi->num_rx_desc;
1422 		ring->cached_phctime = pf->ptp.cached_phc_time;
1423 		WRITE_ONCE(vsi->rx_rings[i], ring);
1424 	}
1425 
1426 	return 0;
1427 
1428 err_out:
1429 	ice_vsi_clear_rings(vsi);
1430 	return -ENOMEM;
1431 }
1432 
1433 /**
1434  * ice_vsi_manage_rss_lut - disable/enable RSS
1435  * @vsi: the VSI being changed
1436  * @ena: boolean value indicating if this is an enable or disable request
1437  *
1438  * In the event of disable request for RSS, this function will zero out RSS
1439  * LUT, while in the event of enable request for RSS, it will reconfigure RSS
1440  * LUT.
1441  */
1442 void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
1443 {
1444 	u8 *lut;
1445 
1446 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1447 	if (!lut)
1448 		return;
1449 
1450 	if (ena) {
1451 		if (vsi->rss_lut_user)
1452 			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1453 		else
1454 			ice_fill_rss_lut(lut, vsi->rss_table_size,
1455 					 vsi->rss_size);
1456 	}
1457 
1458 	ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
1459 	kfree(lut);
1460 }
1461 
1462 /**
1463  * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
1464  * @vsi: VSI to be configured
1465  * @disable: set to true to have FCS / CRC in the frame data
1466  */
1467 void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
1468 {
1469 	int i;
1470 
1471 	ice_for_each_rxq(vsi, i)
1472 		if (disable)
1473 			vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
1474 		else
1475 			vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
1476 }
1477 
1478 /**
1479  * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
1480  * @vsi: VSI to be configured
1481  */
1482 int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
1483 {
1484 	struct ice_pf *pf = vsi->back;
1485 	struct device *dev;
1486 	u8 *lut, *key;
1487 	int err;
1488 
1489 	dev = ice_pf_to_dev(pf);
1490 	if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
1491 	    (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
1492 		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
1493 	} else {
1494 		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
1495 
1496 		/* If orig_rss_size is valid and it is less than determined
1497 		 * main VSI's rss_size, update main VSI's rss_size to be
1498 		 * orig_rss_size so that when tc-qdisc is deleted, main VSI
1499 		 * RSS table gets programmed to be correct (whatever it was
1500 		 * to begin with, prior to setup-tc for the ADQ config).
1501 		 */
1502 		if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
1503 		    vsi->orig_rss_size <= vsi->num_rxq) {
1504 			vsi->rss_size = vsi->orig_rss_size;
1505 			/* now orig_rss_size is used, reset it to zero */
1506 			vsi->orig_rss_size = 0;
1507 		}
1508 	}
1509 
1510 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1511 	if (!lut)
1512 		return -ENOMEM;
1513 
1514 	if (vsi->rss_lut_user)
1515 		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1516 	else
1517 		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
1518 
1519 	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
1520 	if (err) {
1521 		dev_err(dev, "set_rss_lut failed, error %d\n", err);
1522 		goto ice_vsi_cfg_rss_exit;
1523 	}
1524 
1525 	key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL);
1526 	if (!key) {
1527 		err = -ENOMEM;
1528 		goto ice_vsi_cfg_rss_exit;
1529 	}
1530 
1531 	if (vsi->rss_hkey_user)
1532 		memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1533 	else
1534 		netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1535 
1536 	err = ice_set_rss_key(vsi, key);
1537 	if (err)
1538 		dev_err(dev, "set_rss_key failed, error %d\n", err);
1539 
1540 	kfree(key);
1541 ice_vsi_cfg_rss_exit:
1542 	kfree(lut);
1543 	return err;
1544 }
1545 
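/* Editorial note: ice_fill_rss_lut() is defined elsewhere; assuming it
 * keeps its usual round-robin semantics, the default LUT built above is
 * equivalent to
 *
 *	for (i = 0; i < vsi->rss_table_size; i++)
 *		lut[i] = i % vsi->rss_size;
 */
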
1546 /**
1547  * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
1548  * @vsi: VSI to be configured
1549  *
1550  * This function will only be called during the VF VSI setup. Upon successful
1551  * completion of package download, this function will configure default RSS
1552  * input sets for VF VSI.
1553  */
1554 static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
1555 {
1556 	struct ice_pf *pf = vsi->back;
1557 	struct device *dev;
1558 	int status;
1559 
1560 	dev = ice_pf_to_dev(pf);
1561 	if (ice_is_safe_mode(pf)) {
1562 		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
1563 			vsi->vsi_num);
1564 		return;
1565 	}
1566 
1567 	status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
1568 	if (status)
1569 		dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
1570 			vsi->vsi_num, status);
1571 }
1572 
1573 static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
1574 	/* configure RSS for IPv4 with input set IP src/dst */
1575 	{ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false},
1576 	/* configure RSS for IPv6 with input set IPv6 src/dst */
1577 	{ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false},
1578 	/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
1579 	{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
1580 				ICE_HASH_TCP_IPV4,  ICE_RSS_ANY_HEADERS, false},
1581 	/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
1582 	{ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4,
1583 				ICE_HASH_UDP_IPV4,  ICE_RSS_ANY_HEADERS, false},
1584 	/* configure RSS for sctp4 with input set IP src/dst - only support
1585 	 * RSS on SCTPv4 on outer headers (non-tunneled)
1586 	 */
1587 	{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
1588 		ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false},
1589 	/* configure RSS for gtpc4 with input set IPv4 src/dst */
1590 	{ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4,
1591 		ICE_FLOW_HASH_IPV4, ICE_RSS_OUTER_HEADERS, false},
1592 	/* configure RSS for gtpc4t with input set IPv4 src/dst */
1593 	{ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4,
1594 		ICE_FLOW_HASH_GTP_C_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
1595 	/* configure RSS for gtpu4 with input set IPv4 src/dst */
1596 	{ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4,
1597 		ICE_FLOW_HASH_GTP_U_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
1598 	/* configure RSS for gtpu4e with input set IPv4 src/dst */
1599 	{ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4,
1600 		ICE_FLOW_HASH_GTP_U_IPV4_EH, ICE_RSS_OUTER_HEADERS, false},
1601 	/* configure RSS for gtpu4u with input set IPv4 src/dst */
1602 	{ ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4,
1603 		ICE_FLOW_HASH_GTP_U_IPV4_UP, ICE_RSS_OUTER_HEADERS, false},
1604 	/* configure RSS for gtpu4d with input set IPv4 src/dst */
1605 	{ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4,
1606 		ICE_FLOW_HASH_GTP_U_IPV4_DWN, ICE_RSS_OUTER_HEADERS, false},
1607 
1608 	/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
1609 	{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
1610 				ICE_HASH_TCP_IPV6,  ICE_RSS_ANY_HEADERS, false},
1611 	/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
1612 	{ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6,
1613 				ICE_HASH_UDP_IPV6,  ICE_RSS_ANY_HEADERS, false},
1614 	/* configure RSS for sctp6 with input set IPv6 src/dst - only support
1615 	 * RSS on SCTPv6 on outer headers (non-tunneled)
1616 	 */
1617 	{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6,
1618 		ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false},
1619 	/* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
1620 	{ICE_FLOW_SEG_HDR_ESP,
1621 		ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},
1622 	/* configure RSS for gtpc6 with input set IPv6 src/dst */
1623 	{ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6,
1624 		ICE_FLOW_HASH_IPV6, ICE_RSS_OUTER_HEADERS, false},
1625 	/* configure RSS for gtpc6t with input set IPv6 src/dst */
1626 	{ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6,
1627 		ICE_FLOW_HASH_GTP_C_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
1628 	/* configure RSS for gtpu6 with input set IPv6 src/dst */
1629 	{ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6,
1630 		ICE_FLOW_HASH_GTP_U_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
1631 	/* configure RSS for gtpu6e with input set IPv6 src/dst */
1632 	{ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6,
1633 		ICE_FLOW_HASH_GTP_U_IPV6_EH, ICE_RSS_OUTER_HEADERS, false},
1634 	/* configure RSS for gtpu6u with input set IPv6 src/dst */
1635 	{ ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6,
1636 		ICE_FLOW_HASH_GTP_U_IPV6_UP, ICE_RSS_OUTER_HEADERS, false},
1637 	/* configure RSS for gtpu6d with input set IPv6 src/dst */
1638 	{ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6,
1639 		ICE_FLOW_HASH_GTP_U_IPV6_DWN, ICE_RSS_OUTER_HEADERS, false},
1640 };
1641 
1642 /**
1643  * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
1644  * @vsi: VSI to be configured
1645  *
1646  * This function will only be called after successful download package call
1647  * during initialization of PF. Since the downloaded package will erase the
1648  * RSS section, this function will configure RSS input sets for different
1649  * flow types. The last profile added has the highest priority, therefore 2
1650  * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles
1651  * (i.e. IPv4 src/dst TCP src/dst port).
1652  */
1653 static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
1654 {
1655 	u16 vsi_num = vsi->vsi_num;
1656 	struct ice_pf *pf = vsi->back;
1657 	struct ice_hw *hw = &pf->hw;
1658 	struct device *dev;
1659 	int status;
1660 	u32 i;
1661 
1662 	dev = ice_pf_to_dev(pf);
1663 	if (ice_is_safe_mode(pf)) {
1664 		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
1665 			vsi_num);
1666 		return;
1667 	}
1668 	for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) {
1669 		const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i];
1670 
1671 		status = ice_add_rss_cfg(hw, vsi, cfg);
1672 		if (status)
1673 			dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n",
1674 				cfg->addl_hdrs, cfg->hash_flds,
1675 				cfg->hdr_type, cfg->symm);
1676 	}
1677 }
1678 
1679 /**
1680  * ice_pf_state_is_nominal - checks the PF for nominal state
1681  * @pf: pointer to PF to check
1682  *
1683  * Check the PF's state for a collection of bits that would indicate
1684  * the PF is in a state that would inhibit normal operation for
1685  * driver functionality.
1686  *
1687  * Returns true if PF is in a nominal state, false otherwise
1688  */
1689 bool ice_pf_state_is_nominal(struct ice_pf *pf)
1690 {
1691 	DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };
1692 
1693 	if (!pf)
1694 		return false;
1695 
1696 	bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
1697 	if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
1698 		return false;
1699 
1700 	return true;
1701 }
1702 
1703 #define ICE_FW_MODE_REC_M BIT(1)
1704 bool ice_is_recovery_mode(struct ice_hw *hw)
1705 {
1706 	return rd32(hw, GL_MNG_FWSM) & ICE_FW_MODE_REC_M;
1707 }
1708 
1709 /**
1710  * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
1711  * @vsi: the VSI to be updated
1712  */
1713 void ice_update_eth_stats(struct ice_vsi *vsi)
1714 {
1715 	struct ice_eth_stats *prev_es, *cur_es;
1716 	struct ice_hw *hw = &vsi->back->hw;
1717 	struct ice_pf *pf = vsi->back;
1718 	u16 vsi_num = vsi->vsi_num;    /* HW absolute index of a VSI */
1719 
1720 	prev_es = &vsi->eth_stats_prev;
1721 	cur_es = &vsi->eth_stats;
1722 
1723 	if (ice_is_reset_in_progress(pf->state))
1724 		vsi->stat_offsets_loaded = false;
1725 
1726 	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
1727 			  &prev_es->rx_bytes, &cur_es->rx_bytes);
1728 
1729 	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
1730 			  &prev_es->rx_unicast, &cur_es->rx_unicast);
1731 
1732 	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
1733 			  &prev_es->rx_multicast, &cur_es->rx_multicast);
1734 
1735 	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
1736 			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);
1737 
1738 	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
1739 			  &prev_es->rx_discards, &cur_es->rx_discards);
1740 
1741 	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
1742 			  &prev_es->tx_bytes, &cur_es->tx_bytes);
1743 
1744 	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
1745 			  &prev_es->tx_unicast, &cur_es->tx_unicast);
1746 
1747 	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
1748 			  &prev_es->tx_multicast, &cur_es->tx_multicast);
1749 
1750 	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
1751 			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);
1752 
1753 	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
1754 			  &prev_es->tx_errors, &cur_es->tx_errors);
1755 
1756 	vsi->stat_offsets_loaded = true;
1757 }
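/* The ice_stat_update40/32 helpers above implement an offset-based
 * scheme: when stat_offsets_loaded is false, the current register value
 * is cached as a baseline in prev_es, and later reads report the delta
 * from that baseline. A rough hand-written sketch of the idea (the real
 * helpers also handle counter wraparound):
 *
 *	new = rd(hw, reg);
 *	if (!offsets_loaded)
 *		*prev = new;
 *	*cur = new - *prev;
 */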
1758 
1759 /**
1760  * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
1761  * @hw: HW pointer
1762  * @pf_q: index of the Rx queue in the PF's queue space
1763  * @rxdid: flexible descriptor RXDID
1764  * @prio: priority for the RXDID for this queue
1765  * @ena_ts: true to enable timestamp and false to disable timestamp
1766  */
1767 void
1768 ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
1769 			bool ena_ts)
1770 {
1771 	int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
1772 
1773 	/* clear any previous values */
1774 	regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
1775 		    QRXFLXP_CNTXT_RXDID_PRIO_M |
1776 		    QRXFLXP_CNTXT_TS_M);
1777 
1778 	regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_IDX_M, rxdid);
1779 	regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_PRIO_M, prio);
1780 
1781 	if (ena_ts)
1782 		/* Enable TimeSync on this queue */
1783 		regval |= QRXFLXP_CNTXT_TS_M;
1784 
1785 	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
1786 }
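/* Usage sketch (illustrative): program queue 0 with a flexible
 * descriptor profile at the highest RXDID priority and timestamps off.
 * ICE_RXDID_FLEX_NIC is assumed here as a typical profile ID.
 *
 *	ice_write_qrxflxp_cntxt(hw, 0, ICE_RXDID_FLEX_NIC, 0x3, false);
 */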
1787 
1788 /**
1789  * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
1790  * @intrl: interrupt rate limit in usecs
1791  * @gran: interrupt rate limit granularity in usecs
1792  *
1793  * This function converts a decimal interrupt rate limit in usecs to the format
1794  * expected by firmware.
1795  */
1796 static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
1797 {
1798 	u32 val = intrl / gran;
1799 
1800 	if (val)
1801 		return val | GLINT_RATE_INTRL_ENA_M;
1802 	return 0;
1803 }
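/* Worked example: with gran = 4 us, a requested limit of intrl = 10 us
 * yields val = 10 / 4 = 2 (8 us effective after truncation), returned
 * with GLINT_RATE_INTRL_ENA_M set; intrl = 0 returns 0, which leaves
 * rate limiting disabled.
 */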
1804 
1805 /**
1806  * ice_write_intrl - write throttle rate limit to interrupt specific register
1807  * @q_vector: pointer to interrupt specific structure
1808  * @intrl: throttle rate limit in microseconds to write
1809  */
1810 void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
1811 {
1812 	struct ice_hw *hw = &q_vector->vsi->back->hw;
1813 
1814 	wr32(hw, GLINT_RATE(q_vector->reg_idx),
1815 	     ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
1816 }
1817 
1818 static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
1819 {
1820 	switch (rc->type) {
1821 	case ICE_RX_CONTAINER:
1822 		if (rc->rx_ring)
1823 			return rc->rx_ring->q_vector;
1824 		break;
1825 	case ICE_TX_CONTAINER:
1826 		if (rc->tx_ring)
1827 			return rc->tx_ring->q_vector;
1828 		break;
1829 	default:
1830 		break;
1831 	}
1832 
1833 	return NULL;
1834 }
1835 
1836 /**
1837  * __ice_write_itr - write throttle rate to register
1838  * @q_vector: pointer to interrupt data structure
1839  * @rc: pointer to ring container
1840  * @itr: throttle rate in microseconds to write
1841  */
1842 static void __ice_write_itr(struct ice_q_vector *q_vector,
1843 			    struct ice_ring_container *rc, u16 itr)
1844 {
1845 	struct ice_hw *hw = &q_vector->vsi->back->hw;
1846 
1847 	wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
1848 	     ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S);
1849 }
1850 
1851 /**
1852  * ice_write_itr - write throttle rate to queue specific register
1853  * @rc: pointer to ring container
1854  * @itr: throttle rate in microseconds to write
1855  */
1856 void ice_write_itr(struct ice_ring_container *rc, u16 itr)
1857 {
1858 	struct ice_q_vector *q_vector;
1859 
1860 	q_vector = ice_pull_qvec_from_rc(rc);
1861 	if (!q_vector)
1862 		return;
1863 
1864 	__ice_write_itr(q_vector, rc, itr);
1865 }
1866 
1867 /**
1868  * ice_set_q_vector_intrl - set up interrupt rate limiting
1869  * @q_vector: the vector to be configured
1870  *
1871  * Interrupt rate limiting is local to the vector, not per-queue so we must
1872  * detect if either ring container has dynamic moderation enabled to decide
1873  * what to set the interrupt rate limit to via INTRL settings. In the case that
1874  * dynamic moderation is disabled on both, write the value with the cached
1875  * setting to make sure INTRL register matches the user visible value.
1876  */
1877 void ice_set_q_vector_intrl(struct ice_q_vector *q_vector)
1878 {
1879 	if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {
1880 		/* When dynamic moderation is enabled, cap each vector at
1881 		 * 4 us, i.e. no more than 250,000 interrupts per second.
1882 		 * This still allows low latency while staying below the
1883 		 * 500,000 interrupts per second the lowest latency setting
1884 		 * would otherwise produce, which reduces CPU load a bit.
1885 		 */
1886 		ice_write_intrl(q_vector, 4);
1887 	} else {
1888 		ice_write_intrl(q_vector, q_vector->intrl);
1889 	}
1890 }
1891 
1892 /**
1893  * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
1894  * @vsi: the VSI being configured
1895  *
1896  * This configures MSIX mode interrupts for the PF VSI, and should not be used
1897  * for the VF VSI.
1898  */
1899 void ice_vsi_cfg_msix(struct ice_vsi *vsi)
1900 {
1901 	struct ice_pf *pf = vsi->back;
1902 	struct ice_hw *hw = &pf->hw;
1903 	u16 txq = 0, rxq = 0;
1904 	int i, q;
1905 
1906 	ice_for_each_q_vector(vsi, i) {
1907 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
1908 		u16 reg_idx = q_vector->reg_idx;
1909 
1910 		ice_cfg_itr(hw, q_vector);
1911 
1912 		/* Both the Transmit Queue Interrupt Cause Control register
1913 		 * and the Receive Queue Interrupt Cause Control register
1914 		 * expect the MSIX_INDX field to be the vector index
1915 		 * within the function space and not the absolute
1916 		 * vector index across the PF or across the device.
1917 		 * For SR-IOV VF VSIs, the queue vector index always starts
1918 		 * at 1 since the first vector index (0) is used for OICR
1919 		 * in VF space. Since VMDq and other PF VSIs are within
1920 		 * the PF function space, use the vector index that is
1921 		 * tracked for this PF.
1922 		 */
1923 		for (q = 0; q < q_vector->num_ring_tx; q++) {
1924 			ice_cfg_txq_interrupt(vsi, txq, reg_idx,
1925 					      q_vector->tx.itr_idx);
1926 			txq++;
1927 		}
1928 
1929 		for (q = 0; q < q_vector->num_ring_rx; q++) {
1930 			ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
1931 					      q_vector->rx.itr_idx);
1932 			rxq++;
1933 		}
1934 	}
1935 }
1936 
1937 /**
1938  * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
1939  * @vsi: the VSI whose rings are to be enabled
1940  *
1941  * Returns 0 on success and a negative value on error
1942  */
1943 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
1944 {
1945 	return ice_vsi_ctrl_all_rx_rings(vsi, true);
1946 }
1947 
1948 /**
1949  * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
1950  * @vsi: the VSI whose rings are to be disabled
1951  *
1952  * Returns 0 on success and a negative value on error
1953  */
1954 int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
1955 {
1956 	return ice_vsi_ctrl_all_rx_rings(vsi, false);
1957 }
1958 
1959 /**
1960  * ice_vsi_stop_tx_rings - Disable Tx rings
1961  * @vsi: the VSI being configured
1962  * @rst_src: reset source
1963  * @rel_vmvf_num: Relative ID of VF/VM
1964  * @rings: Tx ring array to be stopped
1965  * @count: number of Tx ring array elements
1966  */
1967 static int
1968 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
1969 		      u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)
1970 {
1971 	u16 q_idx;
1972 
1973 	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
1974 		return -EINVAL;
1975 
1976 	for (q_idx = 0; q_idx < count; q_idx++) {
1977 		struct ice_txq_meta txq_meta = { };
1978 		int status;
1979 
1980 		if (!rings || !rings[q_idx])
1981 			return -EINVAL;
1982 
1983 		ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
1984 		status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
1985 					      rings[q_idx], &txq_meta);
1986 
1987 		if (status)
1988 			return status;
1989 	}
1990 
1991 	return 0;
1992 }
1993 
1994 /**
1995  * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
1996  * @vsi: the VSI being configured
1997  * @rst_src: reset source
1998  * @rel_vmvf_num: Relative ID of VF/VM
1999  */
2000 int
2001 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2002 			  u16 rel_vmvf_num)
2003 {
2004 	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
2005 }
2006 
2007 /**
2008  * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
2009  * @vsi: the VSI being configured
2010  */
2011 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
2012 {
2013 	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
2014 }
2015 
2016 /**
2017  * ice_vsi_is_rx_queue_active - check if any Rx queue of a VSI is active
2018  * @vsi: the VSI being configured
2019  *
2020  * Return true if at least one queue is active.
2021  */
2022 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
2023 {
2024 	struct ice_pf *pf = vsi->back;
2025 	struct ice_hw *hw = &pf->hw;
2026 	int i;
2027 
2028 	ice_for_each_rxq(vsi, i) {
2029 		u32 rx_reg;
2030 		int pf_q;
2031 
2032 		pf_q = vsi->rxq_map[i];
2033 		rx_reg = rd32(hw, QRX_CTRL(pf_q));
2034 		if (rx_reg & QRX_CTRL_QENA_STAT_M)
2035 			return true;
2036 	}
2037 
2038 	return false;
2039 }
2040 
2041 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
2042 {
2043 	if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
2044 		vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
2045 		vsi->tc_cfg.numtc = 1;
2046 		return;
2047 	}
2048 
2049 	/* set VSI TC information based on DCB config */
2050 	ice_vsi_set_dcb_tc_cfg(vsi);
2051 }
2052 
2053 /**
2054  * ice_cfg_sw_lldp - configure switch rules for LLDP packet handling
2055  * @vsi: the VSI being configured
2056  * @tx: bool to determine Tx or Rx rule
2057  * @create: bool to determine whether to create or remove the rule
2058  */
2059 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
2060 {
2061 	int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
2062 			enum ice_sw_fwd_act_type act);
2063 	struct ice_pf *pf = vsi->back;
2064 	struct device *dev;
2065 	int status;
2066 
2067 	dev = ice_pf_to_dev(pf);
2068 	eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
2069 
2070 	if (tx) {
2071 		status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
2072 				  ICE_DROP_PACKET);
2073 	} else {
2074 		if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
2075 			status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
2076 							  create);
2077 		} else {
2078 			status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
2079 					  ICE_FWD_TO_VSI);
2080 		}
2081 	}
2082 
2083 	if (status)
2084 		dev_dbg(dev, "Failed %s %s LLDP rule on VSI %i, error: %d\n",
2085 			create ? "adding" : "removing", tx ? "TX" : "RX",
2086 			vsi->vsi_num, status);
2087 }
2088 
2089 /**
2090  * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it
2091  * @vsi: pointer to the VSI
2092  *
2093  * This function will allocate a new scheduler aggregator node if needed and
2094  * will move the specified VSI into it.
2095  */
2096 static void ice_set_agg_vsi(struct ice_vsi *vsi)
2097 {
2098 	struct device *dev = ice_pf_to_dev(vsi->back);
2099 	struct ice_agg_node *agg_node_iter = NULL;
2100 	u32 agg_id = ICE_INVALID_AGG_NODE_ID;
2101 	struct ice_agg_node *agg_node = NULL;
2102 	int node_offset, max_agg_nodes = 0;
2103 	struct ice_port_info *port_info;
2104 	struct ice_pf *pf = vsi->back;
2105 	u32 agg_node_id_start = 0;
2106 	int status;
2107 
2108 	/* create (as needed) scheduler aggregator node and move VSI into
2109 	 * corresponding aggregator node
2110 	 * - the PF aggregator node contains VSIs of type _PF and _CTRL
2111 	 * - VF aggregator nodes will contain VF VSIs
2112 	 */
2113 	port_info = pf->hw.port_info;
2114 	if (!port_info)
2115 		return;
2116 
2117 	switch (vsi->type) {
2118 	case ICE_VSI_CTRL:
2119 	case ICE_VSI_CHNL:
2120 	case ICE_VSI_LB:
2121 	case ICE_VSI_PF:
2122 	case ICE_VSI_SF:
2123 		max_agg_nodes = ICE_MAX_PF_AGG_NODES;
2124 		agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
2125 		agg_node_iter = &pf->pf_agg_node[0];
2126 		break;
2127 	case ICE_VSI_VF:
2128 		/* A user can create 'n' VFs on a given PF, but an aggregator
2129 		 * node can have at most 64 children. The following code handles
2130 		 * aggregator(s) for VF VSIs: it either selects an agg_node that
2131 		 * was already created, provided its num_vsis < 64, or selects
2132 		 * the next available node, which will then be created
2133 		 */
2134 		max_agg_nodes = ICE_MAX_VF_AGG_NODES;
2135 		agg_node_id_start = ICE_VF_AGG_NODE_ID_START;
2136 		agg_node_iter = &pf->vf_agg_node[0];
2137 		break;
2138 	default:
2139 		/* other VSI type, handle later if needed */
2140 		dev_dbg(dev, "unexpected VSI type %s\n",
2141 			ice_vsi_type_str(vsi->type));
2142 		return;
2143 	}
2144 
2145 	/* find the appropriate aggregator node */
2146 	for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) {
2147 		/* see if we can find space in previously created
2148 		 * node if num_vsis < 64, otherwise skip
2149 		 */
2150 		if (agg_node_iter->num_vsis &&
2151 		    agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
2152 			agg_node_iter++;
2153 			continue;
2154 		}
2155 
2156 		if (agg_node_iter->valid &&
2157 		    agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) {
2158 			agg_id = agg_node_iter->agg_id;
2159 			agg_node = agg_node_iter;
2160 			break;
2161 		}
2162 
2163 		/* find unclaimed agg_id */
2164 		if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) {
2165 			agg_id = node_offset + agg_node_id_start;
2166 			agg_node = agg_node_iter;
2167 			break;
2168 		}
2169 		/* move to next agg_node */
2170 		agg_node_iter++;
2171 	}
2172 
2173 	if (!agg_node)
2174 		return;
2175 
2176 	/* if selected aggregator node was not created, create it */
2177 	if (!agg_node->valid) {
2178 		status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG,
2179 				     (u8)vsi->tc_cfg.ena_tc);
2180 		if (status) {
2181 			dev_err(dev, "unable to create aggregator node with agg_id %u\n",
2182 				agg_id);
2183 			return;
2184 		}
2185 		/* aggregator node is created, store the needed info */
2186 		agg_node->valid = true;
2187 		agg_node->agg_id = agg_id;
2188 	}
2189 
2190 	/* move VSI to corresponding aggregator node */
2191 	status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
2192 				     (u8)vsi->tc_cfg.ena_tc);
2193 	if (status) {
2194 		dev_err(dev, "unable to move VSI idx %u into aggregator %u node",
2195 			vsi->idx, agg_id);
2196 		return;
2197 	}
2198 
2199 	/* keep active children count for aggregator node */
2200 	agg_node->num_vsis++;
2201 
2202 	/* cache the 'agg_id' in VSI, so that after reset - VSI will be moved
2203 	 * to aggregator node
2204 	 */
2205 	vsi->agg_node = agg_node;
2206 	dev_dbg(dev, "successfully moved VSI idx %u (tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n",
2207 		vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
2208 		vsi->agg_node->num_vsis);
2209 }
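/* Worked example of the selection loop above: with at most
 * ICE_MAX_VSIS_IN_AGG_NODE (64) children per aggregator node, 130 VF
 * VSIs would completely fill two VF aggregator nodes and place the
 * remaining two VSIs in a third node, created on first use.
 */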
2210 
2211 static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
2212 {
2213 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2214 	struct device *dev = ice_pf_to_dev(pf);
2215 	int ret, i;
2216 
2217 	/* configure VSI nodes based on the number of queues and TCs */
2218 	ice_for_each_traffic_class(i) {
2219 		if (!(vsi->tc_cfg.ena_tc & BIT(i)))
2220 			continue;
2221 
2222 		if (vsi->type == ICE_VSI_CHNL) {
2223 			if (!vsi->alloc_txq && vsi->num_txq)
2224 				max_txqs[i] = vsi->num_txq;
2225 			else
2226 				max_txqs[i] = pf->num_lan_tx;
2227 		} else {
2228 			max_txqs[i] = vsi->alloc_txq;
2229 		}
2230 
2231 		if (vsi->type == ICE_VSI_PF)
2232 			max_txqs[i] += vsi->num_xdp_txq;
2233 	}
2234 
2235 	dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
2236 	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2237 			      max_txqs);
2238 	if (ret) {
2239 		dev_err(dev, "VSI %d failed lan queue config, error %d\n",
2240 			vsi->vsi_num, ret);
2241 		return ret;
2242 	}
2243 
2244 	return 0;
2245 }
2246 
2247 /**
2248  * ice_vsi_cfg_def - configure default VSI based on the type
2249  * @vsi: pointer to VSI
2250  */
2251 static int ice_vsi_cfg_def(struct ice_vsi *vsi)
2252 {
2253 	struct device *dev = ice_pf_to_dev(vsi->back);
2254 	struct ice_pf *pf = vsi->back;
2255 	int ret;
2256 
2257 	vsi->vsw = pf->first_sw;
2258 
2259 	ret = ice_vsi_alloc_def(vsi, vsi->ch);
2260 	if (ret)
2261 		return ret;
2262 
2263 	/* allocate memory for Tx/Rx ring stat pointers */
2264 	ret = ice_vsi_alloc_stat_arrays(vsi);
2265 	if (ret)
2266 		goto unroll_vsi_alloc;
2267 
2268 	ice_alloc_fd_res(vsi);
2269 
2270 	ret = ice_vsi_get_qs(vsi);
2271 	if (ret) {
2272 		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
2273 			vsi->idx);
2274 		goto unroll_vsi_alloc_stat;
2275 	}
2276 
2277 	/* set RSS capabilities */
2278 	ice_vsi_set_rss_params(vsi);
2279 
2280 	/* set TC configuration */
2281 	ice_vsi_set_tc_cfg(vsi);
2282 
2283 	/* create the VSI */
2284 	ret = ice_vsi_init(vsi, vsi->flags);
2285 	if (ret)
2286 		goto unroll_get_qs;
2287 
2288 	ice_vsi_init_vlan_ops(vsi);
2289 
2290 	switch (vsi->type) {
2291 	case ICE_VSI_CTRL:
2292 	case ICE_VSI_SF:
2293 	case ICE_VSI_PF:
2294 		ret = ice_vsi_alloc_q_vectors(vsi);
2295 		if (ret)
2296 			goto unroll_vsi_init;
2297 
2298 		ret = ice_vsi_alloc_rings(vsi);
2299 		if (ret)
2300 			goto unroll_vector_base;
2301 
2302 		ret = ice_vsi_alloc_ring_stats(vsi);
2303 		if (ret)
2304 			goto unroll_vector_base;
2305 
2306 		if (ice_is_xdp_ena_vsi(vsi)) {
2307 			ret = ice_vsi_determine_xdp_res(vsi);
2308 			if (ret)
2309 				goto unroll_vector_base;
2310 			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
2311 						    ICE_XDP_CFG_PART);
2312 			if (ret)
2313 				goto unroll_vector_base;
2314 		}
2315 
2316 		ice_vsi_map_rings_to_vectors(vsi);
2317 
2318 		vsi->stat_offsets_loaded = false;
2319 
2320 		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
2321 		if (vsi->type != ICE_VSI_CTRL)
2322 			/* Do not exit if configuring RSS had an issue, at
2323 			 * least receive traffic on first queue. Hence no
2324 			 * need to capture return value
2325 			 */
2326 			if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2327 				ice_vsi_cfg_rss_lut_key(vsi);
2328 				ice_vsi_set_rss_flow_fld(vsi);
2329 			}
2330 		ice_init_arfs(vsi);
2331 		break;
2332 	case ICE_VSI_CHNL:
2333 		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2334 			ice_vsi_cfg_rss_lut_key(vsi);
2335 			ice_vsi_set_rss_flow_fld(vsi);
2336 		}
2337 		break;
2338 	case ICE_VSI_VF:
2339 		/* The VF driver will take care of creating the netdev for this
2340 		 * type and of mapping queues to vectors through Virtchnl; the
2341 		 * PF driver only creates a VSI and corresponding structures
2342 		 * for bookkeeping purposes
2343 		 */
2344 		ret = ice_vsi_alloc_q_vectors(vsi);
2345 		if (ret)
2346 			goto unroll_vsi_init;
2347 
2348 		ret = ice_vsi_alloc_rings(vsi);
2349 		if (ret)
2350 			goto unroll_alloc_q_vector;
2351 
2352 		ret = ice_vsi_alloc_ring_stats(vsi);
2353 		if (ret)
2354 			goto unroll_vector_base;
2355 
2356 		vsi->stat_offsets_loaded = false;
2357 
2358 		/* Do not exit if configuring RSS had an issue, at least
2359 		 * receive traffic on first queue. Hence no need to capture
2360 		 * return value
2361 		 */
2362 		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2363 			ice_vsi_cfg_rss_lut_key(vsi);
2364 			ice_vsi_set_vf_rss_flow_fld(vsi);
2365 		}
2366 		break;
2367 	case ICE_VSI_LB:
2368 		ret = ice_vsi_alloc_rings(vsi);
2369 		if (ret)
2370 			goto unroll_vsi_init;
2371 
2372 		ret = ice_vsi_alloc_ring_stats(vsi);
2373 		if (ret)
2374 			goto unroll_vector_base;
2375 
2376 		break;
2377 	default:
2378 		/* clean up the resources and exit */
2379 		ret = -EINVAL;
2380 		goto unroll_vsi_init;
2381 	}
2382 
2383 	return 0;
2384 
2385 unroll_vector_base:
2386 	/* reclaim SW interrupts back to the common pool */
2387 unroll_alloc_q_vector:
2388 	ice_vsi_free_q_vectors(vsi);
2389 unroll_vsi_init:
2390 	ice_vsi_delete_from_hw(vsi);
2391 unroll_get_qs:
2392 	ice_vsi_put_qs(vsi);
2393 unroll_vsi_alloc_stat:
2394 	ice_vsi_free_stats(vsi);
2395 unroll_vsi_alloc:
2396 	ice_vsi_free_arrays(vsi);
2397 	return ret;
2398 }
2399 
2400 /**
2401  * ice_vsi_cfg - configure a previously allocated VSI
2402  * @vsi: pointer to VSI
2403  */
2404 int ice_vsi_cfg(struct ice_vsi *vsi)
2405 {
2406 	struct ice_pf *pf = vsi->back;
2407 	int ret;
2408 
2409 	if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
2410 		return -EINVAL;
2411 
2412 	ret = ice_vsi_cfg_def(vsi);
2413 	if (ret)
2414 		return ret;
2415 
2416 	ret = ice_vsi_cfg_tc_lan(vsi->back, vsi);
2417 	if (ret)
2418 		ice_vsi_decfg(vsi);
2419 
2420 	if (vsi->type == ICE_VSI_CTRL) {
2421 		if (vsi->vf) {
2422 			WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI);
2423 			vsi->vf->ctrl_vsi_idx = vsi->idx;
2424 		} else {
2425 			WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI);
2426 			pf->ctrl_vsi_idx = vsi->idx;
2427 		}
2428 	}
2429 
2430 	return ret;
2431 }
2432 
2433 /**
2434  * ice_vsi_decfg - remove all VSI configuration
2435  * @vsi: pointer to VSI
2436  */
2437 void ice_vsi_decfg(struct ice_vsi *vsi)
2438 {
2439 	struct ice_pf *pf = vsi->back;
2440 	int err;
2441 
2442 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
2443 	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
2444 	if (err)
2445 		dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
2446 			vsi->vsi_num, err);
2447 
2448 	if (vsi->xdp_rings)
2449 		/* return value check can be skipped here, it always returns
2450 		 * 0 if reset is in progress
2451 		 */
2452 		ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
2453 
2454 	ice_vsi_clear_rings(vsi);
2455 	ice_vsi_free_q_vectors(vsi);
2456 	ice_vsi_put_qs(vsi);
2457 	ice_vsi_free_arrays(vsi);
2458 
2459 	/* SR-IOV determines needed MSIX resources all at once instead of per
2460 	 * VSI since when VFs are spawned we know how many VFs there are and how
2461 	 * many interrupts each VF needs. SR-IOV MSIX resources are also
2462 	 * cleared in the same manner.
2463 	 */
2464 
2465 	if (vsi->type == ICE_VSI_VF &&
2466 	    vsi->agg_node && vsi->agg_node->valid)
2467 		vsi->agg_node->num_vsis--;
2468 }
2469 
2470 /**
2471  * ice_vsi_setup - Set up a VSI by a given type
2472  * @pf: board private structure
2473  * @params: parameters to use when creating the VSI
2474  *
2475  * This allocates the sw VSI structure and its queue resources.
2476  *
2477  * Returns pointer to the successfully allocated and configured VSI sw struct on
2478  * success, NULL on failure.
2479  */
2480 struct ice_vsi *
2481 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
2482 {
2483 	struct device *dev = ice_pf_to_dev(pf);
2484 	struct ice_vsi *vsi;
2485 	int ret;
2486 
2487 	/* ice_vsi_setup can only initialize a new VSI, and we must have
2488 	 * a port_info structure for it.
2489 	 */
2490 	if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
2491 	    WARN_ON(!params->port_info))
2492 		return NULL;
2493 
2494 	vsi = ice_vsi_alloc(pf);
2495 	if (!vsi) {
2496 		dev_err(dev, "could not allocate VSI\n");
2497 		return NULL;
2498 	}
2499 
2500 	vsi->params = *params;
2501 	ret = ice_vsi_cfg(vsi);
2502 	if (ret)
2503 		goto err_vsi_cfg;
2504 
2505 	/* Add a switch rule of lookup type ETHERTYPE to drop all Tx flow
2506 	 * control frames from VSIs, restricting a malicious VF from sending
2507 	 * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
2508 	 * The rule is added once for the PF VSI in order to create the
2509 	 * appropriate recipe, since the VSI/VSI list is ignored with the drop
2510 	 * action... Also add rules to handle LLDP Tx packets. Tx LLDP packets
2511 	 * need to be dropped so that VFs cannot send LLDP packets to
2512 	 * reconfigure DCB settings in the HW.
2513 	 */
2514 	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
2515 		ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
2516 				 ICE_DROP_PACKET);
2517 		ice_cfg_sw_lldp(vsi, true, true);
2518 	}
2519 
2520 	if (!vsi->agg_node)
2521 		ice_set_agg_vsi(vsi);
2522 
2523 	return vsi;
2524 
2525 err_vsi_cfg:
2526 	ice_vsi_free(vsi);
2527 
2528 	return NULL;
2529 }
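/* Usage sketch (illustrative): a caller fills the cfg params and hands
 * them to ice_vsi_setup. Only .flags and .port_info are validated above;
 * the .type member is assumed here for the example.
 *
 *	struct ice_vsi_cfg_params params = {
 *		.type = ICE_VSI_PF,
 *		.port_info = pf->hw.port_info,
 *		.flags = ICE_VSI_FLAG_INIT,
 *	};
 *	struct ice_vsi *vsi = ice_vsi_setup(pf, &params);
 *
 *	if (!vsi)
 *		return -ENOMEM;
 */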
2530 
2531 /**
2532  * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
2533  * @vsi: the VSI being cleaned up
2534  */
2535 static void ice_vsi_release_msix(struct ice_vsi *vsi)
2536 {
2537 	struct ice_pf *pf = vsi->back;
2538 	struct ice_hw *hw = &pf->hw;
2539 	u32 txq = 0;
2540 	u32 rxq = 0;
2541 	int i, q;
2542 
2543 	ice_for_each_q_vector(vsi, i) {
2544 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
2545 
2546 		ice_write_intrl(q_vector, 0);
2547 		for (q = 0; q < q_vector->num_ring_tx; q++) {
2548 			ice_write_itr(&q_vector->tx, 0);
2549 			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
2550 			if (vsi->xdp_rings) {
2551 				u32 xdp_txq = txq + vsi->num_xdp_txq;
2552 
2553 				wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
2554 			}
2555 			txq++;
2556 		}
2557 
2558 		for (q = 0; q < q_vector->num_ring_rx; q++) {
2559 			ice_write_itr(&q_vector->rx, 0);
2560 			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
2561 			rxq++;
2562 		}
2563 	}
2564 
2565 	ice_flush(hw);
2566 }
2567 
2568 /**
2569  * ice_vsi_free_irq - Free the IRQ association with the OS
2570  * @vsi: the VSI being configured
2571  */
2572 void ice_vsi_free_irq(struct ice_vsi *vsi)
2573 {
2574 	struct ice_pf *pf = vsi->back;
2575 	int i;
2576 
2577 	if (!vsi->q_vectors || !vsi->irqs_ready)
2578 		return;
2579 
2580 	ice_vsi_release_msix(vsi);
2581 	if (vsi->type == ICE_VSI_VF)
2582 		return;
2583 
2584 	vsi->irqs_ready = false;
2585 	ice_free_cpu_rx_rmap(vsi);
2586 
2587 	ice_for_each_q_vector(vsi, i) {
2588 		int irq_num;
2589 
2590 		irq_num = vsi->q_vectors[i]->irq.virq;
2591 
2592 		/* free only the irqs that were actually requested */
2593 		if (!vsi->q_vectors[i] ||
2594 		    !(vsi->q_vectors[i]->num_ring_tx ||
2595 		      vsi->q_vectors[i]->num_ring_rx))
2596 			continue;
2597 
2598 		/* clear the affinity notifier in the IRQ descriptor */
2599 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2600 			irq_set_affinity_notifier(irq_num, NULL);
2601 
2602 		/* clear the affinity_hint in the IRQ descriptor */
2603 		irq_update_affinity_hint(irq_num, NULL);
2604 		synchronize_irq(irq_num);
2605 		devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
2606 	}
2607 }
2608 
2609 /**
2610  * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
2611  * @vsi: the VSI having resources freed
2612  */
2613 void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
2614 {
2615 	int i;
2616 
2617 	if (!vsi->tx_rings)
2618 		return;
2619 
2620 	ice_for_each_txq(vsi, i)
2621 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2622 			ice_free_tx_ring(vsi->tx_rings[i]);
2623 }
2624 
2625 /**
2626  * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
2627  * @vsi: the VSI having resources freed
2628  */
2629 void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
2630 {
2631 	int i;
2632 
2633 	if (!vsi->rx_rings)
2634 		return;
2635 
2636 	ice_for_each_rxq(vsi, i)
2637 		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2638 			ice_free_rx_ring(vsi->rx_rings[i]);
2639 }
2640 
2641 /**
2642  * ice_vsi_close - Shut down a VSI
2643  * @vsi: the VSI being shut down
2644  */
2645 void ice_vsi_close(struct ice_vsi *vsi)
2646 {
2647 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
2648 		ice_down(vsi);
2649 
2650 	ice_vsi_clear_napi_queues(vsi);
2651 	ice_vsi_free_irq(vsi);
2652 	ice_vsi_free_tx_rings(vsi);
2653 	ice_vsi_free_rx_rings(vsi);
2654 }
2655 
2656 /**
2657  * ice_ena_vsi - resume a VSI
2658  * @vsi: the VSI being resumed
2659  * @locked: is the rtnl_lock already held
2660  */
2661 int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
2662 {
2663 	int err = 0;
2664 
2665 	if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state))
2666 		return 0;
2667 
2668 	clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
2669 
2670 	if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
2671 			    vsi->type == ICE_VSI_SF)) {
2672 		if (netif_running(vsi->netdev)) {
2673 			if (!locked)
2674 				rtnl_lock();
2675 
2676 			err = ice_open_internal(vsi->netdev);
2677 
2678 			if (!locked)
2679 				rtnl_unlock();
2680 		}
2681 	} else if (vsi->type == ICE_VSI_CTRL) {
2682 		err = ice_vsi_open_ctrl(vsi);
2683 	}
2684 
2685 	return err;
2686 }
2687 
2688 /**
2689  * ice_dis_vsi - pause a VSI
2690  * @vsi: the VSI being paused
2691  * @locked: is the rtnl_lock already held
2692  */
2693 void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
2694 {
2695 	bool already_down = test_bit(ICE_VSI_DOWN, vsi->state);
2696 
2697 	set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
2698 
2699 	if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
2700 			    vsi->type == ICE_VSI_SF)) {
2701 		if (netif_running(vsi->netdev)) {
2702 			if (!locked)
2703 				rtnl_lock();
2704 			already_down = test_bit(ICE_VSI_DOWN, vsi->state);
2705 			if (!already_down)
2706 				ice_vsi_close(vsi);
2707 
2708 			if (!locked)
2709 				rtnl_unlock();
2710 		} else if (!already_down) {
2711 			ice_vsi_close(vsi);
2712 		}
2713 	} else if (vsi->type == ICE_VSI_CTRL && !already_down) {
2714 		ice_vsi_close(vsi);
2715 	}
2716 }
2717 
2718 /**
2719  * ice_vsi_set_napi_queues - associate netdev queues with napi
2720  * @vsi: VSI pointer
2721  *
2722  * Associate queue[s] with napi for all vectors.
2723  * The caller must hold rtnl_lock.
2724  */
2725 void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
2726 {
2727 	struct net_device *netdev = vsi->netdev;
2728 	int q_idx, v_idx;
2729 
2730 	if (!netdev)
2731 		return;
2732 
2733 	ice_for_each_rxq(vsi, q_idx)
2734 		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
2735 				     &vsi->rx_rings[q_idx]->q_vector->napi);
2736 
2737 	ice_for_each_txq(vsi, q_idx)
2738 		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX,
2739 				     &vsi->tx_rings[q_idx]->q_vector->napi);
2740 	/* Also set the interrupt number for the NAPI */
2741 	ice_for_each_q_vector(vsi, v_idx) {
2742 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2743 
2744 		netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
2745 	}
2746 }
2747 
2748 /**
2749  * ice_vsi_clear_napi_queues - dissociate netdev queues from napi
2750  * @vsi: VSI pointer
2751  *
2752  * Clear the association between all of the VSI's queues and napi.
2753  * The caller must hold rtnl_lock.
2754  */
2755 void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
2756 {
2757 	struct net_device *netdev = vsi->netdev;
2758 	int q_idx;
2759 
2760 	if (!netdev)
2761 		return;
2762 
2763 	ice_for_each_txq(vsi, q_idx)
2764 		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);
2765 
2766 	ice_for_each_rxq(vsi, q_idx)
2767 		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, NULL);
2768 }
2769 
2770 /**
2771  * ice_napi_add - register NAPI handler for the VSI
2772  * @vsi: VSI for which NAPI handler is to be registered
2773  *
2774  * This function is only called in the driver's load path. Registering the NAPI
2775  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
2776  * reset/rebuild, etc.)
2777  */
2778 void ice_napi_add(struct ice_vsi *vsi)
2779 {
2780 	int v_idx;
2781 
2782 	if (!vsi->netdev)
2783 		return;
2784 
2785 	ice_for_each_q_vector(vsi, v_idx)
2786 		netif_napi_add_config(vsi->netdev,
2787 				      &vsi->q_vectors[v_idx]->napi,
2788 				      ice_napi_poll,
2789 				      v_idx);
2790 }
2791 
2792 /**
2793  * ice_vsi_release - Delete a VSI and free its resources
2794  * @vsi: the VSI being removed
2795  *
2796  * Returns 0 on success or < 0 on error
2797  */
2798 int ice_vsi_release(struct ice_vsi *vsi)
2799 {
2800 	struct ice_pf *pf;
2801 
2802 	if (!vsi->back)
2803 		return -ENODEV;
2804 	pf = vsi->back;
2805 
2806 	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2807 		ice_rss_clean(vsi);
2808 
2809 	ice_vsi_close(vsi);
2810 
2811 	/* The Rx rule will only exist to be removed if the LLDP FW
2812 	 * engine is currently stopped
2813 	 */
2814 	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
2815 	    !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
2816 		ice_cfg_sw_lldp(vsi, false, false);
2817 
2818 	ice_vsi_decfg(vsi);
2819 
2820 	/* retain the SW VSI data structure since it is needed to unregister
2821 	 * and free the VSI netdev when the PF is not in a reset recovery
2822 	 * pending state, for example during rmmod.
2823 	 */
2824 	if (!ice_is_reset_in_progress(pf->state))
2825 		ice_vsi_delete(vsi);
2826 
2827 	return 0;
2828 }
2829 
2830 /**
2831  * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
2832  * @vsi: VSI connected with q_vectors
2833  * @coalesce: array of struct with stored coalesce
2834  *
2835  * Returns array size.
2836  */
2837 static int
2838 ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
2839 			     struct ice_coalesce_stored *coalesce)
2840 {
2841 	int i;
2842 
2843 	ice_for_each_q_vector(vsi, i) {
2844 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
2845 
2846 		coalesce[i].itr_tx = q_vector->tx.itr_settings;
2847 		coalesce[i].itr_rx = q_vector->rx.itr_settings;
2848 		coalesce[i].intrl = q_vector->intrl;
2849 
2850 		if (i < vsi->num_txq)
2851 			coalesce[i].tx_valid = true;
2852 		if (i < vsi->num_rxq)
2853 			coalesce[i].rx_valid = true;
2854 	}
2855 
2856 	return vsi->num_q_vectors;
2857 }
2858 
2859 /**
2860  * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
2861  * @vsi: VSI connected with q_vectors
2862  * @coalesce: pointer to array of struct with stored coalesce
2863  * @size: size of coalesce array
2864  *
2865  * Before calling this function, ice_vsi_rebuild_get_coalesce should be called
2866  * to save the ITR params in the array. If size is 0 or coalesce wasn't
2867  * stored, set coalesce to the default value.
2868  */
2869 static void
2870 ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
2871 			     struct ice_coalesce_stored *coalesce, int size)
2872 {
2873 	struct ice_ring_container *rc;
2874 	int i;
2875 
2876 	if ((size && !coalesce) || !vsi)
2877 		return;
2878 
2879 	/* There are a couple of cases that have to be handled here:
2880 	 *   1. The case where the number of queue vectors stays the same, but
2881 	 *      the number of Tx or Rx rings changes (the first for loop)
2882 	 *   2. The case where the number of queue vectors increased (the
2883 	 *      second for loop)
2884 	 */
2885 	for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
2886 		/* There are 2 cases to handle here and they are the same for
2887 		 * both Tx and Rx:
2888 		 *   if the entry was valid previously (coalesce[i].[tr]x_valid)
2889 		 *   and the loop variable is less than the number of rings
2890 		 *   allocated, then write the previous values
2891 		 *
2892 		 *   if the entry was not valid previously, but the loop variable
2893 		 *   is still less than the number of rings allocated (meaning
2894 		 *   the number of rings increased from before), then write out
2895 		 *   the values from the first element
2896 		 *
2897 		 *   Also, always write the ITR, even if in ITR_IS_DYNAMIC
2898 		 *   as there is no harm because the dynamic algorithm
2899 		 *   will just overwrite.
2900 		 */
2901 		if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
2902 			rc = &vsi->q_vectors[i]->rx;
2903 			rc->itr_settings = coalesce[i].itr_rx;
2904 			ice_write_itr(rc, rc->itr_setting);
2905 		} else if (i < vsi->alloc_rxq) {
2906 			rc = &vsi->q_vectors[i]->rx;
2907 			rc->itr_settings = coalesce[0].itr_rx;
2908 			ice_write_itr(rc, rc->itr_setting);
2909 		}
2910 
2911 		if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
2912 			rc = &vsi->q_vectors[i]->tx;
2913 			rc->itr_settings = coalesce[i].itr_tx;
2914 			ice_write_itr(rc, rc->itr_setting);
2915 		} else if (i < vsi->alloc_txq) {
2916 			rc = &vsi->q_vectors[i]->tx;
2917 			rc->itr_settings = coalesce[0].itr_tx;
2918 			ice_write_itr(rc, rc->itr_setting);
2919 		}
2920 
2921 		vsi->q_vectors[i]->intrl = coalesce[i].intrl;
2922 		ice_set_q_vector_intrl(vsi->q_vectors[i]);
2923 	}
2924 
2925 	/* the number of queue vectors increased so write whatever is in
2926 	 * the first element
2927 	 */
2928 	for (; i < vsi->num_q_vectors; i++) {
2929 		/* transmit */
2930 		rc = &vsi->q_vectors[i]->tx;
2931 		rc->itr_settings = coalesce[0].itr_tx;
2932 		ice_write_itr(rc, rc->itr_setting);
2933 
2934 		/* receive */
2935 		rc = &vsi->q_vectors[i]->rx;
2936 		rc->itr_settings = coalesce[0].itr_rx;
2937 		ice_write_itr(rc, rc->itr_setting);
2938 
2939 		vsi->q_vectors[i]->intrl = coalesce[0].intrl;
2940 		ice_set_q_vector_intrl(vsi->q_vectors[i]);
2941 	}
2942 }
2943 
2944 /**
2945  * ice_vsi_realloc_stat_arrays - free unused stat structures or allocate new ones
2946  * @vsi: VSI pointer
2947  */
2948 static int
2949 ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
2950 {
2951 	u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq;
2952 	u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq;
2953 	struct ice_ring_stats **tx_ring_stats;
2954 	struct ice_ring_stats **rx_ring_stats;
2955 	struct ice_vsi_stats *vsi_stat;
2956 	struct ice_pf *pf = vsi->back;
2957 	u16 prev_txq = vsi->alloc_txq;
2958 	u16 prev_rxq = vsi->alloc_rxq;
2959 	int i;
2960 
2961 	vsi_stat = pf->vsi_stats[vsi->idx];
2962 
2963 	if (req_txq < prev_txq) {
2964 		for (i = req_txq; i < prev_txq; i++) {
2965 			if (vsi_stat->tx_ring_stats[i]) {
2966 				kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
2967 				WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
2968 			}
2969 		}
2970 	}
2971 
2972 	tx_ring_stats = vsi_stat->tx_ring_stats;
2973 	vsi_stat->tx_ring_stats =
2974 		krealloc_array(vsi_stat->tx_ring_stats, req_txq,
2975 			       sizeof(*vsi_stat->tx_ring_stats),
2976 			       GFP_KERNEL | __GFP_ZERO);
2977 	if (!vsi_stat->tx_ring_stats) {
2978 		vsi_stat->tx_ring_stats = tx_ring_stats;
2979 		return -ENOMEM;
2980 	}
2981 
2982 	if (req_rxq < prev_rxq) {
2983 		for (i = req_rxq; i < prev_rxq; i++) {
2984 			if (vsi_stat->rx_ring_stats[i]) {
2985 				kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
2986 				WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
2987 			}
2988 		}
2989 	}
2990 
2991 	rx_ring_stats = vsi_stat->rx_ring_stats;
2992 	vsi_stat->rx_ring_stats =
2993 		krealloc_array(vsi_stat->rx_ring_stats, req_rxq,
2994 			       sizeof(*vsi_stat->rx_ring_stats),
2995 			       GFP_KERNEL | __GFP_ZERO);
2996 	if (!vsi_stat->rx_ring_stats) {
2997 		vsi_stat->rx_ring_stats = rx_ring_stats;
2998 		return -ENOMEM;
2999 	}
3000 
3001 	return 0;
3002 }
3003 
3004 /**
3005  * ice_vsi_rebuild - Rebuild VSI after reset
3006  * @vsi: VSI to be rebuilt
3007  * @vsi_flags: flags used for VSI rebuild flow
3008  *
3009  * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or
3010  * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
3011  *
3012  * Returns 0 on success and negative value on failure
3013  */
3014 int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
3015 {
3016 	struct ice_coalesce_stored *coalesce;
3017 	int prev_num_q_vectors;
3018 	struct ice_pf *pf;
3019 	int ret;
3020 
3021 	if (!vsi)
3022 		return -EINVAL;
3023 
3024 	vsi->flags = vsi_flags;
3025 	pf = vsi->back;
3026 	if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
3027 		return -EINVAL;
3028 
3029 	mutex_lock(&vsi->xdp_state_lock);
3030 
3031 	ret = ice_vsi_realloc_stat_arrays(vsi);
3032 	if (ret)
3033 		goto unlock;
3034 
3035 	ice_vsi_decfg(vsi);
3036 	ret = ice_vsi_cfg_def(vsi);
3037 	if (ret)
3038 		goto unlock;
3039 
3040 	coalesce = kcalloc(vsi->num_q_vectors,
3041 			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);
3042 	if (!coalesce) {
3043 		ret = -ENOMEM;
3044 		goto decfg;
3045 	}
3046 
3047 	prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
3048 
3049 	ret = ice_vsi_cfg_tc_lan(pf, vsi);
3050 	if (ret) {
3051 		if (vsi_flags & ICE_VSI_FLAG_INIT) {
3052 			ret = -EIO;
3053 			goto free_coalesce;
3054 		}
3055 
3056 		ret = ice_schedule_reset(pf, ICE_RESET_PFR);
3057 		goto free_coalesce;
3058 	}
3059 
3060 	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
3061 	clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state);
3062 
3063 free_coalesce:
3064 	kfree(coalesce);
3065 decfg:
3066 	if (ret)
3067 		ice_vsi_decfg(vsi);
3068 unlock:
3069 	mutex_unlock(&vsi->xdp_state_lock);
3070 	return ret;
3071 }
3072 
3073 /**
3074  * ice_is_reset_in_progress - check for a reset in progress
3075  * @state: PF state field
3076  */
3077 bool ice_is_reset_in_progress(unsigned long *state)
3078 {
3079 	return test_bit(ICE_RESET_OICR_RECV, state) ||
3080 	       test_bit(ICE_PFR_REQ, state) ||
3081 	       test_bit(ICE_CORER_REQ, state) ||
3082 	       test_bit(ICE_GLOBR_REQ, state);
3083 }
3084 
3085 /**
3086  * ice_wait_for_reset - Wait for driver to finish reset and rebuild
3087  * @pf: pointer to the PF structure
3088  * @timeout: length of time to wait, in jiffies
3089  *
3090  * Wait (sleep) for a short time until the driver finishes cleaning up from
3091  * a device reset. The caller must be able to sleep. Use this to delay
3092  * operations that could fail while the driver is cleaning up after a device
3093  * reset.
3094  *
3095  * Returns 0 on success, -EBUSY if the reset is not finished within the
3096  * timeout, and -ERESTARTSYS if the thread was interrupted.
3097  */
3098 int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout)
3099 {
3100 	long ret;
3101 
3102 	ret = wait_event_interruptible_timeout(pf->reset_wait_queue,
3103 					       !ice_is_reset_in_progress(pf->state),
3104 					       timeout);
3105 	if (ret < 0)
3106 		return ret;
3107 	else if (!ret)
3108 		return -EBUSY;
3109 	else
3110 		return 0;
3111 }
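/* Usage sketch (illustrative): delay an operation until any in-flight
 * reset completes, giving up after 10 seconds.
 *
 *	err = ice_wait_for_reset(pf, 10 * HZ);
 *	if (err)
 *		return err;
 */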
3112 
3113 /**
3114  * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
3115  * @vsi: VSI being configured
3116  * @ctx: the context buffer returned from AQ VSI update command
3117  */
3118 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
3119 {
3120 	vsi->info.mapping_flags = ctx->info.mapping_flags;
3121 	memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
3122 	       sizeof(vsi->info.q_mapping));
3123 	memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
3124 	       sizeof(vsi->info.tc_mapping));
3125 }
3126 
3127 /**
3128  * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
3129  * @vsi: the VSI being configured
3130  * @ena_tc: TC map to be enabled
3131  */
3132 void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
3133 {
3134 	struct net_device *netdev = vsi->netdev;
3135 	struct ice_pf *pf = vsi->back;
3136 	int numtc = vsi->tc_cfg.numtc;
3137 	struct ice_dcbx_cfg *dcbcfg;
3138 	u8 netdev_tc;
3139 	int i;
3140 
3141 	if (!netdev)
3142 		return;
3143 
3144 	/* A CHNL VSI doesn't have its own netdev, hence no netdev_tc */
3145 	if (vsi->type == ICE_VSI_CHNL)
3146 		return;
3147 
3148 	if (!ena_tc) {
3149 		netdev_reset_tc(netdev);
3150 		return;
3151 	}
3152 
3153 	if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf))
3154 		numtc = vsi->all_numtc;
3155 
3156 	if (netdev_set_num_tc(netdev, numtc))
3157 		return;
3158 
3159 	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
3160 
3161 	ice_for_each_traffic_class(i)
3162 		if (vsi->tc_cfg.ena_tc & BIT(i))
3163 			netdev_set_tc_queue(netdev,
3164 					    vsi->tc_cfg.tc_info[i].netdev_tc,
3165 					    vsi->tc_cfg.tc_info[i].qcount_tx,
3166 					    vsi->tc_cfg.tc_info[i].qoffset);
3167 	/* setup TC queue map for CHNL TCs */
3168 	ice_for_each_chnl_tc(i) {
3169 		if (!(vsi->all_enatc & BIT(i)))
3170 			break;
3171 		if (!vsi->mqprio_qopt.qopt.count[i])
3172 			break;
3173 		netdev_set_tc_queue(netdev, i,
3174 				    vsi->mqprio_qopt.qopt.count[i],
3175 				    vsi->mqprio_qopt.qopt.offset[i]);
3176 	}
3177 
3178 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3179 		return;
3180 
3181 	for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
3182 		u8 ets_tc = dcbcfg->etscfg.prio_table[i];
3183 
3184 		/* Get the mapped netdev TC# for the UP */
3185 		netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
3186 		netdev_set_prio_tc_map(netdev, i, netdev_tc);
3187 	}
3188 }
3189 
3190 /**
3191  * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
3192  * @vsi: the VSI being configured
3193  * @ctxt: VSI context structure
3194  * @ena_tc: number of traffic classes to enable
3195  *
3196  * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
3197  */
3198 static int
3199 ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
3200 			   u8 ena_tc)
3201 {
3202 	u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
3203 	u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
3204 	int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
3205 	u16 new_txq, new_rxq;
3206 	u8 netdev_tc = 0;
3207 	int i;
3208 
3209 	vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
3210 
3211 	pow = order_base_2(tc0_qcount);
3212 	qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, tc0_offset);
3213 	qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
3214 
3215 	ice_for_each_traffic_class(i) {
3216 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
3217 			/* TC is not enabled */
3218 			vsi->tc_cfg.tc_info[i].qoffset = 0;
3219 			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
3220 			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
3221 			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
3222 			ctxt->info.tc_mapping[i] = 0;
3223 			continue;
3224 		}
3225 
3226 		offset = vsi->mqprio_qopt.qopt.offset[i];
3227 		qcount_rx = vsi->mqprio_qopt.qopt.count[i];
3228 		qcount_tx = vsi->mqprio_qopt.qopt.count[i];
3229 		vsi->tc_cfg.tc_info[i].qoffset = offset;
3230 		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
3231 		vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
3232 		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
3233 	}
3234 
3235 	if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
3236 		ice_for_each_chnl_tc(i) {
3237 			if (!(vsi->all_enatc & BIT(i)))
3238 				continue;
3239 			offset = vsi->mqprio_qopt.qopt.offset[i];
3240 			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
3241 			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
3242 		}
3243 	}
3244 
3245 	new_txq = offset + qcount_tx;
3246 	if (new_txq > vsi->alloc_txq) {
3247 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u) than were allocated (%u)!\n",
3248 			new_txq, vsi->alloc_txq);
3249 		return -EINVAL;
3250 	}
3251 
3252 	new_rxq = offset + qcount_rx;
3253 	if (new_rxq > vsi->alloc_rxq) {
3254 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u) than were allocated (%u)!\n",
3255 			new_rxq, vsi->alloc_rxq);
3256 		return -EINVAL;
3257 	}
3258 
3259 	/* Set actual Tx/Rx queue pairs */
3260 	vsi->num_txq = new_txq;
3261 	vsi->num_rxq = new_rxq;
3262 
3263 	/* Setup queue TC[0].qmap for given VSI context */
3264 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
3265 	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
3266 	ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);
3267 
3268 	/* Find the queue count available for channel VSIs and the starting
3269 	 * offset for channel VSIs
3270 	 */
3271 	if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
3272 		vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
3273 		vsi->next_base_q = tc0_qcount;
3274 	}
3275 	dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n",  vsi->num_txq);
3276 	dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n",  vsi->num_rxq);
3277 	dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
3278 		vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
3279 
3280 	return 0;
3281 }
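/* Worked example of the TC0 qmap math above: with tc0_offset = 0 and
 * tc0_qcount = 6, pow = order_base_2(6) = 3, so the qmap advertises a
 * contiguous block of 2^3 = 8 queues starting at offset 0 even though
 * only 6 are used; num_txq/num_rxq still hold the exact counts.
 */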
3282 
3283 /**
3284  * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
3285  * @vsi: VSI to be configured
3286  * @ena_tc: TC bitmap
3287  *
3288  * VSI queues expected to be quiesced before calling this function
3289  */
3290 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
3291 {
3292 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
3293 	struct ice_pf *pf = vsi->back;
3294 	struct ice_tc_cfg old_tc_cfg;
3295 	struct ice_vsi_ctx *ctx;
3296 	struct device *dev;
3297 	int i, ret = 0;
3298 	u8 num_tc = 0;
3299 
3300 	dev = ice_pf_to_dev(pf);
3301 	if (vsi->tc_cfg.ena_tc == ena_tc &&
3302 	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
3303 		return 0;
3304 
3305 	ice_for_each_traffic_class(i) {
3306 		/* build bitmap of enabled TCs */
3307 		if (ena_tc & BIT(i))
3308 			num_tc++;
3309 		/* populate max_txqs per TC */
3310 		max_txqs[i] = vsi->alloc_txq;
3311 		/* Update max_txqs if this is a CHNL VSI, because alloc_t[r]xq
3312 		 * is zero for a CHNL VSI; hence use num_txq as max_txqs instead
3313 		 */
3314 		if (vsi->type == ICE_VSI_CHNL &&
3315 		    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3316 			max_txqs[i] = vsi->num_txq;
3317 	}
3318 
3319 	memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
3320 	vsi->tc_cfg.ena_tc = ena_tc;
3321 	vsi->tc_cfg.numtc = num_tc;
3322 
3323 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
3324 	if (!ctx)
3325 		return -ENOMEM;
3326 
3327 	ctx->vf_num = 0;
3328 	ctx->info = vsi->info;
3329 
3330 	if (vsi->type == ICE_VSI_PF &&
3331 	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3332 		ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
3333 	else
3334 		ret = ice_vsi_setup_q_map(vsi, ctx);
3335 
3336 	if (ret) {
3337 		memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
3338 		goto out;
3339 	}
3340 
3341 	/* must indicate which sections of the VSI context are being modified */
3342 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
3343 	ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
3344 	if (ret) {
3345 		dev_info(dev, "Failed VSI Update\n");
3346 		goto out;
3347 	}
3348 
3349 	if (vsi->type == ICE_VSI_PF &&
3350 	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3351 		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
3352 	else
3353 		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
3354 				      vsi->tc_cfg.ena_tc, max_txqs);
3355 
3356 	if (ret) {
3357 		dev_err(dev, "VSI %d failed TC config, error %d\n",
3358 			vsi->vsi_num, ret);
3359 		goto out;
3360 	}
3361 	ice_vsi_update_q_map(vsi, ctx);
3362 	vsi->info.valid_sections = 0;
3363 
3364 	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
3365 out:
3366 	kfree(ctx);
3367 	return ret;
3368 }
3369 
3370 /**
3371  * ice_update_ring_stats - Update ring statistics
3372  * @stats: stats to be updated
3373  * @pkts: number of processed packets
3374  * @bytes: number of processed bytes
3375  *
3376  * This function assumes that caller has acquired a u64_stats_sync lock.
3377  */
3378 static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
3379 {
3380 	stats->bytes += bytes;
3381 	stats->pkts += pkts;
3382 }
3383 
3384 /**
3385  * ice_update_tx_ring_stats - Update Tx ring specific counters
3386  * @tx_ring: ring to update
3387  * @pkts: number of processed packets
3388  * @bytes: number of processed bytes
3389  */
3390 void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
3391 {
3392 	u64_stats_update_begin(&tx_ring->ring_stats->syncp);
3393 	ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
3394 	u64_stats_update_end(&tx_ring->ring_stats->syncp);
3395 }
3396 
3397 /**
3398  * ice_update_rx_ring_stats - Update Rx ring specific counters
3399  * @rx_ring: ring to update
3400  * @pkts: number of processed packets
3401  * @bytes: number of processed bytes
3402  */
3403 void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
3404 {
3405 	u64_stats_update_begin(&rx_ring->ring_stats->syncp);
3406 	ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
3407 	u64_stats_update_end(&rx_ring->ring_stats->syncp);
3408 }
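/* The writer-side helpers above pair with u64_stats readers elsewhere in
 * the driver. A reader-side sketch (illustrative, assuming a ring with a
 * populated ring_stats pointer):
 *
 *	unsigned int start;
 *	u64 pkts, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&ring->ring_stats->syncp);
 *		pkts = ring->ring_stats->stats.pkts;
 *		bytes = ring->ring_stats->stats.bytes;
 *	} while (u64_stats_fetch_retry(&ring->ring_stats->syncp, start));
 */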
3409 
3410 /**
3411  * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
3412  * @pi: port info of the switch with default VSI
3413  *
3414  * Return true if there is a single VSI in the default forwarding VSI list
3415  */
3416 bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
3417 {
3418 	bool exists = false;
3419 
3420 	ice_check_if_dflt_vsi(pi, 0, &exists);
3421 	return exists;
3422 }
3423 
3424 /**
3425  * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
3426  * @vsi: VSI to compare against default forwarding VSI
3427  *
3428  * If the VSI passed in is the default forwarding VSI, then return true; else
3429  * return false
3430  */
3431 bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
3432 {
3433 	return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
3434 }
3435 
3436 /**
3437  * ice_set_dflt_vsi - set the default forwarding VSI
3438  * @vsi: VSI getting set as the default forwarding VSI on the switch
3439  *
3440  * If the VSI passed in is already the default VSI and it's enabled, just
3441  * return success.
3442  *
3443  * Otherwise try to set the VSI passed in as the switch's default VSI and
3444  * return the result.
3445  */
3446 int ice_set_dflt_vsi(struct ice_vsi *vsi)
3447 {
3448 	struct device *dev;
3449 	int status;
3450 
3451 	if (!vsi)
3452 		return -EINVAL;
3453 
3454 	dev = ice_pf_to_dev(vsi->back);
3455 
3456 	if (ice_lag_is_switchdev_running(vsi->back)) {
3457 		dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n",
3458 			vsi->vsi_num);
3459 		return 0;
3460 	}
3461 
3462 	/* the VSI passed in is already the default VSI */
3463 	if (ice_is_vsi_dflt_vsi(vsi)) {
3464 		dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
3465 			vsi->vsi_num);
3466 		return 0;
3467 	}
3468 
3469 	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
3470 	if (status) {
3471 		dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
3472 			vsi->vsi_num, status);
3473 		return status;
3474 	}
3475 
3476 	return 0;
3477 }
3478 
3479 /**
3480  * ice_clear_dflt_vsi - clear the default forwarding VSI
3481  * @vsi: VSI to remove from filter list
3482  *
3483  * If the switch has no default VSI or it's not enabled then return error.
3484  *
3485  * Otherwise try to clear the default VSI and return the result.
3486  */
3487 int ice_clear_dflt_vsi(struct ice_vsi *vsi)
3488 {
3489 	struct device *dev;
3490 	int status;
3491 
3492 	if (!vsi)
3493 		return -EINVAL;
3494 
3495 	dev = ice_pf_to_dev(vsi->back);
3496 
3497 	/* there is no default VSI configured */
3498 	if (!ice_is_dflt_vsi_in_use(vsi->port_info))
3499 		return -ENODEV;
3500 
3501 	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
3502 				  ICE_FLTR_RX);
3503 	if (status) {
3504 		dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
3505 			vsi->vsi_num, status);
3506 		return -EIO;
3507 	}
3508 
3509 	return 0;
3510 }
3511 
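/* Usage sketch (hypothetical caller, not part of this file): the default
 * forwarding VSI receives switch traffic that matches no other filter, so
 * promiscuous-mode handling on the PF netdev is typically mapped onto
 * these helpers along the lines of:
 *
 *	if (netdev->flags & IFF_PROMISC)
 *		err = ice_set_dflt_vsi(vsi);
 *	else
 *		err = ice_clear_dflt_vsi(vsi);
 *
 * Such a caller may need to tolerate ice_clear_dflt_vsi() returning
 * -ENODEV when no default VSI is configured.
 */
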
/**
 * ice_get_link_speed_mbps - get link speed in Mbps
 * @vsi: the VSI whose link speed is being queried
 *
 * Return the current VSI link speed in Mbps, or 0 if the speed is unknown.
 */
int ice_get_link_speed_mbps(struct ice_vsi *vsi)
{
	unsigned int link_speed;

	link_speed = vsi->port_info->phy.link_info.link_speed;

	/* link_speed is a one-hot ICE_AQ_LINK_SPEED_* bit; fls() - 1 turns
	 * it into the index that ice_get_link_speed() maps to Mbps
	 */
	return (int)ice_get_link_speed(fls(link_speed) - 1);
}

/**
 * ice_get_link_speed_kbps - get link speed in Kbps
 * @vsi: the VSI whose link speed is being queried
 *
 * Return the current VSI link speed in Kbps, or 0 if the speed is unknown.
 */
int ice_get_link_speed_kbps(struct ice_vsi *vsi)
{
	int speed_mbps;

	speed_mbps = ice_get_link_speed_mbps(vsi);

	return speed_mbps * 1000;
}

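/* Worked example (bit values assumed from ice_adminq_cmd.h, worth
 * double-checking): if ICE_AQ_LINK_SPEED_10GB is BIT(5), then fls() - 1
 * yields index 5 and ice_get_link_speed(5) returns 10000 Mbps, so the
 * Kbps variant reports 10000000. With link down, link_speed is 0, the
 * index underflows to an out-of-range value, and ice_get_link_speed()
 * falls back to 0 (unknown), which is why both helpers document the 0
 * return.
 */
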
/**
 * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
 * @vsi: VSI to be configured
 * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
 *
 * If min_tx_rate is 0, the minimum BW limit profile is cleared; otherwise a
 * non-zero value will enforce a minimum BW limit for the VSI on TC 0.
 */
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;
	int speed;

	dev = ice_pf_to_dev(pf);
	if (!vsi->port_info) {
		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
			vsi->idx, vsi->type);
		return -EINVAL;
	}

	speed = ice_get_link_speed_kbps(vsi);
	if (min_tx_rate > (u64)speed) {
		dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
			min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
			speed);
		return -EINVAL;
	}

	/* Configure min BW for VSI limit */
	if (min_tx_rate) {
		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
						   ICE_MIN_BW, min_tx_rate);
		if (status) {
			dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
				min_tx_rate, ice_vsi_type_str(vsi->type),
				vsi->idx);
			return status;
		}

		dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
			min_tx_rate, ice_vsi_type_str(vsi->type));
	} else {
		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
							vsi->idx, 0,
							ICE_MIN_BW);
		if (status) {
			dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
				ice_vsi_type_str(vsi->type), vsi->idx);
			return status;
		}

		dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
			ice_vsi_type_str(vsi->type), vsi->idx);
	}

	return 0;
}

/**
 * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
 * @vsi: VSI to be configured
 * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
 *
 * If max_tx_rate is 0, the maximum BW limit profile is cleared; otherwise a
 * non-zero value will enforce a maximum BW limit for the VSI on TC 0.
 */
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;
	int speed;

	dev = ice_pf_to_dev(pf);
	if (!vsi->port_info) {
		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
			vsi->idx, vsi->type);
		return -EINVAL;
	}

	speed = ice_get_link_speed_kbps(vsi);
	if (max_tx_rate > (u64)speed) {
		dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
			speed);
		return -EINVAL;
	}

	/* Configure max BW for VSI limit */
	if (max_tx_rate) {
		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
						   ICE_MAX_BW, max_tx_rate);
		if (status) {
			dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
				max_tx_rate, ice_vsi_type_str(vsi->type),
				vsi->idx);
			return status;
		}

		dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
	} else {
		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
							vsi->idx, 0,
							ICE_MAX_BW);
		if (status) {
			dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
				ice_vsi_type_str(vsi->type), vsi->idx);
			return status;
		}

		dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
			ice_vsi_type_str(vsi->type), vsi->idx);
	}

	return 0;
}

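/* Usage sketch (hypothetical caller): both helpers take rates in Kbps and
 * 0 clears the corresponding profile, so a caller working in Mbps (for
 * instance an ndo_set_vf_rate() handler) scales first:
 *
 *	err = ice_set_min_bw_limit(vsi, (u64)min_tx_rate_mbps * 1000);
 *	if (!err)
 *		err = ice_set_max_bw_limit(vsi, (u64)max_tx_rate_mbps * 1000);
 */
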
/**
 * ice_set_link - turn on/off physical link
 * @vsi: VSI to modify physical link on
 * @ena: turn on/off physical link
 */
int ice_set_link(struct ice_vsi *vsi, bool ena)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_port_info *pi = vsi->port_info;
	struct ice_hw *hw = pi->hw;
	int status;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	status = ice_aq_set_link_restart_an(pi, ena, NULL);

	/* If link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
	 * This is not a fatal error, so print a warning message and return
	 * a success code. Return an error if FW returns an error code other
	 * than ICE_AQ_RC_EMODE.
	 */
	if (status == -EIO) {
		if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
			dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
				(ena ? "ON" : "OFF"), status,
				ice_aq_str(hw->adminq.sq_last_status));
	} else if (status) {
		dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
			(ena ? "ON" : "OFF"), status,
			ice_aq_str(hw->adminq.sq_last_status));
		return status;
	}

	return 0;
}

/**
 * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
 * @vsi: VSI used to add VLAN filters
 *
 * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are based
 * on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8) doesn't
 * matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
 * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
 *
 * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
 * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
 * traffic in SVM, since the VLAN TPID isn't part of filtering.
 *
 * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
 * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
 * part of filtering.
 */
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct ice_vlan vlan;
	int err;

	vlan = ICE_VLAN(0, 0, 0);
	err = vlan_ops->add_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* in SVM both VLAN 0 filters are identical */
	if (!ice_is_dvm_ena(&vsi->back->hw))
		return 0;

	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
	err = vlan_ops->add_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	return 0;
}

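/* Worked example: after a successful ice_vsi_add_vlan_zero() an SVM VSI
 * carries one VLAN 0 filter and a DVM VSI carries two, matching the
 * ICE_VLAN() calls above (arguments assumed to be tpid, vid, prio):
 *
 *	SVM: ICE_VLAN(0, 0, 0)
 *	DVM: ICE_VLAN(0, 0, 0) and ICE_VLAN(ETH_P_8021Q, 0, 0)
 *
 * The -EEXIST tolerance makes the call idempotent, so callers may safely
 * re-add the filters, e.g. during VSI rebuild.
 */
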
/**
 * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
 * @vsi: VSI used to delete VLAN filters
 *
 * Delete the VLAN 0 filters in the same manner that they were added in
 * ice_vsi_add_vlan_zero.
 */
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct ice_vlan vlan;
	int err;

	vlan = ICE_VLAN(0, 0, 0);
	err = vlan_ops->del_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* in SVM both VLAN 0 filters are identical */
	if (!ice_is_dvm_ena(&vsi->back->hw))
		return 0;

	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
	err = vlan_ops->del_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* when deleting the last VLAN filter, make sure to disable the VLAN
	 * promisc mode so the filter isn't left by accident
	 */
	return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
				     ICE_MCAST_VLAN_PROMISC_BITS, 0);
}

/**
 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
 * @vsi: VSI used to get the VLAN mode
 *
 * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
 * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
 */
static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
{
#define ICE_DVM_NUM_ZERO_VLAN_FLTRS	2
#define ICE_SVM_NUM_ZERO_VLAN_FLTRS	1
	/* no VLAN 0 filter is created when a port VLAN is active */
	if (vsi->type == ICE_VSI_VF) {
		if (WARN_ON(!vsi->vf))
			return 0;

		if (ice_vf_is_port_vlan_ena(vsi->vf))
			return 0;
	}

	if (ice_is_dvm_ena(&vsi->back->hw))
		return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
	else
		return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
}

/**
 * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
 * @vsi: VSI used to determine if any non-zero VLANs have been added
 */
bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
{
	return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
}

/**
 * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
 * @vsi: VSI used to get the number of non-zero VLANs added
 */
u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
{
	return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
}

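/* Worked example: a DVM-mode VSI with vsi->num_vlan == 3 owns 2 VLAN 0
 * filters (ICE_DVM_NUM_ZERO_VLAN_FLTRS), so ice_vsi_num_non_zero_vlans()
 * reports 1 and ice_vsi_has_non_zero_vlans() is true; the same count in
 * SVM mode would report 2. A VF VSI with an active port VLAN contributes
 * no VLAN 0 filters, so all of its num_vlan entries count as non-zero.
 */
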
/**
 * ice_is_feature_supported - check if a feature is supported
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to be checked
 *
 * Returns true if the feature is supported, false otherwise.
 */
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return false;

	return test_bit(f, pf->features);
}

/**
 * ice_set_feature_support - mark a feature as supported
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to set
 */
void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return;

	set_bit(f, pf->features);
}

/**
 * ice_clear_feature_support - mark a feature as unsupported
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to clear
 */
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return;

	clear_bit(f, pf->features);
}

/**
 * ice_init_feature_support - set up the supported feature bits
 * @pf: pointer to the struct ice_pf instance
 *
 * Called during init to set up the features supported by this device.
 */
void ice_init_feature_support(struct ice_pf *pf)
{
	switch (pf->hw.device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		ice_set_feature_support(pf, ICE_F_DSCP);
		if (ice_is_phy_rclk_in_netlist(&pf->hw))
			ice_set_feature_support(pf, ICE_F_PHY_RCLK);
		/* If we don't own the timer - don't enable other caps */
		if (!ice_pf_src_tmr_owned(pf))
			break;
		if (ice_is_cgu_in_netlist(&pf->hw))
			ice_set_feature_support(pf, ICE_F_CGU);
		if (ice_is_clock_mux_in_netlist(&pf->hw))
			ice_set_feature_support(pf, ICE_F_SMA_CTRL);
		if (ice_gnss_is_gps_present(&pf->hw))
			ice_set_feature_support(pf, ICE_F_GNSS);
		break;
	default:
		break;
	}

	if (pf->hw.mac_type == ICE_MAC_E830)
		ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
}

/**
 * ice_vsi_update_security - update security block in VSI
 * @vsi: pointer to VSI structure
 * @fill: function pointer to fill ctx
 */
int
ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
{
	struct ice_vsi_ctx ctx = { 0 };

	ctx.info = vsi->info;
	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	fill(&ctx);

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}

/**
 * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
			       (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}

/**
 * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
			       ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}

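/* Usage sketch (hypothetical caller): the two helpers above are shaped to
 * be passed as the @fill callback of ice_vsi_update_security(), e.g. when
 * toggling MAC anti-spoof on a VF VSI:
 *
 *	if (enable_spoofchk)
 *		err = ice_vsi_update_security(vsi,
 *					      ice_vsi_ctx_set_antispoof);
 *	else
 *		err = ice_vsi_update_security(vsi,
 *					      ice_vsi_ctx_clear_antispoof);
 *
 * ice_vsi_update_security() marks only the security section valid, so the
 * rest of the cached vsi->info is sent to firmware unchanged.
 */
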
/**
 * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
 * @vsi: pointer to VSI structure
 * @set: set or unset the bit
 */
int
ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
{
	struct ice_vsi_ctx ctx = {
		.info	= vsi->info,
	};

	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
	if (set)
		ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
	else
		ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}