xref: /linux/drivers/net/ethernet/intel/ice/ice_base.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #include <net/xdp_sock_drv.h>
5 #include "ice_base.h"
6 #include "ice_lib.h"
7 #include "ice_dcb_lib.h"
8 #include "ice_sriov.h"
9 
10 /**
11  * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
12  * @qs_cfg: gathered variables needed for PF->VSI queues assignment
13  *
14  * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
15  */
16 static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
17 {
18 	unsigned int offset, i;
19 
20 	mutex_lock(qs_cfg->qs_mutex);
21 	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
22 					    0, qs_cfg->q_count, 0);
23 	if (offset >= qs_cfg->pf_map_size) {
24 		mutex_unlock(qs_cfg->qs_mutex);
25 		return -ENOMEM;
26 	}
27 
28 	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
29 	for (i = 0; i < qs_cfg->q_count; i++)
30 		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
31 	mutex_unlock(qs_cfg->qs_mutex);
32 
33 	return 0;
34 }
35 
36 /**
37  * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
38  * @qs_cfg: gathered variables needed for pf->vsi queues assignment
39  *
40  * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
41  */
42 static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
43 {
44 	unsigned int i, index = 0;
45 
46 	mutex_lock(qs_cfg->qs_mutex);
47 	for (i = 0; i < qs_cfg->q_count; i++) {
48 		index = find_next_zero_bit(qs_cfg->pf_map,
49 					   qs_cfg->pf_map_size, index);
50 		if (index >= qs_cfg->pf_map_size)
51 			goto err_scatter;
52 		set_bit(index, qs_cfg->pf_map);
53 		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
54 	}
55 	mutex_unlock(qs_cfg->qs_mutex);
56 
57 	return 0;
58 err_scatter:
59 	for (index = 0; index < i; index++) {
60 		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
61 		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
62 	}
63 	mutex_unlock(qs_cfg->qs_mutex);
64 
65 	return -ENOMEM;
66 }
67 
68 /**
69  * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
70  * @pf: the PF being configured
71  * @pf_q: the PF queue
72  * @ena: enable or disable state of the queue
73  *
74  * This routine will wait for the given Rx queue of the PF to reach the
75  * enabled or disabled state.
76  * Returns -ETIMEDOUT if the queue fails to reach the requested state after
77  * multiple retries; otherwise returns 0 on success.
78  */
79 static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
80 {
81 	int i;
82 
83 	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
84 		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
85 			      QRX_CTRL_QENA_STAT_M))
86 			return 0;
87 
88 		usleep_range(20, 40);
89 	}
90 
91 	return -ETIMEDOUT;
92 }
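
/* Rough upper bound on the wait (derived from the loop above): at most
 * ICE_Q_WAIT_MAX_RETRY iterations of up to 40 usecs each before
 * ice_pf_rxq_wait() gives up with -ETIMEDOUT.
 */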
93 
94 /**
95  * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
96  * @vsi: the VSI being configured
97  * @v_idx: index of the vector in the VSI struct
98  *
99  * We allocate one q_vector and set the default ITR values associated
100  * with this q_vector. If allocation fails we return -ENOMEM.
101  */
102 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
103 {
104 	struct ice_pf *pf = vsi->back;
105 	struct ice_q_vector *q_vector;
106 	int err;
107 
108 	/* allocate q_vector */
109 	q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
110 	if (!q_vector)
111 		return -ENOMEM;
112 
113 	q_vector->vsi = vsi;
114 	q_vector->v_idx = v_idx;
115 	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
116 	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
117 	q_vector->tx.itr_mode = ITR_DYNAMIC;
118 	q_vector->rx.itr_mode = ITR_DYNAMIC;
119 	q_vector->tx.type = ICE_TX_CONTAINER;
120 	q_vector->rx.type = ICE_RX_CONTAINER;
121 	q_vector->irq.index = -ENOENT;
122 
123 	if (vsi->type == ICE_VSI_VF) {
124 		ice_calc_vf_reg_idx(vsi->vf, q_vector);
125 		goto out;
126 	} else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
127 		struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);
128 
129 		if (ctrl_vsi) {
130 			if (unlikely(!ctrl_vsi->q_vectors)) {
131 				err = -ENOENT;
132 				goto err_free_q_vector;
133 			}
134 
135 			q_vector->irq = ctrl_vsi->q_vectors[0]->irq;
136 			goto skip_alloc;
137 		}
138 	}
139 
140 	q_vector->irq = ice_alloc_irq(pf, vsi->irq_dyn_alloc);
141 	if (q_vector->irq.index < 0) {
142 		err = -ENOMEM;
143 		goto err_free_q_vector;
144 	}
145 
146 skip_alloc:
147 	q_vector->reg_idx = q_vector->irq.index;
148 	q_vector->vf_reg_idx = q_vector->irq.index;
149 
150 	/* This will not be called in the driver load path because the netdev
151 	 * will not be created yet. All other cases will register the NAPI
152 	 * handler here (e.g. resume, reset/rebuild, etc.)
153 	 */
154 	if (vsi->netdev)
155 		netif_napi_add_config(vsi->netdev, &q_vector->napi,
156 				      ice_napi_poll, v_idx);
157 
158 out:
159 	/* tie q_vector and VSI together */
160 	vsi->q_vectors[v_idx] = q_vector;
161 
162 	return 0;
163 
164 err_free_q_vector:
165 	kfree(q_vector);
166 
167 	return err;
168 }
169 
170 /**
171  * ice_free_q_vector - Free memory allocated for a specific interrupt vector
172  * @vsi: VSI having the memory freed
173  * @v_idx: index of the vector to be freed
174  */
175 static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
176 {
177 	struct ice_q_vector *q_vector;
178 	struct ice_pf *pf = vsi->back;
179 	struct ice_tx_ring *tx_ring;
180 	struct ice_rx_ring *rx_ring;
181 	struct device *dev;
182 
183 	dev = ice_pf_to_dev(pf);
184 	if (!vsi->q_vectors[v_idx]) {
185 		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
186 		return;
187 	}
188 	q_vector = vsi->q_vectors[v_idx];
189 
190 	ice_for_each_tx_ring(tx_ring, vsi->q_vectors[v_idx]->tx)
191 		tx_ring->q_vector = NULL;
192 
193 	ice_for_each_rx_ring(rx_ring, vsi->q_vectors[v_idx]->rx)
194 		rx_ring->q_vector = NULL;
195 
196 	/* only VSI with an associated netdev is set up with NAPI */
197 	if (vsi->netdev)
198 		netif_napi_del(&q_vector->napi);
199 
200 	/* release MSIX interrupt if q_vector had interrupt allocated */
201 	if (q_vector->irq.index < 0)
202 		goto free_q_vector;
203 
204 	/* only free last VF ctrl vsi interrupt */
205 	if (vsi->type == ICE_VSI_CTRL && vsi->vf &&
206 	    ice_get_vf_ctrl_vsi(pf, vsi))
207 		goto free_q_vector;
208 
209 	ice_free_irq(pf, q_vector->irq);
210 
211 free_q_vector:
212 	kfree(q_vector);
213 	vsi->q_vectors[v_idx] = NULL;
214 }
215 
216 /**
217  * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
218  * @hw: board specific structure
219  */
220 static void ice_cfg_itr_gran(struct ice_hw *hw)
221 {
222 	u32 regval = rd32(hw, GLINT_CTL);
223 
224 	/* no need to update global register if ITR gran is already set */
225 	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
226 	    (FIELD_GET(GLINT_CTL_ITR_GRAN_200_M, regval) == ICE_ITR_GRAN_US) &&
227 	    (FIELD_GET(GLINT_CTL_ITR_GRAN_100_M, regval) == ICE_ITR_GRAN_US) &&
228 	    (FIELD_GET(GLINT_CTL_ITR_GRAN_50_M, regval) == ICE_ITR_GRAN_US) &&
229 	    (FIELD_GET(GLINT_CTL_ITR_GRAN_25_M, regval) == ICE_ITR_GRAN_US))
230 		return;
231 
232 	regval = FIELD_PREP(GLINT_CTL_ITR_GRAN_200_M, ICE_ITR_GRAN_US) |
233 		 FIELD_PREP(GLINT_CTL_ITR_GRAN_100_M, ICE_ITR_GRAN_US) |
234 		 FIELD_PREP(GLINT_CTL_ITR_GRAN_50_M, ICE_ITR_GRAN_US) |
235 		 FIELD_PREP(GLINT_CTL_ITR_GRAN_25_M, ICE_ITR_GRAN_US);
236 	wr32(hw, GLINT_CTL, regval);
237 }
238 
239 /**
240  * ice_calc_txq_handle - calculate the queue handle
241  * @vsi: VSI that ring belongs to
242  * @ring: ring to get the absolute queue index
243  * @tc: traffic class number
244  */
245 static u16
246 ice_calc_txq_handle(const struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
247 {
248 	WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
249 
250 	if (ring->ch)
251 		return ring->q_index - ring->ch->base_q;
252 
253 	/* The queue's index within its TC is obtained by subtracting the
254 	 * queue offset of the TC that the ring belongs to from the ring's
255 	 * absolute queue index.
256 	 */
257 	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
258 }
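
/* Worked example (hypothetical numbers): for a ring outside a channel on
 * TC 1 with tc_info[1].qoffset == 16 and an absolute q_index of 19, the
 * returned handle is 19 - 16 == 3, i.e. queue index 3 within that TC.
 * For a channel ring the same idea applies relative to ring->ch->base_q.
 */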
259 
260 /**
261  * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
262  * @ring: The Tx ring to configure
263  *
264  * This enables/disables XPS for a given Tx descriptor ring
265  * based on the TCs enabled for the VSI that ring belongs to.
266  */
267 static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
268 {
269 	if (!ring->q_vector || !ring->netdev)
270 		return;
271 
272 	/* We only initialize XPS once, so as not to overwrite user settings */
273 	if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
274 		return;
275 
276 	netif_set_xps_queue(ring->netdev,
277 			    &ring->q_vector->napi.config->affinity_mask,
278 			    ring->q_index);
279 }
280 
281 /**
282  * ice_set_txq_ctx_vmvf - set queue context VM/VF type and number by VSI type
283  * @ring: the Tx ring to configure
284  * @vmvf_type: VM/VF type
285  * @vmvf_num: VM/VF number
286  *
287  * Return: 0 on success and a negative value on error.
288  */
289 static int
290 ice_set_txq_ctx_vmvf(struct ice_tx_ring *ring, u8 *vmvf_type, u16 *vmvf_num)
291 {
292 	struct ice_vsi *vsi = ring->vsi;
293 	struct ice_hw *hw;
294 
295 	hw = &vsi->back->hw;
296 
297 	/* queue belongs to a specific VSI type
298 	 * VF / VM index should be programmed per vmvf_type setting:
299 	 * for vmvf_type = VF, it is VF number between 0-256
300 	 * for vmvf_type = VM, it is VM number between 0-767
301 	 * for PF or EMP this field should be set to zero
302 	 */
303 	switch (vsi->type) {
304 	case ICE_VSI_LB:
305 	case ICE_VSI_CTRL:
306 	case ICE_VSI_PF:
307 		if (ring->ch)
308 			*vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
309 		else
310 			*vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
311 		break;
312 	case ICE_VSI_VF:
313 		/* Firmware expects vmvf_num to be absolute VF ID */
314 		*vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
315 		*vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
316 		break;
317 	case ICE_VSI_SF:
318 		*vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
319 		break;
320 	default:
321 		dev_info(ice_pf_to_dev(vsi->back),
322 			 "Unable to set VMVF type for VSI type %d\n",
323 			 vsi->type);
324 		return -EINVAL;
325 	}
326 	return 0;
327 }
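
/* Worked example (hypothetical numbers): for an ICE_VSI_VF VSI where
 * hw->func_caps.vf_base_id is 64 and vsi->vf->vf_id is 2, the context gets
 * vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF and the absolute
 * vmvf_num = 64 + 2 = 66. PF-owned VSIs leave vmvf_num at its zeroed value.
 */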
328 
329 /**
330  * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
331  * @ring: the Tx ring to configure
332  * @tlan_ctx: pointer to the Tx LAN queue context structure to be initialized
333  * @pf_q: queue index in the PF space
334  *
335  * Configure the Tx descriptor ring in TLAN context.
336  *
337  * Return: 0 on success and a negative value on error.
338  */
339 static int
340 ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
341 {
342 	struct ice_vsi *vsi = ring->vsi;
343 	struct ice_hw *hw;
344 	int err;
345 
346 	hw = &vsi->back->hw;
347 	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
348 	tlan_ctx->port_num = vsi->port_info->lport;
349 
350 	/* Transmit Queue Length */
351 	tlan_ctx->qlen = ring->count;
352 
353 	ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
354 
355 	/* PF number */
356 	tlan_ctx->pf_num = hw->pf_id;
357 
358 	err = ice_set_txq_ctx_vmvf(ring, &tlan_ctx->vmvf_type,
359 				   &tlan_ctx->vmvf_num);
360 	if (err)
361 		return err;
362 
363 	/* make sure the context is associated with the right VSI */
364 	if (ring->ch)
365 		tlan_ctx->src_vsi = ring->ch->vsi_num;
366 	else
367 		tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
368 
369 	/* Restrict Tx timestamps to the PF VSI */
370 	switch (vsi->type) {
371 	case ICE_VSI_PF:
372 		tlan_ctx->tsyn_ena = 1;
373 		break;
374 	default:
375 		break;
376 	}
377 
378 	tlan_ctx->quanta_prof_idx = ring->quanta_prof_id;
379 
380 	tlan_ctx->tso_ena = ICE_TX_LEGACY;
381 	tlan_ctx->tso_qnum = pf_q;
382 
383 	/* Legacy or Advanced Host Interface:
384 	 * 0: Advanced Host Interface
385 	 * 1: Legacy Host Interface
386 	 */
387 	tlan_ctx->legacy_int = ICE_TX_LEGACY;
388 
389 	return 0;
390 }
391 
392 /**
393  * ice_setup_txtime_ctx - setup a struct ice_txtime_ctx instance
394  * @ring: the tstamp ring to configure
395  * @txtime_ctx: pointer to the Tx time queue context structure to be initialized
396  *
397  * Return: 0 on success and a negative value on error.
398  */
399 static int
400 ice_setup_txtime_ctx(const struct ice_tstamp_ring *ring,
401 		     struct ice_txtime_ctx *txtime_ctx)
402 {
403 	struct ice_tx_ring *tx_ring = ring->tx_ring;
404 	struct ice_vsi *vsi = tx_ring->vsi;
405 	struct ice_hw *hw = &vsi->back->hw;
406 	int err;
407 
408 	txtime_ctx->base = ring->dma >> ICE_TXTIME_CTX_BASE_S;
409 
410 	/* Tx time Queue Length */
411 	txtime_ctx->qlen = ring->count;
412 	txtime_ctx->txtime_ena_q = 1;
413 
414 	/* PF number */
415 	txtime_ctx->pf_num = hw->pf_id;
416 
417 	err = ice_set_txq_ctx_vmvf(tx_ring, &txtime_ctx->vmvf_type,
418 				   &txtime_ctx->vmvf_num);
419 	if (err)
420 		return err;
421 
422 	/* make sure the context is associated with the right VSI */
423 	if (tx_ring->ch)
424 		txtime_ctx->src_vsi = tx_ring->ch->vsi_num;
425 	else
426 		txtime_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
427 
428 	txtime_ctx->ts_res = ICE_TXTIME_CTX_RESOLUTION_128NS;
429 	txtime_ctx->drbell_mode_32 = ICE_TXTIME_CTX_DRBELL_MODE_32;
430 	txtime_ctx->ts_fetch_prof_id = ICE_TXTIME_CTX_FETCH_PROF_ID_0;
431 
432 	return 0;
433 }
434 
435 /**
436  * ice_calc_ts_ring_count - calculate the number of Tx time stamp descriptors
437  * @tx_ring: Tx ring to calculate the count for
438  *
439  * Return: the number of Tx time stamp descriptors.
440  */
441 u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring)
442 {
443 	u16 prof = ICE_TXTIME_CTX_FETCH_PROF_ID_0;
444 	struct ice_vsi *vsi = tx_ring->vsi;
445 	struct ice_hw *hw = &vsi->back->hw;
446 	u16 max_fetch_desc = 0, fetch, i;
447 	u32 reg;
448 
449 	for (i = 0; i < ICE_TXTIME_FETCH_PROFILE_CNT; i++) {
450 		reg = rd32(hw, E830_GLTXTIME_FETCH_PROFILE(prof, 0));
451 		fetch = FIELD_GET(E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M,
452 				  reg);
453 		max_fetch_desc = max(fetch, max_fetch_desc);
454 	}
455 
456 	if (!max_fetch_desc)
457 		max_fetch_desc = ICE_TXTIME_FETCH_TS_DESC_DFLT;
458 
459 	max_fetch_desc = ALIGN(max_fetch_desc, ICE_REQ_DESC_MULTIPLE);
460 
461 	return tx_ring->count + max_fetch_desc;
462 }
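
/* Worked example (hypothetical numbers, assuming ICE_REQ_DESC_MULTIPLE is
 * 32): if the largest fetch profile requests 60 descriptors, ALIGN() rounds
 * that up to 64, so a 512-entry Tx ring gets a 512 + 64 = 576 entry
 * Tx time stamp ring.
 */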
463 
464 /**
465  * ice_rx_offset - Return expected offset into page to access data
466  * @rx_ring: Ring we are requesting offset of
467  *
468  * Returns the offset value for ring into the data buffer.
469  */
470 static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
471 {
472 	if (ice_ring_uses_build_skb(rx_ring))
473 		return ICE_SKB_PAD;
474 	return 0;
475 }
476 
477 /**
478  * ice_setup_rx_ctx - Configure a receive ring context
479  * @ring: The Rx ring to configure
480  *
481  * Configure the Rx descriptor ring in RLAN context.
482  */
483 static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
484 {
485 	struct ice_vsi *vsi = ring->vsi;
486 	u32 rxdid = ICE_RXDID_FLEX_NIC;
487 	struct ice_rlan_ctx rlan_ctx;
488 	struct ice_hw *hw;
489 	u16 pf_q;
490 	int err;
491 
492 	hw = &vsi->back->hw;
493 
494 	/* Rx queue number in the global space of 2K Rx queues */
495 	pf_q = vsi->rxq_map[ring->q_index];
496 
497 	/* clear the context structure first */
498 	memset(&rlan_ctx, 0, sizeof(rlan_ctx));
499 
500 	/* Receive Queue Base Address.
501 	 * Indicates the starting address of the descriptor queue defined in
502 	 * 128 Byte units.
503 	 */
504 	rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
505 
506 	rlan_ctx.qlen = ring->count;
507 
508 	/* Receive Packet Data Buffer Size.
509 	 * The Packet Data Buffer Size is defined in 128 byte units.
510 	 */
511 	rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
512 				     BIT_ULL(ICE_RLAN_CTX_DBUF_S));
513 
514 	/* use 32 byte descriptors */
515 	rlan_ctx.dsize = 1;
516 
517 	/* Strip the Ethernet CRC bytes before the packet is posted to host
518 	 * memory.
519 	 */
520 	rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);
521 
522 	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor
523 	 * and it needs to remain 1 for non-DVM capable configurations to not
524 	 * break backward compatibility for VF drivers. Setting this field to 0
525 	 * will cause the single/outer VLAN tag to be stripped to the L2TAG2_2ND
526 	 * field in the Rx descriptor. Setting it to 1 allows the VLAN tag to
527 	 * be stripped in L2TAG1 of the Rx descriptor, which is where VFs will
528 	 * check for the tag
529 	 */
530 	if (ice_is_dvm_ena(hw))
531 		if (vsi->type == ICE_VSI_VF &&
532 		    ice_vf_is_port_vlan_ena(vsi->vf))
533 			rlan_ctx.l2tsel = 1;
534 		else
535 			rlan_ctx.l2tsel = 0;
536 	else
537 		rlan_ctx.l2tsel = 1;
538 
539 	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
540 	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
541 	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
542 
543 	/* This controls whether VLAN is stripped from inner headers
544 	 * The VLAN in the inner L2 header is stripped to the receive
545 	 * descriptor if enabled by this flag.
546 	 */
547 	rlan_ctx.showiv = 0;
548 
549 	/* Max packet size for this queue - must not be set to a larger value
550 	 * than 5 x DBUF
551 	 */
552 	rlan_ctx.rxmax = min_t(u32, ring->max_frame,
553 			       ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
554 
555 	/* Rx queue threshold in units of 64 */
556 	rlan_ctx.lrxqthresh = 1;
557 
558 	/* Enable descriptor prefetch */
559 	rlan_ctx.prefena = 1;
560 
561 	/* PF acts as uplink for switchdev; set flex descriptor with src_vsi
562 	 * metadata and flags to allow redirecting to PR netdev
563 	 */
564 	if (ice_is_eswitch_mode_switchdev(vsi->back)) {
565 		ring->flags |= ICE_RX_FLAGS_MULTIDEV;
566 		rxdid = ICE_RXDID_FLEX_NIC_2;
567 	}
568 
569 	/* Enable Flexible Descriptors in the queue context, which allows
570 	 * this driver to select a specific receive descriptor format. The
571 	 * context priority is raised from the default 0x01 to 0x03 to ensure
572 	 * the profile is programmed even if the previous context has the
573 	 * same priority.
574 	 */
575 	if (vsi->type != ICE_VSI_VF)
576 		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
577 
578 	/* Absolute queue number out of 2K needs to be passed */
579 	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
580 	if (err) {
581 		dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
582 			pf_q, err);
583 		return -EIO;
584 	}
585 
586 	if (vsi->type == ICE_VSI_VF)
587 		return 0;
588 
589 	/* configure Rx buffer alignment */
590 	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
591 		ice_clear_ring_build_skb_ena(ring);
592 	else
593 		ice_set_ring_build_skb_ena(ring);
594 
595 	ring->rx_offset = ice_rx_offset(ring);
596 
597 	/* init queue specific tail register */
598 	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
599 	writel(0, ring->tail);
600 
601 	return 0;
602 }
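
/* Worked example (hypothetical numbers): with rx_buf_len == 2048 the
 * context gets dbuf = DIV_ROUND_UP(2048, 128) = 16 (128-byte units) and,
 * with ICE_MAX_CHAINED_RX_BUFS == 5, rxmax is capped at
 * min(ring->max_frame, 5 * 2048) = min(ring->max_frame, 10240) bytes.
 */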
603 
604 static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
605 {
606 	void *ctx_ptr = &ring->pkt_ctx;
607 	struct xsk_cb_desc desc = {};
608 
609 	XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff);
610 	desc.src = &ctx_ptr;
611 	desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) -
612 		   sizeof(struct xdp_buff);
613 	desc.bytes = sizeof(ctx_ptr);
614 	xsk_pool_fill_cb(ring->xsk_pool, &desc);
615 }
616 
617 /**
618  * ice_get_frame_sz - calculate xdp_buff::frame_sz
619  * @rx_ring: the ring being configured
620  *
621  * Return frame size based on underlying PAGE_SIZE
622  */
623 static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring)
624 {
625 	unsigned int frame_sz;
626 
627 #if (PAGE_SIZE >= 8192)
628 	frame_sz = rx_ring->rx_buf_len;
629 #else
630 	frame_sz = ice_rx_pg_size(rx_ring) / 2;
631 #endif
632 
633 	return frame_sz;
634 }
635 
636 /**
637  * ice_vsi_cfg_rxq - Configure an Rx queue
638  * @ring: the ring being configured
639  *
640  * Return 0 on success and a negative value on error.
641  */
642 static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
643 {
644 	struct device *dev = ice_pf_to_dev(ring->vsi->back);
645 	u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
646 	int err;
647 
648 	if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
649 		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
650 			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
651 						 ring->q_index,
652 						 ring->q_vector->napi.napi_id,
653 						 ring->rx_buf_len);
654 			if (err)
655 				return err;
656 		}
657 
658 		ice_rx_xsk_pool(ring);
659 		if (ring->xsk_pool) {
660 			xdp_rxq_info_unreg(&ring->xdp_rxq);
661 
662 			ring->rx_buf_len =
663 				xsk_pool_get_rx_frame_size(ring->xsk_pool);
664 			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
665 						 ring->q_index,
666 						 ring->q_vector->napi.napi_id,
667 						 ring->rx_buf_len);
668 			if (err)
669 				return err;
670 			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
671 							 MEM_TYPE_XSK_BUFF_POOL,
672 							 NULL);
673 			if (err)
674 				return err;
675 			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
676 			ice_xsk_pool_fill_cb(ring);
677 
678 			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
679 				 ring->q_index);
680 		} else {
681 			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
682 				err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
683 							 ring->q_index,
684 							 ring->q_vector->napi.napi_id,
685 							 ring->rx_buf_len);
686 				if (err)
687 					return err;
688 			}
689 
690 			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
691 							 MEM_TYPE_PAGE_SHARED,
692 							 NULL);
693 			if (err)
694 				return err;
695 		}
696 	}
697 
698 	xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq);
699 	ring->xdp.data = NULL;
700 	ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
701 	err = ice_setup_rx_ctx(ring);
702 	if (err) {
703 		dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
704 			ring->q_index, err);
705 		return err;
706 	}
707 
708 	if (ring->xsk_pool) {
709 		bool ok;
710 
711 		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
712 			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
713 				 num_bufs, ring->q_index);
714 			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
715 
716 			return 0;
717 		}
718 
719 		ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs);
720 		if (!ok) {
721 			u16 pf_q = ring->vsi->rxq_map[ring->q_index];
722 
723 			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
724 				 ring->q_index, pf_q);
725 		}
726 
727 		return 0;
728 	}
729 
730 	if (ring->vsi->type == ICE_VSI_CTRL)
731 		ice_init_ctrl_rx_descs(ring, num_bufs);
732 	else
733 		ice_alloc_rx_bufs(ring, num_bufs);
734 
735 	return 0;
736 }
737 
738 int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
739 {
740 	if (q_idx >= vsi->num_rxq)
741 		return -EINVAL;
742 
743 	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
744 }
745 
746 /**
747  * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
748  * @vsi: VSI
749  * @ring: Rx ring to configure
750  *
751  * Determine the maximum frame size and Rx buffer length to use for a PF VSI.
752  * Set these in the associated Rx ring structure.
753  */
754 static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring)
755 {
756 	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
757 		ring->max_frame = ICE_MAX_FRAME_LEGACY_RX;
758 		ring->rx_buf_len = ICE_RXBUF_1664;
759 #if (PAGE_SIZE < 8192)
760 	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
761 		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
762 		ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
763 		ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
764 #endif
765 	} else {
766 		ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
767 		ring->rx_buf_len = ICE_RXBUF_3072;
768 	}
769 }
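
/* Worked example (hypothetical setup): with legacy-rx disabled, 4K pages,
 * !ICE_2K_TOO_SMALL_WITH_PADDING and an MTU at or below ETH_DATA_LEN, the
 * ring uses ICE_RXBUF_1536 - NET_IP_ALIGN byte buffers (1534 bytes when
 * NET_IP_ALIGN is 2); a larger MTU switches to ICE_RXBUF_3072 buffers with
 * max_frame capped at ICE_AQ_SET_MAC_FRAME_SIZE_MAX.
 */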
770 
771 /**
772  * ice_vsi_cfg_rxqs - Configure the VSI for Rx
773  * @vsi: the VSI being configured
774  *
775  * Return 0 on success and a negative value on error
776  * Configure the Rx VSI for operation.
777  */
778 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
779 {
780 	u16 i;
781 
782 	/* set up individual rings */
783 	ice_for_each_rxq(vsi, i) {
784 		struct ice_rx_ring *ring = vsi->rx_rings[i];
785 		int err;
786 
787 		if (vsi->type != ICE_VSI_VF)
788 			ice_vsi_cfg_frame_size(vsi, ring);
789 
790 		err = ice_vsi_cfg_rxq(ring);
791 		if (err)
792 			return err;
793 	}
794 
795 	return 0;
796 }
797 
798 /**
799  * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
800  * @qs_cfg: gathered variables needed for pf->vsi queues assignment
801  *
802  * This function first tries to find contiguous space. If it is not successful,
803  * it tries with the scatter approach.
804  *
805  * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
806  */
807 int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
808 {
809 	int ret = 0;
810 
811 	ret = __ice_vsi_get_qs_contig(qs_cfg);
812 	if (ret) {
813 		/* contig failed, so try with scatter approach */
814 		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
815 		qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
816 					qs_cfg->scatter_count);
817 		ret = __ice_vsi_get_qs_sc(qs_cfg);
818 	}
819 	return ret;
820 }
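
/* Usage sketch (abridged, based on ice_vsi_get_qs() in ice_lib.c): a caller
 * fills struct ice_qs_cfg with the PF-wide bitmap and the per-VSI map
 * before handing it to __ice_vsi_get_qs(), roughly:
 *
 *	struct ice_qs_cfg tx_qs_cfg = {
 *		.qs_mutex = &pf->avail_q_mutex,
 *		.pf_map = pf->avail_txqs,
 *		.pf_map_size = pf->max_pf_txqs,
 *		.q_count = vsi->alloc_txq,
 *		.scatter_count = ICE_MAX_SCATTER_TXQS,
 *		.vsi_map = vsi->txq_map,
 *		.vsi_map_offset = 0,
 *		.mapping_mode = ICE_VSI_MAP_CONTIG,
 *	};
 *
 *	ret = __ice_vsi_get_qs(&tx_qs_cfg);
 */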
821 
822 /**
823  * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
824  * @vsi: the VSI being configured
825  * @ena: start or stop the Rx ring
826  * @rxq_idx: 0-based Rx queue index for the VSI passed in
827  * @wait: wait or don't wait for configuration to finish in hardware
828  *
829  * Return 0 on success and negative on error.
830  */
831 int
832 ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
833 {
834 	int pf_q = vsi->rxq_map[rxq_idx];
835 	struct ice_pf *pf = vsi->back;
836 	struct ice_hw *hw = &pf->hw;
837 	u32 rx_reg;
838 
839 	rx_reg = rd32(hw, QRX_CTRL(pf_q));
840 
841 	/* Skip if the queue is already in the requested state */
842 	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
843 		return 0;
844 
845 	/* turn on/off the queue */
846 	if (ena)
847 		rx_reg |= QRX_CTRL_QENA_REQ_M;
848 	else
849 		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
850 	wr32(hw, QRX_CTRL(pf_q), rx_reg);
851 
852 	if (!wait)
853 		return 0;
854 
855 	ice_flush(hw);
856 	return ice_pf_rxq_wait(pf, pf_q, ena);
857 }
858 
859 /**
860  * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
861  * @vsi: the VSI being configured
862  * @ena: true/false to verify Rx ring has been enabled/disabled respectively
863  * @rxq_idx: 0-based Rx queue index for the VSI passed in
864  *
865  * This routine will wait for the given Rx queue of the VSI to reach the
866  * enabled or disabled state. Returns -ETIMEDOUT in case of failing to reach
867  * the requested state after multiple retries; else will return 0 in case of
868  * success.
869  */
870 int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
871 {
872 	int pf_q = vsi->rxq_map[rxq_idx];
873 	struct ice_pf *pf = vsi->back;
874 
875 	return ice_pf_rxq_wait(pf, pf_q, ena);
876 }
877 
878 /**
879  * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
880  * @vsi: the VSI being configured
881  *
882  * We allocate one q_vector per queue interrupt. If allocation fails we
883  * return -ENOMEM.
884  */
885 int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
886 {
887 	struct device *dev = ice_pf_to_dev(vsi->back);
888 	u16 v_idx;
889 	int err;
890 
891 	if (vsi->q_vectors[0]) {
892 		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
893 		return -EEXIST;
894 	}
895 
896 	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
897 		err = ice_vsi_alloc_q_vector(vsi, v_idx);
898 		if (err)
899 			goto err_out;
900 	}
901 
902 	return 0;
903 
904 err_out:
905 
906 	dev_info(dev, "Failed to allocate %d q_vectors for VSI %d, new value %d\n",
907 		 vsi->num_q_vectors, vsi->vsi_num, v_idx);
908 	vsi->num_q_vectors = v_idx;
909 	return v_idx ? 0 : err;
910 }
911 
912 /**
913  * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
914  * @vsi: the VSI being configured
915  *
916  * This function maps descriptor rings to the queue-specific vectors allotted
917  * through the MSI-X enabling code. On a constrained vector budget, we map Tx
918  * and Rx rings to the vector as "efficiently" as possible.
919  */
920 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
921 {
922 	int q_vectors = vsi->num_q_vectors;
923 	u16 tx_rings_rem, rx_rings_rem;
924 	int v_id;
925 
926 	/* start the remaining ring counts at the VSI's Tx/Rx queue counts */
927 	tx_rings_rem = vsi->num_txq;
928 	rx_rings_rem = vsi->num_rxq;
929 
930 	for (v_id = 0; v_id < q_vectors; v_id++) {
931 		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
932 		u8 tx_rings_per_v, rx_rings_per_v;
933 		u16 q_id, q_base;
934 
935 		/* Tx rings mapping to vector */
936 		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
937 						  q_vectors - v_id);
938 		q_vector->num_ring_tx = tx_rings_per_v;
939 		q_vector->tx.tx_ring = NULL;
940 		q_vector->tx.itr_idx = ICE_TX_ITR;
941 		q_base = vsi->num_txq - tx_rings_rem;
942 
943 		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
944 			struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
945 
946 			tx_ring->q_vector = q_vector;
947 			tx_ring->next = q_vector->tx.tx_ring;
948 			q_vector->tx.tx_ring = tx_ring;
949 		}
950 		tx_rings_rem -= tx_rings_per_v;
951 
952 		/* Rx rings mapping to vector */
953 		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
954 						  q_vectors - v_id);
955 		q_vector->num_ring_rx = rx_rings_per_v;
956 		q_vector->rx.rx_ring = NULL;
957 		q_vector->rx.itr_idx = ICE_RX_ITR;
958 		q_base = vsi->num_rxq - rx_rings_rem;
959 
960 		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
961 			struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
962 
963 			rx_ring->q_vector = q_vector;
964 			rx_ring->next = q_vector->rx.rx_ring;
965 			q_vector->rx.rx_ring = rx_ring;
966 		}
967 		rx_rings_rem -= rx_rings_per_v;
968 	}
969 
970 	if (ice_is_xdp_ena_vsi(vsi))
971 		ice_map_xdp_rings(vsi);
972 }
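
/* Worked example (hypothetical counts): 10 Tx rings spread over 4 vectors
 * yields DIV_ROUND_UP(10, 4) = 3 rings on the first vector, then 3, 2 and 2
 * on the remaining vectors; Rx rings are distributed the same way, so
 * queues are split as evenly as the integer math allows.
 */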
973 
974 /**
975  * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
976  * @vsi: the VSI having memory freed
977  */
978 void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
979 {
980 	int v_idx;
981 
982 	ice_for_each_q_vector(vsi, v_idx)
983 		ice_free_q_vector(vsi, v_idx);
984 
985 	vsi->num_q_vectors = 0;
986 }
987 
988 /**
989  * ice_cfg_tstamp - Configure Tx time stamp queue
990  * @tx_ring: Tx ring to be configured with timestamping
991  *
992  * Return: 0 on success and a negative value on error.
993  */
994 static int
995 ice_cfg_tstamp(struct ice_tx_ring *tx_ring)
996 {
997 	DEFINE_RAW_FLEX(struct ice_aqc_set_txtime_qgrp, txtime_qg_buf,
998 			txtimeqs, 1);
999 	u8 txtime_buf_len = struct_size(txtime_qg_buf, txtimeqs, 1);
1000 	struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
1001 	struct ice_txtime_ctx txtime_ctx = {};
1002 	struct ice_vsi *vsi = tx_ring->vsi;
1003 	struct ice_pf *pf = vsi->back;
1004 	struct ice_hw *hw = &pf->hw;
1005 	u16 pf_q = tx_ring->reg_idx;
1006 	int err;
1007 
1008 	err = ice_setup_txtime_ctx(tstamp_ring, &txtime_ctx);
1009 	if (err) {
1010 		dev_err(ice_pf_to_dev(pf), "Failed to setup Tx time queue context for queue %d, error: %d\n",
1011 			pf_q, err);
1012 		return err;
1013 	}
1014 	ice_pack_txtime_ctx(&txtime_ctx,
1015 			    &txtime_qg_buf->txtimeqs[0].txtime_ctx);
1016 
1017 	tstamp_ring->tail = hw->hw_addr + E830_GLQTX_TXTIME_DBELL_LSB(pf_q);
1018 	return ice_aq_set_txtimeq(hw, pf_q, 1, txtime_qg_buf,
1019 				  txtime_buf_len, NULL);
1020 }
1021 
1022 /**
1023  * ice_vsi_cfg_txq - Configure single Tx queue
1024  * @vsi: the VSI that queue belongs to
1025  * @ring: Tx ring to be configured
1026  * @qg_buf: queue group buffer
1027  *
1028  * Return: 0 on success and a negative value on error.
1029  */
1030 static int
1031 ice_vsi_cfg_txq(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
1032 		struct ice_aqc_add_tx_qgrp *qg_buf)
1033 {
1034 	u8 buf_len = struct_size(qg_buf, txqs, 1);
1035 	struct ice_tlan_ctx tlan_ctx = { 0 };
1036 	struct ice_aqc_add_txqs_perq *txq;
1037 	struct ice_channel *ch = ring->ch;
1038 	struct ice_pf *pf = vsi->back;
1039 	struct ice_hw *hw = &pf->hw;
1040 	u32 pf_q, vsi_idx;
1041 	int status;
1042 	u8 tc;
1043 
1044 	/* Configure XPS */
1045 	ice_cfg_xps_tx_ring(ring);
1046 
1047 	pf_q = ring->reg_idx;
1048 	status = ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
1049 	if (status) {
1050 		dev_err(ice_pf_to_dev(pf), "Failed to setup Tx context for queue %d, error: %d\n",
1051 			pf_q, status);
1052 		return status;
1053 	}
1054 	/* copy context contents into the qg_buf */
1055 	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
1056 	ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx);
1057 
1058 	/* init queue specific tail reg. It is referred to as the
1059 	 * transmit comm scheduler queue doorbell.
1060 	 */
1061 	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);
1062 
1063 	if (IS_ENABLED(CONFIG_DCB))
1064 		tc = ring->dcb_tc;
1065 	else
1066 		tc = 0;
1067 
1068 	/* Add unique software queue handle of the Tx queue per
1069 	 * TC into the VSI Tx ring
1070 	 */
1071 	ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
1072 
1073 	if (ch) {
1074 		tc = 0;
1075 		vsi_idx = ch->ch_vsi->idx;
1076 	} else {
1077 		vsi_idx = vsi->idx;
1078 	}
1079 
1080 	status = ice_ena_vsi_txq(vsi->port_info, vsi_idx, tc, ring->q_handle,
1081 				 1, qg_buf, buf_len, NULL);
1082 	if (status) {
1083 		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
1084 			status);
1085 		return status;
1086 	}
1087 
1088 	/* Add Tx Queue TEID into the VSI Tx ring from the
1089 	 * response. This will complete configuring and
1090 	 * enabling the queue.
1091 	 */
1092 	txq = &qg_buf->txqs[0];
1093 	if (pf_q == le16_to_cpu(txq->txq_id))
1094 		ring->txq_teid = le32_to_cpu(txq->q_teid);
1095 
1096 	if (ice_is_txtime_ena(ring)) {
1097 		status = ice_alloc_setup_tstamp_ring(ring);
1098 		if (status) {
1099 			dev_err(ice_pf_to_dev(pf),
1100 				"Failed to allocate Tx timestamp ring, error: %d\n",
1101 				status);
1102 			goto err_setup_tstamp;
1103 		}
1104 
1105 		status = ice_cfg_tstamp(ring);
1106 		if (status) {
1107 			dev_err(ice_pf_to_dev(pf), "Failed to set Tx Time queue context, error: %d\n",
1108 				status);
1109 			goto err_cfg_tstamp;
1110 		}
1111 	}
1112 	return 0;
1113 
1114 err_cfg_tstamp:
1115 	ice_free_tx_tstamp_ring(ring);
1116 err_setup_tstamp:
1117 	ice_dis_vsi_txq(vsi->port_info, vsi_idx, tc, 1, &ring->q_handle,
1118 			&ring->reg_idx, &ring->txq_teid, ICE_NO_RESET,
1119 			tlan_ctx.vmvf_num, NULL);
1120 
1121 	return status;
1122 }
1123 
1124 int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
1125 			   u16 q_idx)
1126 {
1127 	DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
1128 
1129 	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
1130 		return -EINVAL;
1131 
1132 	qg_buf->num_txqs = 1;
1133 
1134 	return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
1135 }
1136 
1137 /**
1138  * ice_vsi_cfg_txqs - Configure the VSI for Tx
1139  * @vsi: the VSI being configured
1140  * @rings: Tx ring array to be configured
1141  * @count: number of Tx ring array elements
1142  *
1143  * Return 0 on success and a negative value on error
1144  * Configure the Tx VSI for operation.
1145  */
1146 static int
1147 ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
1148 {
1149 	DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
1150 	int err = 0;
1151 	u16 q_idx;
1152 
1153 	qg_buf->num_txqs = 1;
1154 
1155 	for (q_idx = 0; q_idx < count; q_idx++) {
1156 		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
1157 		if (err)
1158 			break;
1159 	}
1160 
1161 	return err;
1162 }
1163 
1164 /**
1165  * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
1166  * @vsi: the VSI being configured
1167  *
1168  * Return 0 on success and a negative value on error
1169  * Configure the Tx VSI for operation.
1170  */
1171 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
1172 {
1173 	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
1174 }
1175 
1176 /**
1177  * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
1178  * @vsi: the VSI being configured
1179  *
1180  * Return 0 on success and a negative value on error
1181  * Configure the Tx queues dedicated for XDP in given VSI for operation.
1182  */
1183 int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
1184 {
1185 	int ret;
1186 	int i;
1187 
1188 	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
1189 	if (ret)
1190 		return ret;
1191 
1192 	ice_for_each_rxq(vsi, i)
1193 		ice_tx_xsk_pool(vsi, i);
1194 
1195 	return 0;
1196 }
1197 
1198 /**
1199  * ice_cfg_itr - configure the initial interrupt throttle values
1200  * @hw: pointer to the HW structure
1201  * @q_vector: interrupt vector that's being configured
1202  *
1203  * Configure interrupt throttling values for the ring containers that are
1204  * associated with the interrupt vector passed in.
1205  */
1206 void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
1207 {
1208 	ice_cfg_itr_gran(hw);
1209 
1210 	if (q_vector->num_ring_rx)
1211 		ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);
1212 
1213 	if (q_vector->num_ring_tx)
1214 		ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);
1215 
1216 	ice_write_intrl(q_vector, q_vector->intrl);
1217 }
1218 
1219 /**
1220  * ice_cfg_txq_interrupt - configure interrupt on Tx queue
1221  * @vsi: the VSI being configured
1222  * @txq: Tx queue being mapped to MSI-X vector
1223  * @msix_idx: MSI-X vector index within the function
1224  * @itr_idx: ITR index of the interrupt cause
1225  *
1226  * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
1227  * within the function space.
1228  */
1229 void
1230 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
1231 {
1232 	struct ice_pf *pf = vsi->back;
1233 	struct ice_hw *hw = &pf->hw;
1234 	u32 val;
1235 
1236 	itr_idx = FIELD_PREP(QINT_TQCTL_ITR_INDX_M, itr_idx);
1237 
1238 	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
1239 	      FIELD_PREP(QINT_TQCTL_MSIX_INDX_M, msix_idx);
1240 
1241 	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
1242 	if (ice_is_xdp_ena_vsi(vsi)) {
1243 		u32 xdp_txq = txq + vsi->num_xdp_txq;
1244 
1245 		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
1246 		     val);
1247 	}
1248 	ice_flush(hw);
1249 }
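
/* Worked example (hypothetical values): for msix_idx == 5 and itr_idx == 1,
 * the QINT_TQCTL value has the cause-enable bit set, the ITR index field
 * equal to 1 and the MSI-X index field equal to 5; the same value is also
 * written for the paired XDP Tx queue when XDP is enabled on the VSI.
 */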
1250 
1251 /**
1252  * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
1253  * @vsi: the VSI being configured
1254  * @rxq: Rx queue being mapped to MSI-X vector
1255  * @msix_idx: MSI-X vector index within the function
1256  * @itr_idx: ITR index of the interrupt cause
1257  *
1258  * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
1259  * within the function space.
1260  */
1261 void
1262 ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
1263 {
1264 	struct ice_pf *pf = vsi->back;
1265 	struct ice_hw *hw = &pf->hw;
1266 	u32 val;
1267 
1268 	itr_idx = FIELD_PREP(QINT_RQCTL_ITR_INDX_M, itr_idx);
1269 
1270 	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
1271 	      FIELD_PREP(QINT_RQCTL_MSIX_INDX_M, msix_idx);
1272 
1273 	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
1274 
1275 	ice_flush(hw);
1276 }
1277 
1278 /**
1279  * ice_trigger_sw_intr - trigger a software interrupt
1280  * @hw: pointer to the HW structure
1281  * @q_vector: interrupt vector to trigger the software interrupt for
1282  */
1283 void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector)
1284 {
1285 	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
1286 	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
1287 	     GLINT_DYN_CTL_SWINT_TRIG_M |
1288 	     GLINT_DYN_CTL_INTENA_M);
1289 }
1290 
1291 /**
1292  * ice_vsi_stop_tx_ring - Disable single Tx ring
1293  * @vsi: the VSI being configured
1294  * @rst_src: reset source
1295  * @rel_vmvf_num: Relative ID of VF/VM
1296  * @ring: Tx ring to be stopped
1297  * @txq_meta: Meta data of Tx ring to be stopped
1298  */
1299 int
1300 ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
1301 		     u16 rel_vmvf_num, struct ice_tx_ring *ring,
1302 		     struct ice_txq_meta *txq_meta)
1303 {
1304 	struct ice_pf *pf = vsi->back;
1305 	struct ice_q_vector *q_vector;
1306 	struct ice_hw *hw = &pf->hw;
1307 	int status;
1308 	u32 val;
1309 
1310 	/* clear cause_ena bit for disabled queues */
1311 	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
1312 	val &= ~QINT_TQCTL_CAUSE_ENA_M;
1313 	wr32(hw, QINT_TQCTL(ring->reg_idx), val);
1314 
1315 	/* software is expected to wait for 100 ns */
1316 	ndelay(100);
1317 
1318 	/* trigger a software interrupt for the vector
1319 	 * associated with the queue to schedule the NAPI handler
1320 	 */
1321 	q_vector = ring->q_vector;
1322 	if (q_vector && !(vsi->vf && ice_is_vf_disabled(vsi->vf)))
1323 		ice_trigger_sw_intr(hw, q_vector);
1324 
1325 	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
1326 				 txq_meta->tc, 1, &txq_meta->q_handle,
1327 				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
1328 				 rel_vmvf_num, NULL);
1329 
1330 	/* if the disable queue command was exercised during an
1331 	 * active reset flow, -EBUSY is returned.
1332 	 * This is not an error as the reset operation disables
1333 	 * queues at the hardware level anyway.
1334 	 */
1335 	if (status == -EBUSY) {
1336 		dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
1337 	} else if (status == -ENOENT) {
1338 		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
1339 	} else if (status) {
1340 		dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
1341 			status);
1342 		return status;
1343 	}
1344 
1345 	return 0;
1346 }
1347 
1348 /**
1349  * ice_fill_txq_meta - Prepare the Tx queue's meta data
1350  * @vsi: VSI that ring belongs to
1351  * @ring: ring that txq_meta will be based on
1352  * @txq_meta: a helper struct that wraps Tx queue's information
1353  *
1354  * Set up a helper struct that will contain all the necessary fields that
1355  * are needed for stopping Tx queue
1356  */
1357 void
1358 ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
1359 		  struct ice_txq_meta *txq_meta)
1360 {
1361 	struct ice_channel *ch = ring->ch;
1362 	u8 tc;
1363 
1364 	if (IS_ENABLED(CONFIG_DCB))
1365 		tc = ring->dcb_tc;
1366 	else
1367 		tc = 0;
1368 
1369 	txq_meta->q_id = ring->reg_idx;
1370 	txq_meta->q_teid = ring->txq_teid;
1371 	txq_meta->q_handle = ring->q_handle;
1372 	if (ch) {
1373 		txq_meta->vsi_idx = ch->ch_vsi->idx;
1374 		txq_meta->tc = 0;
1375 	} else {
1376 		txq_meta->vsi_idx = vsi->idx;
1377 		txq_meta->tc = tc;
1378 	}
1379 }
1380 
1381 /**
1382  * ice_qp_reset_stats - Resets all stats for rings of given index
1383  * @vsi: VSI that contains rings of interest
1384  * @q_idx: ring index in array
1385  */
1386 static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
1387 {
1388 	struct ice_vsi_stats *vsi_stat;
1389 	struct ice_pf *pf;
1390 
1391 	pf = vsi->back;
1392 	if (!pf->vsi_stats)
1393 		return;
1394 
1395 	vsi_stat = pf->vsi_stats[vsi->idx];
1396 	if (!vsi_stat)
1397 		return;
1398 
1399 	memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
1400 	       sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
1401 	memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
1402 	       sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
1403 	if (vsi->xdp_rings)
1404 		memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
1405 		       sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
1406 }
1407 
1408 /**
1409  * ice_qp_clean_rings - Cleans all the rings of a given index
1410  * @vsi: VSI that contains rings of interest
1411  * @q_idx: ring index in array
1412  */
1413 static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
1414 {
1415 	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
1416 	if (vsi->xdp_rings)
1417 		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
1418 	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
1419 }
1420 
1421 /**
1422  * ice_qp_dis - Disables a queue pair
1423  * @vsi: VSI of interest
1424  * @q_idx: ring index in array
1425  *
1426  * Returns 0 on success, negative on failure.
1427  */
1428 int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
1429 {
1430 	struct ice_txq_meta txq_meta = { };
1431 	struct ice_q_vector *q_vector;
1432 	struct ice_tx_ring *tx_ring;
1433 	struct ice_rx_ring *rx_ring;
1434 	int fail = 0;
1435 	int err;
1436 
1437 	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
1438 		return -EINVAL;
1439 
1440 	tx_ring = vsi->tx_rings[q_idx];
1441 	rx_ring = vsi->rx_rings[q_idx];
1442 	q_vector = rx_ring->q_vector;
1443 
1444 	synchronize_net();
1445 	netif_carrier_off(vsi->netdev);
1446 	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
1447 
1448 	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
1449 	ice_qvec_toggle_napi(vsi, q_vector, false);
1450 
1451 	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
1452 	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
1453 	if (!fail)
1454 		fail = err;
1455 	if (vsi->xdp_rings) {
1456 		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
1457 
1458 		memset(&txq_meta, 0, sizeof(txq_meta));
1459 		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
1460 		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
1461 					   &txq_meta);
1462 		if (!fail)
1463 			fail = err;
1464 	}
1465 
1466 	ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
1467 	ice_qp_clean_rings(vsi, q_idx);
1468 	ice_qp_reset_stats(vsi, q_idx);
1469 
1470 	return fail;
1471 }
1472 
1473 /**
1474  * ice_qp_ena - Enables a queue pair
1475  * @vsi: VSI of interest
1476  * @q_idx: ring index in array
1477  *
1478  * Returns 0 on success, negative on failure.
1479  */
1480 int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
1481 {
1482 	struct ice_q_vector *q_vector;
1483 	int fail = 0;
1484 	bool link_up;
1485 	int err;
1486 
1487 	err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
1488 	if (!fail)
1489 		fail = err;
1490 
1491 	if (ice_is_xdp_ena_vsi(vsi)) {
1492 		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
1493 
1494 		err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
1495 		if (!fail)
1496 			fail = err;
1497 		ice_set_ring_xdp(xdp_ring);
1498 		ice_tx_xsk_pool(vsi, q_idx);
1499 	}
1500 
1501 	err = ice_vsi_cfg_single_rxq(vsi, q_idx);
1502 	if (!fail)
1503 		fail = err;
1504 
1505 	q_vector = vsi->rx_rings[q_idx]->q_vector;
1506 	ice_qvec_cfg_msix(vsi, q_vector, q_idx);
1507 
1508 	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
1509 	if (!fail)
1510 		fail = err;
1511 
1512 	ice_qvec_toggle_napi(vsi, q_vector, true);
1513 	ice_qvec_ena_irq(vsi, q_vector);
1514 
1515 	/* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
1516 	synchronize_net();
1517 	ice_get_link_status(vsi->port_info, &link_up);
1518 	if (link_up) {
1519 		netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
1520 		netif_carrier_on(vsi->netdev);
1521 	}
1522 
1523 	return fail;
1524 }
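
/* Usage note: ice_qp_dis() and ice_qp_ena() are intended to bracket
 * operations that must quiesce a single queue pair while the interface
 * stays up, e.g. the AF_XDP buffer pool attach/detach path in ice_xsk.c,
 * which disables the pair, swaps the ring's xsk_pool and then re-enables it.
 */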
1525