xref: /linux/drivers/net/ethernet/intel/ice/ice_base.c (revision f12b363887c706c40611fba645265527a8415832)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #include <net/xdp_sock_drv.h>
5 #include "ice_base.h"
6 #include "ice_lib.h"
7 #include "ice_dcb_lib.h"
8 #include "ice_sriov.h"
9 
10 /**
11  * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
12  * @qs_cfg: gathered variables needed for PF->VSI queues assignment
13  *
14  * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
15  */
16 static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
17 {
18 	unsigned int offset, i;
19 
20 	mutex_lock(qs_cfg->qs_mutex);
21 	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
22 					    0, qs_cfg->q_count, 0);
23 	if (offset >= qs_cfg->pf_map_size) {
24 		mutex_unlock(qs_cfg->qs_mutex);
25 		return -ENOMEM;
26 	}
27 
28 	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
29 	for (i = 0; i < qs_cfg->q_count; i++)
30 		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
31 	mutex_unlock(qs_cfg->qs_mutex);
32 
33 	return 0;
34 }
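
/* Illustrative example (hypothetical state): with q_count = 4,
 * vsi_map_offset = 0 and the first free run in pf_map starting at bit 12,
 * the function sets bits 12..15 in pf_map and fills
 * vsi_map[0..3] = { 12, 13, 14, 15 }.
 */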
35 
36 /**
37  * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
38  * @qs_cfg: gathered variables needed for pf->vsi queues assignment
39  *
40  * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
41  */
42 static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
43 {
44 	unsigned int i, index = 0;
45 
46 	mutex_lock(qs_cfg->qs_mutex);
47 	for (i = 0; i < qs_cfg->q_count; i++) {
48 		index = find_next_zero_bit(qs_cfg->pf_map,
49 					   qs_cfg->pf_map_size, index);
50 		if (index >= qs_cfg->pf_map_size)
51 			goto err_scatter;
52 		set_bit(index, qs_cfg->pf_map);
53 		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
54 	}
55 	mutex_unlock(qs_cfg->qs_mutex);
56 
57 	return 0;
58 err_scatter:
59 	for (index = 0; index < i; index++) {
60 		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
61 		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
62 	}
63 	mutex_unlock(qs_cfg->qs_mutex);
64 
65 	return -ENOMEM;
66 }
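
/* Illustrative example (hypothetical bitmap state): with q_count = 3 and
 * free bits scattered at positions 2, 7 and 9, vsi_map[0..2] becomes
 * { 2, 7, 9 }. If no free bit can be found for a later queue, err_scatter
 * releases the bits already taken and clears the corresponding vsi_map
 * entries before returning -ENOMEM.
 */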
67 
68 /**
69  * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
70  * @pf: the PF being configured
71  * @pf_q: the PF queue
72  * @ena: enable or disable state of the queue
73  *
74  * This routine will wait for the given Rx queue of the PF to reach the
75  * enabled or disabled state.
76  * Returns -ETIMEDOUT in case of failing to reach the requested state after
77  * multiple retries; otherwise returns 0 on success.
78  */
79 static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
80 {
81 	int i;
82 
83 	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
84 		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
85 			      QRX_CTRL_QENA_STAT_M))
86 			return 0;
87 
88 		usleep_range(20, 40);
89 	}
90 
91 	return -ETIMEDOUT;
92 }
93 
94 /**
95  * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
96  * @vsi: the VSI being configured
97  * @v_idx: index of the vector in the VSI struct
98  *
99  * We allocate one q_vector and set the default ITR values associated
100  * with it. If allocation fails we return -ENOMEM.
101  */
102 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
103 {
104 	struct ice_pf *pf = vsi->back;
105 	struct ice_q_vector *q_vector;
106 	int err;
107 
108 	/* allocate q_vector */
109 	q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
110 	if (!q_vector)
111 		return -ENOMEM;
112 
113 	q_vector->vsi = vsi;
114 	q_vector->v_idx = v_idx;
115 	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
116 	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
117 	q_vector->tx.itr_mode = ITR_DYNAMIC;
118 	q_vector->rx.itr_mode = ITR_DYNAMIC;
119 	q_vector->tx.type = ICE_TX_CONTAINER;
120 	q_vector->rx.type = ICE_RX_CONTAINER;
121 	q_vector->irq.index = -ENOENT;
122 
123 	if (vsi->type == ICE_VSI_VF) {
124 		ice_calc_vf_reg_idx(vsi->vf, q_vector);
125 		goto out;
126 	} else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
127 		struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);
128 
129 		if (ctrl_vsi) {
130 			if (unlikely(!ctrl_vsi->q_vectors)) {
131 				err = -ENOENT;
132 				goto err_free_q_vector;
133 			}
134 
135 			q_vector->irq = ctrl_vsi->q_vectors[0]->irq;
136 			goto skip_alloc;
137 		}
138 	}
139 
140 	q_vector->irq = ice_alloc_irq(pf, vsi->irq_dyn_alloc);
141 	if (q_vector->irq.index < 0) {
142 		err = -ENOMEM;
143 		goto err_free_q_vector;
144 	}
145 
146 skip_alloc:
147 	q_vector->reg_idx = q_vector->irq.index;
148 	q_vector->vf_reg_idx = q_vector->irq.index;
149 
150 	/* only set affinity_mask if the CPU is online */
151 	if (cpu_online(v_idx))
152 		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
153 
154 	/* This will not be called in the driver load path because the netdev
155 	 * will not be created yet. All other cases will register the NAPI
156 	 * handler here (i.e. resume, reset/rebuild, etc.)
157 	 */
158 	if (vsi->netdev)
159 		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);
160 
161 out:
162 	/* tie q_vector and VSI together */
163 	vsi->q_vectors[v_idx] = q_vector;
164 
165 	return 0;
166 
167 err_free_q_vector:
168 	kfree(q_vector);
169 
170 	return err;
171 }
172 
173 /**
174  * ice_free_q_vector - Free memory allocated for a specific interrupt vector
175  * @vsi: VSI having the memory freed
176  * @v_idx: index of the vector to be freed
177  */
178 static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
179 {
180 	struct ice_q_vector *q_vector;
181 	struct ice_pf *pf = vsi->back;
182 	struct ice_tx_ring *tx_ring;
183 	struct ice_rx_ring *rx_ring;
184 	struct device *dev;
185 
186 	dev = ice_pf_to_dev(pf);
187 	if (!vsi->q_vectors[v_idx]) {
188 		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
189 		return;
190 	}
191 	q_vector = vsi->q_vectors[v_idx];
192 
193 	ice_for_each_tx_ring(tx_ring, vsi->q_vectors[v_idx]->tx)
194 		tx_ring->q_vector = NULL;
195 
196 	ice_for_each_rx_ring(rx_ring, vsi->q_vectors[v_idx]->rx)
197 		rx_ring->q_vector = NULL;
198 
199 	/* only VSI with an associated netdev is set up with NAPI */
200 	if (vsi->netdev)
201 		netif_napi_del(&q_vector->napi);
202 
203 	/* release the MSI-X interrupt if this q_vector had one allocated */
204 	if (q_vector->irq.index < 0)
205 		goto free_q_vector;
206 
207 	/* only free the shared interrupt when this is the last VF ctrl VSI */
208 	if (vsi->type == ICE_VSI_CTRL && vsi->vf &&
209 	    ice_get_vf_ctrl_vsi(pf, vsi))
210 		goto free_q_vector;
211 
212 	ice_free_irq(pf, q_vector->irq);
213 
214 free_q_vector:
215 	kfree(q_vector);
216 	vsi->q_vectors[v_idx] = NULL;
217 }
218 
219 /**
220  * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
221  * @hw: board specific structure
222  */
223 static void ice_cfg_itr_gran(struct ice_hw *hw)
224 {
225 	u32 regval = rd32(hw, GLINT_CTL);
226 
227 	/* no need to update global register if ITR gran is already set */
228 	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
229 	    (FIELD_GET(GLINT_CTL_ITR_GRAN_200_M, regval) == ICE_ITR_GRAN_US) &&
230 	    (FIELD_GET(GLINT_CTL_ITR_GRAN_100_M, regval) == ICE_ITR_GRAN_US) &&
231 	    (FIELD_GET(GLINT_CTL_ITR_GRAN_50_M, regval) == ICE_ITR_GRAN_US) &&
232 	    (FIELD_GET(GLINT_CTL_ITR_GRAN_25_M, regval) == ICE_ITR_GRAN_US))
233 		return;
234 
235 	regval = FIELD_PREP(GLINT_CTL_ITR_GRAN_200_M, ICE_ITR_GRAN_US) |
236 		 FIELD_PREP(GLINT_CTL_ITR_GRAN_100_M, ICE_ITR_GRAN_US) |
237 		 FIELD_PREP(GLINT_CTL_ITR_GRAN_50_M, ICE_ITR_GRAN_US) |
238 		 FIELD_PREP(GLINT_CTL_ITR_GRAN_25_M, ICE_ITR_GRAN_US);
239 	wr32(hw, GLINT_CTL, regval);
240 }
241 
242 /**
243  * ice_calc_txq_handle - calculate the queue handle
244  * @vsi: VSI that ring belongs to
245  * @ring: ring to get the absolute queue index
246  * @tc: traffic class number
247  */
248 static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
249 {
250 	WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
251 
252 	if (ring->ch)
253 		return ring->q_index - ring->ch->base_q;
254 
255 	/* The idea behind the calculation is to subtract the queue offset of
256 	 * the TC that the ring belongs to from the ring's absolute queue
257 	 * index, which yields the queue's index within that TC.
258 	 */
259 	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
260 }
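
/* Worked example (numbers are illustrative): a ring with q_index = 10 in a
 * TC whose qoffset is 8 gets queue handle 10 - 8 = 2, i.e. it is the third
 * queue within that TC. For a channel ring the handle is taken relative to
 * the channel's base_q instead.
 */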
261 
262 /**
263  * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
264  * @ring: The Tx ring to configure
265  *
266  * This enables/disables XPS for a given Tx descriptor ring
267  * based on the TCs enabled for the VSI that ring belongs to.
268  */
269 static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
270 {
271 	if (!ring->q_vector || !ring->netdev)
272 		return;
273 
274 	/* We only initialize XPS once, so as not to overwrite user settings */
275 	if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
276 		return;
277 
278 	netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
279 			    ring->q_index);
280 }
281 
282 /**
283  * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
284  * @ring: The Tx ring to configure
285  * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
286  * @pf_q: queue index in the PF space
287  *
288  * Configure the Tx descriptor ring in TLAN context.
289  */
290 static void
291 ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
292 {
293 	struct ice_vsi *vsi = ring->vsi;
294 	struct ice_hw *hw = &vsi->back->hw;
295 
296 	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
297 
298 	tlan_ctx->port_num = vsi->port_info->lport;
299 
300 	/* Transmit Queue Length */
301 	tlan_ctx->qlen = ring->count;
302 
303 	ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
304 
305 	/* PF number */
306 	tlan_ctx->pf_num = hw->pf_id;
307 
308 	/* queue belongs to a specific VSI type
309 	 * VF / VM index should be programmed per vmvf_type setting:
310 	 * for vmvf_type = VF, it is VF number between 0-256
311 	 * for vmvf_type = VM, it is VM number between 0-767
312 	 * for PF or EMP this field should be set to zero
313 	 */
314 	switch (vsi->type) {
315 	case ICE_VSI_LB:
316 	case ICE_VSI_CTRL:
317 	case ICE_VSI_PF:
318 		if (ring->ch)
319 			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
320 		else
321 			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
322 		break;
323 	case ICE_VSI_VF:
324 		/* Firmware expects vmvf_num to be absolute VF ID */
325 		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
326 		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
327 		break;
328 	case ICE_VSI_SF:
329 		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
330 		break;
331 	default:
332 		return;
333 	}
334 
335 	/* make sure the context is associated with the right VSI */
336 	if (ring->ch)
337 		tlan_ctx->src_vsi = ring->ch->vsi_num;
338 	else
339 		tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
340 
341 	/* Restrict Tx timestamps to the PF VSI */
342 	switch (vsi->type) {
343 	case ICE_VSI_PF:
344 		tlan_ctx->tsyn_ena = 1;
345 		break;
346 	default:
347 		break;
348 	}
349 
350 	tlan_ctx->quanta_prof_idx = ring->quanta_prof_id;
351 
352 	tlan_ctx->tso_ena = ICE_TX_LEGACY;
353 	tlan_ctx->tso_qnum = pf_q;
354 
355 	/* Legacy or Advanced Host Interface:
356 	 * 0: Advanced Host Interface
357 	 * 1: Legacy Host Interface
358 	 */
359 	tlan_ctx->legacy_int = ICE_TX_LEGACY;
360 }
361 
362 /**
363  * ice_rx_offset - Return expected offset into page to access data
364  * @rx_ring: Ring we are requesting offset of
365  *
366  * Returns the offset value for ring into the data buffer.
367  */
368 static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
369 {
370 	if (ice_ring_uses_build_skb(rx_ring))
371 		return ICE_SKB_PAD;
372 	return 0;
373 }
374 
375 /**
376  * ice_setup_rx_ctx - Configure a receive ring context
377  * @ring: The Rx ring to configure
378  *
379  * Configure the Rx descriptor ring in RLAN context.
380  */
381 static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
382 {
383 	struct ice_vsi *vsi = ring->vsi;
384 	u32 rxdid = ICE_RXDID_FLEX_NIC;
385 	struct ice_rlan_ctx rlan_ctx;
386 	struct ice_hw *hw;
387 	u16 pf_q;
388 	int err;
389 
390 	hw = &vsi->back->hw;
391 
392 	/* Rx queue number in the global space of 2K Rx queues */
393 	pf_q = vsi->rxq_map[ring->q_index];
394 
395 	/* clear the context structure first */
396 	memset(&rlan_ctx, 0, sizeof(rlan_ctx));
397 
398 	/* Receive Queue Base Address.
399 	 * Indicates the starting address of the descriptor queue defined in
400 	 * 128 Byte units.
401 	 */
402 	rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
403 
404 	rlan_ctx.qlen = ring->count;
405 
406 	/* Receive Packet Data Buffer Size.
407 	 * The Packet Data Buffer Size is defined in 128 byte units.
408 	 */
409 	rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
410 				     BIT_ULL(ICE_RLAN_CTX_DBUF_S));
411 
412 	/* use 32 byte descriptors */
413 	rlan_ctx.dsize = 1;
414 
415 	/* Strip the Ethernet CRC bytes before the packet is posted to host
416 	 * memory.
417 	 */
418 	rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);
419 
420 	/* L2TSEL flag defines the reported L2 tags in the receive descriptor
421 	 * and must remain 1 for non-DVM capable configurations so as not to
422 	 * break backward compatibility for VF drivers. Setting this field to 0
423 	 * causes the single/outer VLAN tag to be stripped into the L2TAG2_2ND
424 	 * field of the Rx descriptor. Setting it to 1 causes the VLAN tag to
425 	 * be stripped into L2TAG1 of the Rx descriptor, which is where VFs
426 	 * check for the tag.
427 	 */
428 	if (ice_is_dvm_ena(hw))
429 		if (vsi->type == ICE_VSI_VF &&
430 		    ice_vf_is_port_vlan_ena(vsi->vf))
431 			rlan_ctx.l2tsel = 1;
432 		else
433 			rlan_ctx.l2tsel = 0;
434 	else
435 		rlan_ctx.l2tsel = 1;
436 
437 	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
438 	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
439 	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
440 
441 	/* This controls whether the VLAN is stripped from inner headers.
442 	 * The VLAN in the inner L2 header is stripped into the receive
443 	 * descriptor if enabled by this flag.
444 	 */
445 	rlan_ctx.showiv = 0;
446 
447 	/* Max packet size for this queue - must not be set to a larger value
448 	 * than 5 x DBUF
449 	 */
450 	rlan_ctx.rxmax = min_t(u32, ring->max_frame,
451 			       ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
452 
453 	/* Rx queue threshold in units of 64 */
454 	rlan_ctx.lrxqthresh = 1;
455 
456 	/* PF acts as uplink for switchdev; set flex descriptor with src_vsi
457 	 * metadata and flags to allow redirecting to PR netdev
458 	 */
459 	if (ice_is_eswitch_mode_switchdev(vsi->back)) {
460 		ring->flags |= ICE_RX_FLAGS_MULTIDEV;
461 		rxdid = ICE_RXDID_FLEX_NIC_2;
462 	}
463 
464 	/* Enable Flexible Descriptors in the queue context, which
465 	 * allows this driver to select a specific receive descriptor format.
466 	 * Increase the context priority to pick up the profile ID; the default
467 	 * is 0x01; setting it to 0x03 ensures the profile is programmed even if
468 	 * the previous context is of the same priority
469 	 */
470 	if (vsi->type != ICE_VSI_VF)
471 		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
472 	else
473 		ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
474 					false);
475 
476 	/* Absolute queue number out of 2K needs to be passed */
477 	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
478 	if (err) {
479 		dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
480 			pf_q, err);
481 		return -EIO;
482 	}
483 
484 	if (vsi->type == ICE_VSI_VF)
485 		return 0;
486 
487 	/* configure Rx buffer alignment */
488 	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
489 		ice_clear_ring_build_skb_ena(ring);
490 	else
491 		ice_set_ring_build_skb_ena(ring);
492 
493 	ring->rx_offset = ice_rx_offset(ring);
494 
495 	/* init queue specific tail register */
496 	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
497 	writel(0, ring->tail);
498 
499 	return 0;
500 }
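
/* Worked example for the context sizing above (illustrative values): with
 * rx_buf_len = 3072 the dbuf field is 3072 / 128 = 24, and with
 * max_frame = 9728 the rxmax field is min(9728, ICE_MAX_CHAINED_RX_BUFS *
 * 3072), i.e. 9728 when up to five Rx buffers may be chained per packet.
 */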
501 
502 static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
503 {
504 	void *ctx_ptr = &ring->pkt_ctx;
505 	struct xsk_cb_desc desc = {};
506 
507 	XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff);
508 	desc.src = &ctx_ptr;
509 	desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) -
510 		   sizeof(struct xdp_buff);
511 	desc.bytes = sizeof(ctx_ptr);
512 	xsk_pool_fill_cb(ring->xsk_pool, &desc);
513 }
514 
515 /**
516  * ice_get_frame_sz - calculate xdp_buff::frame_sz
517  * @rx_ring: the ring being configured
518  *
519  * Return frame size based on underlying PAGE_SIZE
520  */
521 static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring)
522 {
523 	unsigned int frame_sz;
524 
525 #if (PAGE_SIZE >= 8192)
526 	frame_sz = rx_ring->rx_buf_len;
527 #else
528 	frame_sz = ice_rx_pg_size(rx_ring) / 2;
529 #endif
530 
531 	return frame_sz;
532 }
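
/* Illustrative example: on 4K-page systems frame_sz is half of the ring's
 * Rx page size (e.g. 2048 for a 4K page) since two buffers share one page;
 * on systems with pages of 8K or larger it simply tracks ring->rx_buf_len.
 */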
533 
534 /**
535  * ice_vsi_cfg_rxq - Configure an Rx queue
536  * @ring: the ring being configured
537  *
538  * Return 0 on success and a negative value on error.
539  */
540 static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
541 {
542 	struct device *dev = ice_pf_to_dev(ring->vsi->back);
543 	u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
544 	int err;
545 
546 	if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
547 		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
548 			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
549 						 ring->q_index,
550 						 ring->q_vector->napi.napi_id,
551 						 ring->rx_buf_len);
552 			if (err)
553 				return err;
554 		}
555 
556 		ice_rx_xsk_pool(ring);
557 		if (ring->xsk_pool) {
558 			xdp_rxq_info_unreg(&ring->xdp_rxq);
559 
560 			ring->rx_buf_len =
561 				xsk_pool_get_rx_frame_size(ring->xsk_pool);
562 			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
563 						 ring->q_index,
564 						 ring->q_vector->napi.napi_id,
565 						 ring->rx_buf_len);
566 			if (err)
567 				return err;
568 			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
569 							 MEM_TYPE_XSK_BUFF_POOL,
570 							 NULL);
571 			if (err)
572 				return err;
573 			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
574 			ice_xsk_pool_fill_cb(ring);
575 
576 			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
577 				 ring->q_index);
578 		} else {
579 			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
580 				err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
581 							 ring->q_index,
582 							 ring->q_vector->napi.napi_id,
583 							 ring->rx_buf_len);
584 				if (err)
585 					return err;
586 			}
587 
588 			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
589 							 MEM_TYPE_PAGE_SHARED,
590 							 NULL);
591 			if (err)
592 				return err;
593 		}
594 	}
595 
596 	xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq);
597 	ring->xdp.data = NULL;
598 	ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
599 	err = ice_setup_rx_ctx(ring);
600 	if (err) {
601 		dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
602 			ring->q_index, err);
603 		return err;
604 	}
605 
606 	if (ring->xsk_pool) {
607 		bool ok;
608 
609 		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
610 			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
611 				 num_bufs, ring->q_index);
612 			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
613 
614 			return 0;
615 		}
616 
617 		ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs);
618 		if (!ok) {
619 			u16 pf_q = ring->vsi->rxq_map[ring->q_index];
620 
621 			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
622 				 ring->q_index, pf_q);
623 		}
624 
625 		return 0;
626 	}
627 
628 	ice_alloc_rx_bufs(ring, num_bufs);
629 
630 	return 0;
631 }
632 
633 int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
634 {
635 	if (q_idx >= vsi->num_rxq)
636 		return -EINVAL;
637 
638 	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
639 }
640 
641 /**
642  * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
643  * @vsi: VSI
644  * @ring: Rx ring to configure
645  *
646  * Determine the maximum frame size and Rx buffer length to use for a PF VSI.
647  * Set these in the associated Rx ring structure.
648  */
649 static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring)
650 {
651 	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
652 		ring->max_frame = ICE_MAX_FRAME_LEGACY_RX;
653 		ring->rx_buf_len = ICE_RXBUF_1664;
654 #if (PAGE_SIZE < 8192)
655 	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
656 		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
657 		ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
658 		ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
659 #endif
660 	} else {
661 		ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
662 		ring->rx_buf_len = ICE_RXBUF_3072;
663 	}
664 }
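
/* Illustrative outcomes of the selection above: with legacy-rx enabled the
 * ring uses 1664-byte buffers regardless of MTU; with a standard 1500-byte
 * MTU (when 2K buffers are large enough despite padding) it uses
 * (1536 - NET_IP_ALIGN)-byte buffers; otherwise, e.g. for jumbo MTUs, it
 * falls back to 3072-byte buffers and the maximum supported frame size.
 */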
665 
666 /**
667  * ice_vsi_cfg_rxqs - Configure the VSI for Rx
668  * @vsi: the VSI being configured
669  *
670  * Return 0 on success and a negative value on error
671  * Configure the Rx VSI for operation.
672  */
673 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
674 {
675 	u16 i;
676 
677 	/* set up individual rings */
678 	ice_for_each_rxq(vsi, i) {
679 		struct ice_rx_ring *ring = vsi->rx_rings[i];
680 		int err;
681 
682 		if (vsi->type != ICE_VSI_VF)
683 			ice_vsi_cfg_frame_size(vsi, ring);
684 
685 		err = ice_vsi_cfg_rxq(ring);
686 		if (err)
687 			return err;
688 	}
689 
690 	return 0;
691 }
692 
693 /**
694  * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
695  * @qs_cfg: gathered variables needed for pf->vsi queues assignment
696  *
697  * This function first tries to find contiguous space. If it is not successful,
698  * it tries with the scatter approach.
699  *
700  * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
701  */
702 int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
703 {
704 	int ret = 0;
705 
706 	ret = __ice_vsi_get_qs_contig(qs_cfg);
707 	if (ret) {
708 		/* contig failed, so try with scatter approach */
709 		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
710 		qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
711 					qs_cfg->scatter_count);
712 		ret = __ice_vsi_get_qs_sc(qs_cfg);
713 	}
714 	return ret;
715 }
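
/* Illustrative caller sketch (field values are examples only, not
 * requirements), roughly how a Tx queue assignment could be requested:
 *
 *	struct ice_qs_cfg tx_qs_cfg = {
 *		.qs_mutex	= &pf->avail_q_mutex,
 *		.pf_map		= pf->avail_txqs,
 *		.pf_map_size	= pf->max_pf_txqs,
 *		.q_count	= vsi->alloc_txq,
 *		.scatter_count	= ICE_MAX_SCATTER_TXQS,
 *		.vsi_map	= vsi->txq_map,
 *		.vsi_map_offset	= 0,
 *		.mapping_mode	= ICE_VSI_MAP_CONTIG,
 *	};
 *
 *	ret = __ice_vsi_get_qs(&tx_qs_cfg);
 */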
716 
717 /**
718  * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
719  * @vsi: the VSI being configured
720  * @ena: start or stop the Rx ring
721  * @rxq_idx: 0-based Rx queue index for the VSI passed in
722  * @wait: wait or don't wait for configuration to finish in hardware
723  *
724  * Return 0 on success and negative on error.
725  */
726 int
727 ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
728 {
729 	int pf_q = vsi->rxq_map[rxq_idx];
730 	struct ice_pf *pf = vsi->back;
731 	struct ice_hw *hw = &pf->hw;
732 	u32 rx_reg;
733 
734 	rx_reg = rd32(hw, QRX_CTRL(pf_q));
735 
736 	/* Skip if the queue is already in the requested state */
737 	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
738 		return 0;
739 
740 	/* turn on/off the queue */
741 	if (ena)
742 		rx_reg |= QRX_CTRL_QENA_REQ_M;
743 	else
744 		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
745 	wr32(hw, QRX_CTRL(pf_q), rx_reg);
746 
747 	if (!wait)
748 		return 0;
749 
750 	ice_flush(hw);
751 	return ice_pf_rxq_wait(pf, pf_q, ena);
752 }
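
/* Illustrative usage sketch: callers that disable a ring without waiting
 * typically follow up with ice_vsi_wait_one_rx_ring(), e.g.:
 *
 *	err = ice_vsi_ctrl_one_rx_ring(vsi, false, rxq_idx, false);
 *	if (!err)
 *		err = ice_vsi_wait_one_rx_ring(vsi, false, rxq_idx);
 */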
753 
754 /**
755  * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
756  * @vsi: the VSI being configured
757  * @ena: true/false to verify Rx ring has been enabled/disabled respectively
758  * @rxq_idx: 0-based Rx queue index for the VSI passed in
759  *
760  * This routine will wait for the given Rx queue of the VSI to reach the
761  * enabled or disabled state. Returns -ETIMEDOUT in case of failing to reach
762  * the requested state after multiple retries; else will return 0 in case of
763  * success.
764  */
765 int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
766 {
767 	int pf_q = vsi->rxq_map[rxq_idx];
768 	struct ice_pf *pf = vsi->back;
769 
770 	return ice_pf_rxq_wait(pf, pf_q, ena);
771 }
772 
773 /**
774  * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
775  * @vsi: the VSI being configured
776  *
777  * We allocate one q_vector per queue interrupt. If allocation fails we
778  * return -ENOMEM.
779  */
780 int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
781 {
782 	struct device *dev = ice_pf_to_dev(vsi->back);
783 	u16 v_idx;
784 	int err;
785 
786 	if (vsi->q_vectors[0]) {
787 		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
788 		return -EEXIST;
789 	}
790 
791 	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
792 		err = ice_vsi_alloc_q_vector(vsi, v_idx);
793 		if (err)
794 			goto err_out;
795 	}
796 
797 	return 0;
798 
799 err_out:
800 	while (v_idx--)
801 		ice_free_q_vector(vsi, v_idx);
802 
803 	dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
804 		vsi->num_q_vectors, vsi->vsi_num, err);
805 	vsi->num_q_vectors = 0;
806 	return err;
807 }
808 
809 /**
810  * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
811  * @vsi: the VSI being configured
812  *
813  * This function maps descriptor rings to the queue-specific vectors allotted
814  * through the MSI-X enabling code. On a constrained vector budget, we map Tx
815  * and Rx rings to the vector as "efficiently" as possible.
816  */
817 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
818 {
819 	int q_vectors = vsi->num_q_vectors;
820 	u16 tx_rings_rem, rx_rings_rem;
821 	int v_id;
822 
823 	/* initially assign the remaining ring counts to the VSI's queue counts */
824 	tx_rings_rem = vsi->num_txq;
825 	rx_rings_rem = vsi->num_rxq;
826 
827 	for (v_id = 0; v_id < q_vectors; v_id++) {
828 		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
829 		u8 tx_rings_per_v, rx_rings_per_v;
830 		u16 q_id, q_base;
831 
832 		/* Tx rings mapping to vector */
833 		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
834 						  q_vectors - v_id);
835 		q_vector->num_ring_tx = tx_rings_per_v;
836 		q_vector->tx.tx_ring = NULL;
837 		q_vector->tx.itr_idx = ICE_TX_ITR;
838 		q_base = vsi->num_txq - tx_rings_rem;
839 
840 		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
841 			struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
842 
843 			tx_ring->q_vector = q_vector;
844 			tx_ring->next = q_vector->tx.tx_ring;
845 			q_vector->tx.tx_ring = tx_ring;
846 		}
847 		tx_rings_rem -= tx_rings_per_v;
848 
849 		/* Rx rings mapping to vector */
850 		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
851 						  q_vectors - v_id);
852 		q_vector->num_ring_rx = rx_rings_per_v;
853 		q_vector->rx.rx_ring = NULL;
854 		q_vector->rx.itr_idx = ICE_RX_ITR;
855 		q_base = vsi->num_rxq - rx_rings_rem;
856 
857 		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
858 			struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
859 
860 			rx_ring->q_vector = q_vector;
861 			rx_ring->next = q_vector->rx.rx_ring;
862 			q_vector->rx.rx_ring = rx_ring;
863 		}
864 		rx_rings_rem -= rx_rings_per_v;
865 	}
866 
867 	if (ice_is_xdp_ena_vsi(vsi))
868 		ice_map_xdp_rings(vsi);
869 }
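
/* Worked example (illustrative counts): with num_txq = 10 and 4 q_vectors,
 * DIV_ROUND_UP spreads the Tx rings as 3, 3, 2, 2 across the vectors; Rx
 * rings are distributed the same way from their own remaining count.
 */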
870 
871 /**
872  * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
873  * @vsi: the VSI having memory freed
874  */
875 void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
876 {
877 	int v_idx;
878 
879 	ice_for_each_q_vector(vsi, v_idx)
880 		ice_free_q_vector(vsi, v_idx);
881 
882 	vsi->num_q_vectors = 0;
883 }
884 
885 /**
886  * ice_vsi_cfg_txq - Configure single Tx queue
887  * @vsi: the VSI that queue belongs to
888  * @ring: Tx ring to be configured
889  * @qg_buf: queue group buffer
890  */
891 static int
892 ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
893 		struct ice_aqc_add_tx_qgrp *qg_buf)
894 {
895 	u8 buf_len = struct_size(qg_buf, txqs, 1);
896 	struct ice_tlan_ctx tlan_ctx = { 0 };
897 	struct ice_aqc_add_txqs_perq *txq;
898 	struct ice_channel *ch = ring->ch;
899 	struct ice_pf *pf = vsi->back;
900 	struct ice_hw *hw = &pf->hw;
901 	int status;
902 	u16 pf_q;
903 	u8 tc;
904 
905 	/* Configure XPS */
906 	ice_cfg_xps_tx_ring(ring);
907 
908 	pf_q = ring->reg_idx;
909 	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
910 	/* copy context contents into the qg_buf */
911 	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
912 	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
913 		    ice_tlan_ctx_info);
914 
915 	/* init queue specific tail reg. It is referred to as the
916 	 * transmit comm scheduler queue doorbell.
917 	 */
918 	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);
919 
920 	if (IS_ENABLED(CONFIG_DCB))
921 		tc = ring->dcb_tc;
922 	else
923 		tc = 0;
924 
925 	/* Add unique software queue handle of the Tx queue per
926 	 * TC into the VSI Tx ring
927 	 */
928 	ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
929 
930 	if (ch)
931 		status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
932 					 ring->q_handle, 1, qg_buf, buf_len,
933 					 NULL);
934 	else
935 		status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
936 					 ring->q_handle, 1, qg_buf, buf_len,
937 					 NULL);
938 	if (status) {
939 		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
940 			status);
941 		return status;
942 	}
943 
944 	/* Add Tx Queue TEID into the VSI Tx ring from the
945 	 * response. This will complete configuring and
946 	 * enabling the queue.
947 	 */
948 	txq = &qg_buf->txqs[0];
949 	if (pf_q == le16_to_cpu(txq->txq_id))
950 		ring->txq_teid = le32_to_cpu(txq->q_teid);
951 
952 	return 0;
953 }
954 
955 int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
956 			   u16 q_idx)
957 {
958 	DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
959 
960 	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
961 		return -EINVAL;
962 
963 	qg_buf->num_txqs = 1;
964 
965 	return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
966 }
967 
968 /**
969  * ice_vsi_cfg_txqs - Configure the VSI for Tx
970  * @vsi: the VSI being configured
971  * @rings: Tx ring array to be configured
972  * @count: number of Tx ring array elements
973  *
974  * Return 0 on success and a negative value on error
975  * Configure the Tx VSI for operation.
976  */
977 static int
978 ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
979 {
980 	DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
981 	int err = 0;
982 	u16 q_idx;
983 
984 	qg_buf->num_txqs = 1;
985 
986 	for (q_idx = 0; q_idx < count; q_idx++) {
987 		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
988 		if (err)
989 			break;
990 	}
991 
992 	return err;
993 }
994 
995 /**
996  * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
997  * @vsi: the VSI being configured
998  *
999  * Return 0 on success and a negative value on error
1000  * Configure the Tx VSI for operation.
1001  */
1002 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
1003 {
1004 	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
1005 }
1006 
1007 /**
1008  * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
1009  * @vsi: the VSI being configured
1010  *
1011  * Return 0 on success and a negative value on error
1012  * Configure the Tx queues dedicated for XDP in given VSI for operation.
1013  */
1014 int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
1015 {
1016 	int ret;
1017 	int i;
1018 
1019 	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
1020 	if (ret)
1021 		return ret;
1022 
1023 	ice_for_each_rxq(vsi, i)
1024 		ice_tx_xsk_pool(vsi, i);
1025 
1026 	return 0;
1027 }
1028 
1029 /**
1030  * ice_cfg_itr - configure the initial interrupt throttle values
1031  * @hw: pointer to the HW structure
1032  * @q_vector: interrupt vector that's being configured
1033  *
1034  * Configure interrupt throttling values for the ring containers that are
1035  * associated with the interrupt vector passed in.
1036  */
1037 void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
1038 {
1039 	ice_cfg_itr_gran(hw);
1040 
1041 	if (q_vector->num_ring_rx)
1042 		ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);
1043 
1044 	if (q_vector->num_ring_tx)
1045 		ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);
1046 
1047 	ice_write_intrl(q_vector, q_vector->intrl);
1048 }
1049 
1050 /**
1051  * ice_cfg_txq_interrupt - configure interrupt on Tx queue
1052  * @vsi: the VSI being configured
1053  * @txq: Tx queue being mapped to MSI-X vector
1054  * @msix_idx: MSI-X vector index within the function
1055  * @itr_idx: ITR index of the interrupt cause
1056  *
1057  * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
1058  * within the function space.
1059  */
1060 void
1061 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
1062 {
1063 	struct ice_pf *pf = vsi->back;
1064 	struct ice_hw *hw = &pf->hw;
1065 	u32 val;
1066 
1067 	itr_idx = FIELD_PREP(QINT_TQCTL_ITR_INDX_M, itr_idx);
1068 
1069 	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
1070 	      FIELD_PREP(QINT_TQCTL_MSIX_INDX_M, msix_idx);
1071 
1072 	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
1073 	if (ice_is_xdp_ena_vsi(vsi)) {
1074 		u32 xdp_txq = txq + vsi->num_xdp_txq;
1075 
1076 		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
1077 		     val);
1078 	}
1079 	ice_flush(hw);
1080 }
1081 
1082 /**
1083  * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
1084  * @vsi: the VSI being configured
1085  * @rxq: Rx queue being mapped to MSI-X vector
1086  * @msix_idx: MSI-X vector index within the function
1087  * @itr_idx: ITR index of the interrupt cause
1088  *
1089  * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
1090  * within the function space.
1091  */
1092 void
1093 ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
1094 {
1095 	struct ice_pf *pf = vsi->back;
1096 	struct ice_hw *hw = &pf->hw;
1097 	u32 val;
1098 
1099 	itr_idx = FIELD_PREP(QINT_RQCTL_ITR_INDX_M, itr_idx);
1100 
1101 	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
1102 	      FIELD_PREP(QINT_RQCTL_MSIX_INDX_M, msix_idx);
1103 
1104 	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
1105 
1106 	ice_flush(hw);
1107 }
1108 
1109 /**
1110  * ice_trigger_sw_intr - trigger a software interrupt
1111  * @hw: pointer to the HW structure
1112  * @q_vector: interrupt vector to trigger the software interrupt for
1113  */
1114 void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector)
1115 {
1116 	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
1117 	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
1118 	     GLINT_DYN_CTL_SWINT_TRIG_M |
1119 	     GLINT_DYN_CTL_INTENA_M);
1120 }
1121 
1122 /**
1123  * ice_vsi_stop_tx_ring - Disable single Tx ring
1124  * @vsi: the VSI being configured
1125  * @rst_src: reset source
1126  * @rel_vmvf_num: Relative ID of VF/VM
1127  * @ring: Tx ring to be stopped
1128  * @txq_meta: Meta data of Tx ring to be stopped
1129  */
1130 int
1131 ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
1132 		     u16 rel_vmvf_num, struct ice_tx_ring *ring,
1133 		     struct ice_txq_meta *txq_meta)
1134 {
1135 	struct ice_pf *pf = vsi->back;
1136 	struct ice_q_vector *q_vector;
1137 	struct ice_hw *hw = &pf->hw;
1138 	int status;
1139 	u32 val;
1140 
1141 	/* clear cause_ena bit for disabled queues */
1142 	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
1143 	val &= ~QINT_TQCTL_CAUSE_ENA_M;
1144 	wr32(hw, QINT_TQCTL(ring->reg_idx), val);
1145 
1146 	/* software is expected to wait for 100 ns */
1147 	ndelay(100);
1148 
1149 	/* trigger a software interrupt for the vector
1150 	 * associated to the queue to schedule NAPI handler
1151 	 */
1152 	q_vector = ring->q_vector;
1153 	if (q_vector && !(vsi->vf && ice_is_vf_disabled(vsi->vf)))
1154 		ice_trigger_sw_intr(hw, q_vector);
1155 
1156 	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
1157 				 txq_meta->tc, 1, &txq_meta->q_handle,
1158 				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
1159 				 rel_vmvf_num, NULL);
1160 
1161 	/* if the disable queue command was exercised during an
1162 	 * active reset flow, -EBUSY is returned.
1163 	 * This is not an error as the reset operation disables
1164 	 * queues at the hardware level anyway.
1165 	 */
1166 	if (status == -EBUSY) {
1167 		dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
1168 	} else if (status == -ENOENT) {
1169 		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
1170 	} else if (status) {
1171 		dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
1172 			status);
1173 		return status;
1174 	}
1175 
1176 	return 0;
1177 }
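
/* Illustrative usage sketch (based on how a caller would typically stop a
 * single ring; details are an example, not a requirement):
 *
 *	struct ice_txq_meta txq_meta = { };
 *
 *	ice_fill_txq_meta(vsi, ring, &txq_meta);
 *	err = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num, ring,
 *				   &txq_meta);
 */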
1178 
1179 /**
1180  * ice_fill_txq_meta - Prepare the Tx queue's meta data
1181  * @vsi: VSI that ring belongs to
1182  * @ring: ring that txq_meta will be based on
1183  * @txq_meta: a helper struct that wraps Tx queue's information
1184  *
1185  * Set up a helper struct that will contain all the necessary fields that
1186  * are needed for stopping the Tx queue.
1187  */
1188 void
1189 ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
1190 		  struct ice_txq_meta *txq_meta)
1191 {
1192 	struct ice_channel *ch = ring->ch;
1193 	u8 tc;
1194 
1195 	if (IS_ENABLED(CONFIG_DCB))
1196 		tc = ring->dcb_tc;
1197 	else
1198 		tc = 0;
1199 
1200 	txq_meta->q_id = ring->reg_idx;
1201 	txq_meta->q_teid = ring->txq_teid;
1202 	txq_meta->q_handle = ring->q_handle;
1203 	if (ch) {
1204 		txq_meta->vsi_idx = ch->ch_vsi->idx;
1205 		txq_meta->tc = 0;
1206 	} else {
1207 		txq_meta->vsi_idx = vsi->idx;
1208 		txq_meta->tc = tc;
1209 	}
1210 }
1211