xref: /linux/drivers/net/ethernet/intel/ice/ice_base.c (revision 2c63221cd9e5c0dad0424029aeb1c40faada8330)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_base.h"
#include "ice_dcb_lib.h"

/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}

/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
	for (index = 0; index < i; index++) {
		/* undo using the same offset the mapping loop applied */
		clear_bit(qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset],
			  qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}
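
/*
 * Illustration (not driver code): with a 16-bit PF queue bitmap of
 * 0b0000_0110_0000_1001 (bits 0, 3, 9 and 10 taken) and q_count = 4, the
 * contiguous helper above finds bits 4-7 free in one run and maps
 * vsi_map[0..3] = 4..7, while the scatter helper picks the first four free
 * bits individually and maps vsi_map[0..3] = 1, 2, 4, 5.
 */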

/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT if the queue fails to reach the requested state after
 * multiple retries; otherwise returns 0.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}
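
/*
 * Usage sketch (illustrative; mirrors ice_vsi_ctrl_rx_ring() below): after
 * requesting a state change through QRX_CTRL[QENA_REQ], poll until hardware
 * reflects it in QENA_STAT:
 *
 *	wr32(hw, QRX_CTRL(pf_q), rx_reg | QRX_CTRL_QENA_REQ_M);
 *	if (ice_pf_rxq_wait(pf, pf_q, true))
 *		dev_err(&pf->pdev->dev, "Rx queue %d enable timeout\n", pf_q);
 */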

/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	if (vsi->type == ICE_VSI_VF)
		goto out;
	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases will register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;

	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
			v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;
	ice_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	devm_kfree(&pf->pdev->dev, q_vector);
	vsi->q_vectors[v_idx] = NULL;
}

/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

	/* no need to update global register if ITR gran is already set */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
	     GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
	     GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
	     GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
	      GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
		return;

	regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
		  GLINT_CTL_ITR_GRAN_200_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
		  GLINT_CTL_ITR_GRAN_100_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
		  GLINT_CTL_ITR_GRAN_50_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
		  GLINT_CTL_ITR_GRAN_25_M);
	wr32(hw, GLINT_CTL, regval);
}

/**
 * ice_calc_q_handle - calculate the queue handle
 * @vsi: VSI that ring belongs to
 * @ring: ring to get the absolute queue index
 * @tc: traffic class number
 */
static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
{
	WARN_ONCE(ice_ring_is_xdp(ring) && tc,
		  "XDP ring can't belong to TC other than 0");

	/* The queue's index within its TC is its absolute queue index minus
	 * the queue offset of the TC that the ring belongs to.
	 */
	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
}
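
/*
 * Worked example (illustrative numbers): if TC 1 owns queues 8-15, then
 * tc_cfg.tc_info[1].qoffset == 8, and a ring with absolute q_index 10 gets
 * queue handle 10 - 8 == 2 within that TC.
 */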

/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
		/* fall through */
	case ICE_VSI_PF:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
int ice_setup_rx_ctx(struct ice_ring *ring)
{
	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
	struct ice_vsi *vsi = ring->vsi;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	struct ice_hw *hw;
	u32 regval;
	u16 pf_q;
	int err;

	hw = &vsi->back->hw;

	/* the Rx queue's number in the global space of all 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

	ring->rx_buf_len = vsi->rx_buf_len;

	if (ring->vsi->type == ICE_VSI_PF) {
		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
					 ring->q_index);

		ring->xsk_umem = ice_xsk_umem(ring);
		if (ring->xsk_umem) {
			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

			ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
					   XDP_PACKET_HEADROOM;
			/* For AF_XDP ZC, we disallow packets to span
			 * multiple buffers, thus letting us skip that
			 * handling in the fast path.
			 */
			chain_len = 1;
			ring->zca.free = ice_zca_free;
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_ZERO_COPY,
							 &ring->zca);
			if (err)
				return err;

			dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
				 ring->q_index);
		} else {
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
				xdp_rxq_info_reg(&ring->xdp_rxq,
						 ring->netdev,
						 ring->q_index);

			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err)
				return err;
		}
	}
	/* Receive Queue Base Address.
	 * Indicates the starting address of the descriptor queue defined in
	 * 128 Byte units.
	 */
	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = 1;

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
	rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers.
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
			       chain_len * ring->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context which
	 * allows this driver to select a specific receive descriptor format
	 */
	if (vsi->type != ICE_VSI_VF) {
		regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
		regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
			QRXFLXP_CNTXT_RXDID_IDX_M;

		/* increase context priority to pick up profile ID; default is
		 * 0x01; setting to 0x03 ensures the profile is programmed even
		 * if the previous context had the same priority
		 */
		regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
			QRXFLXP_CNTXT_RXDID_PRIO_M;

		wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
	}

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(&vsi->back->pdev->dev,
			"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* configure Rx buffer alignment */
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		ice_clear_ring_build_skb_ena(ring);
	else
		ice_set_ring_build_skb_ena(ring);

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);

	err = ring->xsk_umem ?
	      ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
	      ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
	if (err)
		dev_info(&vsi->back->pdev->dev,
			 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
			 ring->xsk_umem ? "UMEM enabled " : "",
			 ring->q_index, pf_q);

	return 0;
}
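
/*
 * Worked example (illustrative values): with rx_buf_len == 2048 the dbuf
 * field above becomes 2048 >> 7 == 16 (128-byte units), and with the
 * default chain_len of ICE_MAX_CHAINED_RX_BUFS == 5 the rxmax cap is
 * min(vsi->max_frame, 5 * 2048) == min(vsi->max_frame, 10240) bytes.
 */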

/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * This function first tries to find contiguous space. If that fails, it
 * falls back to the scatter approach.
 *
 * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
 */
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}
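
/*
 * Caller sketch (hypothetical helper; field choices mirror how the driver
 * builds Tx requests elsewhere and are assumptions here): the PF-wide
 * bitmap, the VSI map and the counts are gathered into one struct before
 * calling __ice_vsi_get_qs().
 */
static inline int ice_example_get_tx_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};

	/* falls back to scatter internally if contiguous space runs out */
	return __ice_vsi_get_qs(&tx_qs_cfg);
}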

/**
 * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 * @rxq_idx: Rx queue index
 */
int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int ret = 0;
	u32 rx_reg;

	rx_reg = rd32(hw, QRX_CTRL(pf_q));

	/* Skip if the queue is already in the requested state */
	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
		return 0;

	/* turn on/off the queue */
	if (ena)
		rx_reg |= QRX_CTRL_QENA_REQ_M;
	else
		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	/* wait for the change to finish */
	ret = ice_pf_rxq_wait(pf, pf_q, ena);
	if (ret)
		dev_err(&pf->pdev->dev,
			"VSI idx %d Rx ring %d %sable timeout\n",
			vsi->idx, pf_q, (ena ? "en" : "dis"));

	return ret;
}

/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int v_idx = 0, num_q_vectors;
	int err;

	if (vsi->q_vectors[0]) {
		dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			vsi->vsi_num);
		return -EEXIST;
	}

	num_q_vectors = vsi->num_q_vectors;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(&pf->pdev->dev,
		"Failed to allocate %d q_vector for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	int tx_rings_rem, rx_rings_rem;
	int v_id;

	/* start with all of the VSI's Tx and Rx rings left to assign */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		int tx_rings_per_v, rx_rings_per_v, q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.ring;
			q_vector->tx.ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.ring;
			q_vector->rx.ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}
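
/*
 * Worked example (illustrative counts): with num_txq == 10 and four vectors,
 * the loop above hands out DIV_ROUND_UP(10, 4) == 3 rings to vector 0, then
 * DIV_ROUND_UP(7, 3) == 3 to vector 1, DIV_ROUND_UP(4, 2) == 2 to vector 2
 * and the remaining 2 to vector 3, i.e. a 3/3/2/2 split.
 */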

/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx)
		ice_free_q_vector(vsi, v_idx);
}

/**
 * ice_vsi_cfg_txq - Configure single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @qg_buf: queue group buffer
 */
int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
		struct ice_aqc_add_tx_qgrp *qg_buf)
{
	struct ice_tlan_ctx tlan_ctx = { 0 };
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_pf *pf = vsi->back;
	u8 buf_len = sizeof(*qg_buf);
	enum ice_status status;
	u16 pf_q;
	u8 tc;

	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg. It is referred to as the transmit
	 * comm scheduler queue doorbell.
	 */
	ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	/* Store the Tx queue's unique, per-TC software queue handle in the
	 * VSI Tx ring
	 */
	ring->q_handle = ice_calc_q_handle(vsi, ring, tc);

	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
				 1, qg_buf, buf_len, NULL);
	if (status) {
		dev_err(&pf->pdev->dev,
			"Failed to set LAN Tx queue context, error: %d\n",
			status);
		return -ENODEV;
	}

	/* Add Tx Queue TEID into the VSI Tx ring from the
	 * response. This will complete configuring and
	 * enabling the queue.
	 */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);

	return 0;
}
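
/*
 * Usage sketch (illustrative; buf_len above implies one queue per request):
 * callers typically zero one queue-group buffer, mark it as carrying a
 * single queue and feed each ring through ice_vsi_cfg_txq():
 *
 *	struct ice_aqc_add_tx_qgrp qg_buf = { .num_txqs = 1 };
 *	int q, err;
 *
 *	for (q = 0; q < vsi->num_txq; q++) {
 *		err = ice_vsi_cfg_txq(vsi, vsi->tx_rings[q], &qg_buf);
 *		if (err)
 *			break;
 *	}
 */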

/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
 */
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	ice_cfg_itr_gran(hw);

	if (q_vector->num_ring_rx) {
		struct ice_ring_container *rc = &q_vector->rx;

		/* if this value is set then don't overwrite with default */
		if (!rc->itr_setting)
			rc->itr_setting = ICE_DFLT_RX_ITR;

		rc->target_itr = ITR_TO_REG(rc->itr_setting);
		rc->next_update = jiffies + 1;
		rc->current_itr = rc->target_itr;
		wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
	}

	if (q_vector->num_ring_tx) {
		struct ice_ring_container *rc = &q_vector->tx;

		/* if this value is set then don't overwrite with default */
		if (!rc->itr_setting)
			rc->itr_setting = ICE_DFLT_TX_ITR;

		rc->target_itr = ITR_TO_REG(rc->itr_setting);
		rc->next_update = jiffies + 1;
		rc->current_itr = rc->target_itr;
		wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
	}
}
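
/*
 * Worked example (illustrative; assumes the 2 usec granularity programmed by
 * ice_cfg_itr_gran() above): an itr_setting of 50 usecs is aligned by
 * ITR_REG_ALIGN() and written to GLINT_ITR as 25 hardware interval units,
 * so the device waits at least 25 * 2 == 50 usecs between interrupts.
 */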

/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @vsi: the VSI being configured
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;

	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);

	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
	if (ice_is_xdp_ena_vsi(vsi)) {
		u32 xdp_txq = txq + vsi->num_xdp_txq;

		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), val);
	}
	ice_flush(hw);
}
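
/*
 * Worked example (symbolic; the shift/mask values come from the register
 * header): msix_idx == 5 with itr_idx == 1 programs QINT_TQCTL to
 * QINT_TQCTL_CAUSE_ENA_M | (1 << QINT_TQCTL_ITR_INDX_S) |
 * (5 << QINT_TQCTL_MSIX_INDX_S), i.e. cause enabled, ITR index 1,
 * MSI-X vector 5.
 */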

/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;

	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);

	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

	ice_flush(hw);
}

/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector to trigger the software interrupt for
 */
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
}

/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @ring: Tx ring to be stopped
 * @txq_meta: Meta data of Tx ring to be stopped
 */
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		     u16 rel_vmvf_num, struct ice_ring *ring,
		     struct ice_txq_meta *txq_meta)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	u32 val;

	/* clear cause_ena bit for disabled queues */
	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
	val &= ~QINT_TQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_TQCTL(ring->reg_idx), val);

	/* software is expected to wait for 100 ns */
	ndelay(100);

	/* trigger a software interrupt for the vector
	 * associated with the queue to schedule the NAPI handler
	 */
	q_vector = ring->q_vector;
	if (q_vector)
		ice_trigger_sw_intr(hw, q_vector);

	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
				 txq_meta->tc, 1, &txq_meta->q_handle,
				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
				 rel_vmvf_num, NULL);

	/* if the disable queue command was exercised during an
	 * active reset flow, ICE_ERR_RESET_ONGOING is returned.
	 * This is not an error as the reset operation disables
	 * queues at the hardware level anyway.
	 */
	if (status == ICE_ERR_RESET_ONGOING) {
		dev_dbg(&vsi->back->pdev->dev,
			"Reset in progress. LAN Tx queues already disabled\n");
	} else if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_dbg(&vsi->back->pdev->dev,
			"LAN Tx queues do not exist, nothing to disable\n");
	} else if (status) {
		dev_err(&vsi->back->pdev->dev,
			"Failed to disable LAN Tx queues, error: %d\n", status);
		return -ENODEV;
	}

	return 0;
}

/**
 * ice_fill_txq_meta - Prepare the Tx queue's meta data
 * @vsi: VSI that ring belongs to
 * @ring: ring that txq_meta will be based on
 * @txq_meta: a helper struct that wraps Tx queue's information
 *
 * Set up a helper struct that will contain all the necessary fields
 * needed for stopping the Tx queue
 */
void
ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
		  struct ice_txq_meta *txq_meta)
{
	u8 tc;

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	txq_meta->q_id = ring->reg_idx;
	txq_meta->q_teid = ring->txq_teid;
	txq_meta->q_handle = ring->q_handle;
	txq_meta->vsi_idx = vsi->idx;
	txq_meta->tc = tc;
}
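
/*
 * Usage sketch (illustrative): callers snapshot the queue's identity into a
 * struct ice_txq_meta while the ring state is still valid, then hand both to
 * ice_vsi_stop_tx_ring():
 *
 *	struct ice_txq_meta txq_meta = { };
 *	int err;
 *
 *	ice_fill_txq_meta(vsi, ring, &txq_meta);
 *	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, ring, &txq_meta);
 */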
858