xref: /linux/drivers/net/ethernet/intel/ice/virt/queues.c (revision 7a7c52645ce62314cdd69815e9d8fcb33e0042d5)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3 
4 #include "virtchnl.h"
5 #include "queues.h"
6 #include "ice_vf_lib_private.h"
7 #include "ice.h"
8 #include "ice_base.h"
9 #include "ice_lib.h"
10 
11 /**
12  * ice_vc_get_max_frame_size - get max frame size allowed for VF
13  * @vf: VF used to determine max frame size
14  *
15  * Max frame size is determined based on the current port's max frame size and
16  * whether a port VLAN is configured on this VF. The VF is not aware whether
17  * it is in a port VLAN, so the PF must account for this both when checking
18  * frame sizes and when reporting the max frame size to the VF.
19  */
20 u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
21 {
22 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
23 	u16 max_frame_size;
24 
25 	max_frame_size = pi->phy.link_info.max_frame_size;
26 
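	/* e.g. if the port reports a 9728 byte max frame and a port VLAN is
	 * enabled, the VF is limited to 9724 bytes so the PF-inserted VLAN
	 * tag (VLAN_HLEN == 4) still fits.
	 */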
27 	if (ice_vf_is_port_vlan_ena(vf))
28 		max_frame_size -= VLAN_HLEN;
29 
30 	return max_frame_size;
31 }
32 
33 /**
34  * ice_vc_isvalid_q_id - check whether a queue ID is valid for a VSI
35  * @vsi: VSI to check queue ID against
36  * @qid: VSI relative queue ID
37  *
38  * Return: true if the queue ID is valid for this VSI, false otherwise.
39  */
40 static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
41 {
42 	/* allocated Tx and Rx queues should be always equal for VF VSI */
43 	return qid < vsi->alloc_txq;
44 }
45 
46 /**
47  * ice_vc_isvalid_ring_len - check whether a requested ring length is valid
48  * @ring_len: length of ring
49  *
50  * Return: true if the ring length is zero, or is a multiple of
51  * ICE_REQ_DESC_MULTIPLE within [ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC].
52  */
53 static bool ice_vc_isvalid_ring_len(u16 ring_len)
54 {
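	/* e.g. 0 and 64 are accepted, while 100 is rejected because it is
	 * not a multiple of ICE_REQ_DESC_MULTIPLE (32 descriptors).
	 */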
55 	return ring_len == 0 ||
56 	       (ring_len >= ICE_MIN_NUM_DESC &&
57 		ring_len <= ICE_MAX_NUM_DESC &&
58 		!(ring_len % ICE_REQ_DESC_MULTIPLE));
59 }
60 
61 /**
62  * ice_vf_cfg_qs_bw - Configure per queue bandwidth
63  * @vf: pointer to the VF info
64  * @num_queues: number of queues to be configured
65  *
66  * Configure per queue bandwidth.
67  *
68  * Return: 0 on success or negative error value.
69  */
70 static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
71 {
72 	struct ice_hw *hw = &vf->pf->hw;
73 	struct ice_vsi *vsi;
74 	int ret;
75 	u16 i;
76 
77 	vsi = ice_get_vf_vsi(vf);
78 	if (!vsi)
79 		return -EINVAL;
80 
81 	for (i = 0; i < num_queues; i++) {
82 		u32 p_rate, min_rate;
83 		u8 tc;
84 
85 		p_rate = vf->qs_bw[i].peak;
86 		min_rate = vf->qs_bw[i].committed;
87 		tc = vf->qs_bw[i].tc;
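		/* A zero peak or committed rate means "no limit": revert the
		 * queue to the scheduler's default bandwidth instead of
		 * programming an explicit cap or guarantee.
		 */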
88 		if (p_rate)
89 			ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
90 					       vf->qs_bw[i].queue_id,
91 					       ICE_MAX_BW, p_rate);
92 		else
93 			ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
94 						    vf->qs_bw[i].queue_id,
95 						    ICE_MAX_BW);
96 		if (ret)
97 			return ret;
98 
99 		if (min_rate)
100 			ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
101 					       vf->qs_bw[i].queue_id,
102 					       ICE_MIN_BW, min_rate);
103 		else
104 			ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
105 						    vf->qs_bw[i].queue_id,
106 						    ICE_MIN_BW);
107 
108 		if (ret)
109 			return ret;
110 	}
111 
112 	return 0;
113 }
114 
115 /**
116  * ice_vf_cfg_q_quanta_profile - Configure quanta profile
117  * @vf: pointer to the VF info
118  * @quanta_prof_idx: pointer to the quanta profile index
119  * @quanta_size: quanta size to be set
120  *
121  * This function chooses an available quanta profile and configures the
122  * register. The quanta profiles are divided evenly among the device's
123  * functions and are then available to that PF and its VFs. The first profile
124  * of each function's slice is a reserved default; only the quanta size of the
125  * remaining, unused profiles can be modified.
126  *
127  * Return: 0 on success or negative error value.
128  */
129 static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
130 				       u16 *quanta_prof_idx)
131 {
132 	const u16 n_desc = calc_quanta_desc(quanta_size);
133 	struct ice_hw *hw = &vf->pf->hw;
134 	const u16 n_cmd = 2 * n_desc;
135 	struct ice_pf *pf = vf->pf;
136 	u16 per_pf, begin_id;
137 	u8 n_used;
138 	u32 reg;
139 
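	/* The global quanta profile table is carved into equal per-function
	 * slices; begin_id is the first profile owned by this PF and doubles
	 * as its reserved default profile. For example, if the device exposed
	 * 16 profiles across 8 functions, each function would own two.
	 */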
140 	begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs *
141 		   hw->logical_pf_id;
142 
143 	if (quanta_size == ICE_DFLT_QUANTA) {
144 		*quanta_prof_idx = begin_id;
145 	} else {
146 		per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) /
147 			 hw->dev_caps.num_funcs;
148 		n_used = pf->num_quanta_prof_used;
149 		if (n_used < per_pf) {
150 			*quanta_prof_idx = begin_id + 1 + n_used;
151 			pf->num_quanta_prof_used++;
152 		} else {
153 			return -EINVAL;
154 		}
155 	}
156 
157 	reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) |
158 	      FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) |
159 	      FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc);
160 	wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg);
161 
162 	return 0;
163 }
164 
165 /**
166  * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
167  * @vqs: virtchnl_queue_select structure containing bitmaps to validate
168  *
169  * Return true on successful validation, else false
170  */
171 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
172 {
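	/* At least one queue must be selected, and neither bitmap may set a
	 * bit at or above ICE_MAX_RSS_QS_PER_VF.
	 */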
173 	if ((!vqs->rx_queues && !vqs->tx_queues) ||
174 	    vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
175 	    vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
176 		return false;
177 
178 	return true;
179 }
180 
181 /**
182  * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
183  * @vsi: VSI of the VF to configure
184  * @q_idx: VF queue index used to determine the queue in the PF's space
185  */
186 void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
187 {
188 	struct ice_hw *hw = &vsi->back->hw;
189 	u32 pfq = vsi->txq_map[q_idx];
190 	u32 reg;
191 
192 	reg = rd32(hw, QINT_TQCTL(pfq));
193 
194 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
195 	 * this is most likely a poll mode VF driver, so don't enable an
196 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
197 	 */
198 	if (!(reg & QINT_TQCTL_MSIX_INDX_M))
199 		return;
200 
201 	wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
202 }
203 
204 /**
205  * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
206  * @vsi: VSI of the VF to configure
207  * @q_idx: VF queue index used to determine the queue in the PF's space
208  */
209 void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
210 {
211 	struct ice_hw *hw = &vsi->back->hw;
212 	u32 pfq = vsi->rxq_map[q_idx];
213 	u32 reg;
214 
215 	reg = rd32(hw, QINT_RQCTL(pfq));
216 
217 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
218 	 * this is most likely a poll mode VF driver, so don't enable an
219 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
220 	 */
221 	if (!(reg & QINT_RQCTL_MSIX_INDX_M))
222 		return;
223 
224 	wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
225 }
226 
227 /**
228  * ice_vc_ena_qs_msg
229  * @vf: pointer to the VF info
230  * @msg: pointer to the msg buffer
231  *
232  * called from the VF to enable all or specific queue(s)
233  */
234 int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
235 {
236 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
237 	struct virtchnl_queue_select *vqs =
238 	    (struct virtchnl_queue_select *)msg;
239 	struct ice_vsi *vsi;
240 	unsigned long q_map;
241 	u16 vf_q_id;
242 
243 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
244 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
245 		goto error_param;
246 	}
247 
248 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
249 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
250 		goto error_param;
251 	}
252 
253 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
254 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
255 		goto error_param;
256 	}
257 
258 	vsi = ice_get_vf_vsi(vf);
259 	if (!vsi) {
260 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
261 		goto error_param;
262 	}
263 
264 	/* Enable only Rx rings; Tx rings were enabled by the FW when the
265 	 * Tx queue group list was configured and the context bits were
266 	 * programmed using ice_vsi_cfg_txqs.
267 	 */
268 	q_map = vqs->rx_queues;
269 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
270 		if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
271 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
272 			goto error_param;
273 		}
274 
275 		/* Skip queue if already enabled */
276 		if (test_bit(vf_q_id, vf->rxq_ena))
277 			continue;
278 
279 		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
280 			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
281 				vf_q_id, vsi->vsi_num);
282 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
283 			goto error_param;
284 		}
285 
286 		ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
287 		set_bit(vf_q_id, vf->rxq_ena);
288 	}
289 
290 	q_map = vqs->tx_queues;
291 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
292 		if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
293 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
294 			goto error_param;
295 		}
296 
297 		/* Skip queue if already enabled */
298 		if (test_bit(vf_q_id, vf->txq_ena))
299 			continue;
300 
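		/* Tx rings were already armed by the FW during queue
		 * configuration, so only the interrupt cause and the PF's
		 * bookkeeping bit need to be enabled here.
		 */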
301 		ice_vf_ena_txq_interrupt(vsi, vf_q_id);
302 		set_bit(vf_q_id, vf->txq_ena);
303 	}
304 
305 	/* Set flag to indicate that queues are enabled */
306 	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
307 		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
308 
309 error_param:
310 	/* send the response to the VF */
311 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
312 				     NULL, 0);
313 }
314 
315 /**
316  * ice_vf_vsi_dis_single_txq - disable a single Tx queue
317  * @vf: VF to disable queue for
318  * @vsi: VSI for the VF
319  * @q_id: VF relative (0-based) queue ID
320  *
321  * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
322  * disabled then clear q_id bit in the enabled queues bitmap and return
323  * success. Otherwise return error.
324  */
325 int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
326 {
327 	struct ice_txq_meta txq_meta = { 0 };
328 	struct ice_tx_ring *ring;
329 	int err;
330 
331 	if (!test_bit(q_id, vf->txq_ena))
332 		dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
333 			q_id, vsi->vsi_num);
334 
335 	ring = vsi->tx_rings[q_id];
336 	if (!ring)
337 		return -EINVAL;
338 
339 	ice_fill_txq_meta(vsi, ring, &txq_meta);
340 
341 	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
342 	if (err) {
343 		dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
344 			q_id, vsi->vsi_num);
345 		return err;
346 	}
347 
348 	/* Clear enabled queues flag */
349 	clear_bit(q_id, vf->txq_ena);
350 
351 	return 0;
352 }
353 
354 /**
355  * ice_vc_dis_qs_msg
356  * @vf: pointer to the VF info
357  * @msg: pointer to the msg buffer
358  *
359  * called from the VF to disable all or specific queue(s)
360  */
361 int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
362 {
363 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
364 	struct virtchnl_queue_select *vqs =
365 	    (struct virtchnl_queue_select *)msg;
366 	struct ice_vsi *vsi;
367 	unsigned long q_map;
368 	u16 vf_q_id;
369 
370 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
371 	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
372 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
373 		goto error_param;
374 	}
375 
376 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
377 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
378 		goto error_param;
379 	}
380 
381 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
382 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
383 		goto error_param;
384 	}
385 
386 	vsi = ice_get_vf_vsi(vf);
387 	if (!vsi) {
388 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
389 		goto error_param;
390 	}
391 
392 	if (vqs->tx_queues) {
393 		q_map = vqs->tx_queues;
394 
395 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
396 			if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
397 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
398 				goto error_param;
399 			}
400 
401 			if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
402 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
403 				goto error_param;
404 			}
405 		}
406 	}
407 
408 	q_map = vqs->rx_queues;
409 	/* speed up Rx queue disable by batching them if possible */
410 	if (q_map &&
411 	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
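		/* The request covers exactly the set of currently enabled Rx
		 * queues, so stop them all at once instead of one by one.
		 */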
412 		if (ice_vsi_stop_all_rx_rings(vsi)) {
413 			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
414 				vsi->vsi_num);
415 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
416 			goto error_param;
417 		}
418 
419 		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
420 	} else if (q_map) {
421 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
422 			if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
423 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
424 				goto error_param;
425 			}
426 
427 			/* Skip queue if not enabled */
428 			if (!test_bit(vf_q_id, vf->rxq_ena))
429 				continue;
430 
431 			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
432 						     true)) {
433 				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
434 					vf_q_id, vsi->vsi_num);
435 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
436 				goto error_param;
437 			}
438 
439 			/* Clear enabled queues flag */
440 			clear_bit(vf_q_id, vf->rxq_ena);
441 		}
442 	}
443 
444 	/* Clear the queues-enabled VF state if no queues remain enabled */
445 	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
446 		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
447 
448 error_param:
449 	/* send the response to the VF */
450 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
451 				     NULL, 0);
452 }
453 
454 /**
455  * ice_cfg_interrupt - configure the IRQ to queue map for a vector
456  * @vf: pointer to the VF info
457  * @vsi: the VSI being configured
458  * @map: vector map for mapping vectors to queues
459  * @q_vector: structure for interrupt vector
460  * Return: VIRTCHNL_STATUS_SUCCESS or VIRTCHNL_STATUS_ERR_PARAM.
461  */
462 static enum virtchnl_status_code
463 ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
464 		  struct virtchnl_vector_map *map,
465 		  struct ice_q_vector *q_vector)
466 {
467 	u16 vsi_q_id, vsi_q_id_idx;
468 	unsigned long qmap;
469 
470 	q_vector->num_ring_rx = 0;
471 	q_vector->num_ring_tx = 0;
472 
473 	qmap = map->rxq_map;
474 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
475 		vsi_q_id = vsi_q_id_idx;
476 
477 		if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
478 			return VIRTCHNL_STATUS_ERR_PARAM;
479 
480 		q_vector->num_ring_rx++;
481 		q_vector->rx.itr_idx = map->rxitr_idx;
482 		vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
483 		ice_cfg_rxq_interrupt(vsi, vsi_q_id,
484 				      q_vector->vf_reg_idx,
485 				      q_vector->rx.itr_idx);
486 	}
487 
488 	qmap = map->txq_map;
489 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
490 		vsi_q_id = vsi_q_id_idx;
491 
492 		if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
493 			return VIRTCHNL_STATUS_ERR_PARAM;
494 
495 		q_vector->num_ring_tx++;
496 		q_vector->tx.itr_idx = map->txitr_idx;
497 		vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
498 		ice_cfg_txq_interrupt(vsi, vsi_q_id,
499 				      q_vector->vf_reg_idx,
500 				      q_vector->tx.itr_idx);
501 	}
502 
503 	return VIRTCHNL_STATUS_SUCCESS;
504 }
505 
506 /**
507  * ice_vc_cfg_irq_map_msg
508  * @vf: pointer to the VF info
509  * @msg: pointer to the msg buffer
510  *
511  * called from the VF to configure the IRQ to queue map
512  */
513 int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
514 {
515 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
516 	u16 num_q_vectors_mapped, vsi_id, vector_id;
517 	struct virtchnl_irq_map_info *irqmap_info;
518 	struct virtchnl_vector_map *map;
519 	struct ice_vsi *vsi;
520 	int i;
521 
522 	irqmap_info = (struct virtchnl_irq_map_info *)msg;
523 	num_q_vectors_mapped = irqmap_info->num_vectors;
524 
525 	/* Check to make sure number of VF vectors mapped is not greater than
526 	 * number of VF vectors originally allocated, and check that
527 	 * there is actually at least a single VF queue vector mapped
528 	 */
529 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
530 	    vf->num_msix < num_q_vectors_mapped ||
531 	    !num_q_vectors_mapped) {
532 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
533 		goto error_param;
534 	}
535 
536 	vsi = ice_get_vf_vsi(vf);
537 	if (!vsi) {
538 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
539 		goto error_param;
540 	}
541 
542 	for (i = 0; i < num_q_vectors_mapped; i++) {
543 		struct ice_q_vector *q_vector;
544 
545 		map = &irqmap_info->vecmap[i];
546 
547 		vector_id = map->vector_id;
548 		vsi_id = map->vsi_id;
549 		/* vector_id is always 0-based for each VF and must be less
550 		 * than the number of MSI-X vectors allocated to the VF
551 		 */
552 		if (!(vector_id < vf->num_msix) ||
553 		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
554 		    (!vector_id && (map->rxq_map || map->txq_map))) {
555 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
556 			goto error_param;
557 		}
558 
559 		/* No need to map VF miscellaneous or rogue vector */
560 		if (!vector_id)
561 			continue;
562 
563 		/* Subtract the non-queue vectors from the vector_id passed
564 		 * by the VF to get the VSI queue vector array index
565 		 */
566 		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
567 		if (!q_vector) {
568 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
569 			goto error_param;
570 		}
571 
572 		/* look out for invalid queue indexes */
573 		v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
574 		if (v_ret)
575 			goto error_param;
576 	}
577 
578 error_param:
579 	/* send the response to the VF */
580 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
581 				     NULL, 0);
582 }
583 
584 /**
585  * ice_vc_cfg_q_bw - Configure per queue bandwidth
586  * @vf: pointer to the VF info
587  * @msg: pointer to the msg buffer which holds the command descriptor
588  *
589  * Configure VF queues bandwidth.
590  *
591  * Return: 0 on success or negative error value.
592  */
593 int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
594 {
595 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
596 	struct virtchnl_queues_bw_cfg *qbw =
597 		(struct virtchnl_queues_bw_cfg *)msg;
598 	struct ice_vsi *vsi;
599 	u16 i;
600 
601 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
602 	    !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
603 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
604 		goto err;
605 	}
606 
607 	vsi = ice_get_vf_vsi(vf);
608 	if (!vsi) {
609 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
610 		goto err;
611 	}
612 
613 	if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
614 	    qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
615 		dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
616 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
617 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
618 		goto err;
619 	}
620 
621 	for (i = 0; i < qbw->num_queues; i++) {
622 		if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
623 		    qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
624 			dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
625 				 qbw->cfg[i].queue_id, vf->vf_id,
626 				 vf->max_tx_rate);
627 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
628 			goto err;
629 		}
630 		if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
631 		    qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
632 			dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
633 				 qbw->cfg[i].queue_id, vf->vf_id,
634 				 vf->min_tx_rate);
635 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
636 			goto err;
637 		}
638 		if (qbw->cfg[i].queue_id >= vf->num_vf_qs) {
639 			dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
640 				 vf->vf_id);
641 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
642 			goto err;
643 		}
644 		if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
645 			dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
646 				 vf->vf_id);
647 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
648 			goto err;
649 		}
650 	}
651 
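	/* All entries passed validation; cache the requested shaper values
	 * per queue before programming them into the scheduler below.
	 */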
652 	for (i = 0; i < qbw->num_queues; i++) {
653 		vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
654 		vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
655 		vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
656 		vf->qs_bw[i].tc = qbw->cfg[i].tc;
657 	}
658 
659 	if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
660 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
661 
662 err:
663 	/* send the response to the VF */
664 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
665 				    v_ret, NULL, 0);
666 }
667 
668 /**
669  * ice_vc_cfg_q_quanta - Configure per queue quanta
670  * @vf: pointer to the VF info
671  * @msg: pointer to the msg buffer which holds the command descriptor
672  *
673  * Configure VF queues quanta.
674  *
675  * Return: 0 on success or negative error value.
676  */
677 int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
678 {
679 	u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
680 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
681 	struct virtchnl_quanta_cfg *qquanta =
682 		(struct virtchnl_quanta_cfg *)msg;
683 	struct ice_vsi *vsi;
684 	int ret;
685 
686 	start_qid = qquanta->queue_select.start_queue_id;
687 	num_queues = qquanta->queue_select.num_queues;
688 
689 	if (check_add_overflow(start_qid, num_queues, &end_qid)) {
690 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
691 		goto err;
692 	}
693 
694 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
695 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
696 		goto err;
697 	}
698 
699 	vsi = ice_get_vf_vsi(vf);
700 	if (!vsi) {
701 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
702 		goto err;
703 	}
704 
705 	if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
706 	    end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
707 		dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
708 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
709 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
710 		goto err;
711 	}
712 
713 	quanta_size = qquanta->quanta_size;
714 	if (quanta_size > ICE_MAX_QUANTA_SIZE ||
715 	    quanta_size < ICE_MIN_QUANTA_SIZE) {
716 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
717 		goto err;
718 	}
719 
720 	if (quanta_size % 64) {
721 		dev_err(ice_pf_to_dev(vf->pf), "quanta size must be a multiple of 64\n");
722 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
723 		goto err;
724 	}
725 
726 	ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size,
727 					  &quanta_prof_id);
728 	if (ret) {
729 		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
730 		goto err;
731 	}
732 
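	/* Remember the chosen profile on each selected Tx ring so the quanta
	 * size is applied when the Tx queue context is (re)programmed.
	 */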
733 	for (i = start_qid; i < end_qid; i++)
734 		vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
735 
736 err:
737 	/* send the response to the VF */
738 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA,
739 				     v_ret, NULL, 0);
740 }
741 
742 /**
743  * ice_vc_cfg_qs_msg
744  * @vf: pointer to the VF info
745  * @msg: pointer to the msg buffer
746  *
747  * called from the VF to configure the Rx/Tx queues
748  */
749 int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
750 {
751 	struct virtchnl_vsi_queue_config_info *qci =
752 	    (struct virtchnl_vsi_queue_config_info *)msg;
753 	struct virtchnl_queue_pair_info *qpi;
754 	struct ice_pf *pf = vf->pf;
755 	struct ice_vsi *vsi;
756 	int i = -1, q_idx;
757 	bool ena_ts;
758 	u8 act_prt;
759 
760 	mutex_lock(&pf->lag_mutex);
761 	act_prt = ice_lag_prepare_vf_reset(pf->lag);
762 
763 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
764 		goto error_param;
765 
766 	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
767 		goto error_param;
768 
769 	vsi = ice_get_vf_vsi(vf);
770 	if (!vsi)
771 		goto error_param;
772 
773 	if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
774 	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
775 		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
776 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
777 		goto error_param;
778 	}
779 
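	/* CRC stripping may only be disabled if the VF negotiated the CRC
	 * offload capability and VLAN stripping is not currently enabled;
	 * otherwise the whole request is rejected.
	 */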
780 	for (i = 0; i < qci->num_queue_pairs; i++) {
781 		if (!qci->qpair[i].rxq.crc_disable)
782 			continue;
783 
784 		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
785 		    vf->vlan_strip_ena)
786 			goto error_param;
787 	}
788 
789 	for (i = 0; i < qci->num_queue_pairs; i++) {
790 		qpi = &qci->qpair[i];
791 		if (qpi->txq.vsi_id != qci->vsi_id ||
792 		    qpi->rxq.vsi_id != qci->vsi_id ||
793 		    qpi->rxq.queue_id != qpi->txq.queue_id ||
794 		    qpi->txq.headwb_enabled ||
795 		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
796 		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
797 		    !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
798 			goto error_param;
799 		}
800 
801 		q_idx = qpi->rxq.queue_id;
802 
803 		/* make sure selected "q_idx" is in valid range of queues
804 		 * for selected "vsi"
805 		 */
806 		if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
807 			goto error_param;
808 		}
809 
810 		/* copy Tx queue info from VF into VSI */
811 		if (qpi->txq.ring_len > 0) {
812 			vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
813 			vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
814 
815 			/* Disable any existing queue first */
816 			if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
817 				goto error_param;
818 
819 			/* Configure a queue with the requested settings */
820 			if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
821 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
822 					 vf->vf_id, q_idx);
823 				goto error_param;
824 			}
825 		}
826 
827 		/* copy Rx queue info from VF into VSI */
828 		if (qpi->rxq.ring_len > 0) {
829 			u16 max_frame_size = ice_vc_get_max_frame_size(vf);
830 			struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
831 			u32 rxdid;
832 
833 			ring->dma = qpi->rxq.dma_ring_addr;
834 			ring->count = qpi->rxq.ring_len;
835 
836 			if (qpi->rxq.crc_disable)
837 				ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
838 			else
839 				ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
840 
841 			if (qpi->rxq.databuffer_size != 0 &&
842 			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
843 			     qpi->rxq.databuffer_size < 1024))
844 				goto error_param;
845 			ring->rx_buf_len = qpi->rxq.databuffer_size;
846 			if (qpi->rxq.max_pkt_size > max_frame_size ||
847 			    qpi->rxq.max_pkt_size < 64)
848 				goto error_param;
849 
850 			ring->max_frame = qpi->rxq.max_pkt_size;
851 			/* add space for the port VLAN since the VF driver is
852 			 * not expected to account for it in the MTU
853 			 * calculation
854 			 */
855 			if (ice_vf_is_port_vlan_ena(vf))
856 				ring->max_frame += VLAN_HLEN;
857 
858 			if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
859 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
860 					 vf->vf_id, q_idx);
861 				goto error_param;
862 			}
863 
864 			/* If the VF negotiated Rx flex descriptors, use the
865 			 * requested RXDID, provided the PF supports it.
866 			 * Otherwise fall back to the legacy 32-byte format;
867 			 * the legacy 16-byte descriptor is not supported.
868 			 */
869 			if (vf->driver_caps &
870 			    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
871 				rxdid = qpi->rxq.rxdid;
872 				if (!(BIT(rxdid) & pf->supported_rxdids))
873 					goto error_param;
874 			} else {
875 				rxdid = ICE_RXDID_LEGACY_1;
876 			}
877 
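			/* Rx timestamping requires both the flex descriptor
			 * and PTP capabilities, plus an explicit per-queue
			 * request flag from the VF.
			 */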
878 			ena_ts = ((vf->driver_caps &
879 				  VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
880 				  (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
881 				  (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
882 
883 			ice_write_qrxflxp_cntxt(&vsi->back->hw,
884 						vsi->rxq_map[q_idx], rxdid,
885 						ICE_RXDID_PRIO, ena_ts);
886 		}
887 	}
888 
889 	ice_lag_complete_vf_reset(pf->lag, act_prt);
890 	mutex_unlock(&pf->lag_mutex);
891 
892 	/* send the response to the VF */
893 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
894 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
895 error_param:
896 	/* disable whatever we can */
897 	for (; i >= 0; i--) {
898 		if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
899 			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
900 				vf->vf_id, i);
901 		if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
902 			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
903 				vf->vf_id, i);
904 	}
905 
906 	ice_lag_complete_vf_reset(pf->lag, act_prt);
907 	mutex_unlock(&pf->lag_mutex);
908 
909 	ice_lag_move_new_vf_nodes(vf);
910 
911 	/* send the response to the VF */
912 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
913 				     VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
914 }
915 
916 /**
917  * ice_vc_request_qs_msg
918  * @vf: pointer to the VF info
919  * @msg: pointer to the msg buffer
920  *
921  * VFs get a default number of queues but can use this message to request a
922  * different number. If the request is successful, PF will reset the VF and
923  * return 0. If unsuccessful, the PF will inform the VF of the number of
924  * available queue pairs in the virtchnl response.
925  */
926 int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
927 {
928 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
929 	struct virtchnl_vf_res_request *vfres =
930 		(struct virtchnl_vf_res_request *)msg;
931 	u16 req_queues = vfres->num_queue_pairs;
932 	struct ice_pf *pf = vf->pf;
933 	u16 max_allowed_vf_queues;
934 	u16 tx_rx_queue_left;
935 	struct device *dev;
936 	u16 cur_queues;
937 
938 	dev = ice_pf_to_dev(pf);
939 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
940 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
941 		goto error_param;
942 	}
943 
944 	cur_queues = vf->num_vf_qs;
945 	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
946 				 ice_get_avail_rxq_count(pf));
947 	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
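	/* A VF may grow to its current queue count plus whatever Tx/Rx queue
	 * pairs the PF still has available, capped at ICE_MAX_RSS_QS_PER_VF.
	 */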
948 	if (!req_queues) {
949 		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
950 			vf->vf_id);
951 	} else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
952 		dev_err(dev, "VF %d tried to request more than %d queues.\n",
953 			vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
954 		vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
955 	} else if (req_queues > cur_queues &&
956 		   req_queues - cur_queues > tx_rx_queue_left) {
957 		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
958 			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
959 		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
960 					       ICE_MAX_RSS_QS_PER_VF);
961 	} else {
962 		/* request granted; reset the VF so the new count takes effect */
963 		vf->num_req_qs = req_queues;
964 		ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
965 		dev_info(dev, "VF %d granted request of %u queues.\n",
966 			 vf->vf_id, req_queues);
967 		return 0;
968 	}
969 
970 error_param:
971 	/* send the response to the VF */
972 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
973 				     v_ret, (u8 *)vfres, sizeof(*vfres));
974 }
975 
976