xref: /linux/drivers/net/ethernet/intel/ice/virt/queues.c (revision cfee454ca1113a090b85c9834f8b6576a946fd45)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3 
4 #include "virtchnl.h"
5 #include "ice_vf_lib_private.h"
6 #include "ice.h"
7 #include "ice_base.h"
8 #include "ice_lib.h"
9 
10 /**
11  * ice_vc_get_max_frame_size - get max frame size allowed for VF
12  * @vf: VF used to determine max frame size
13  *
14  * Max frame size is determined based on the current port's max frame size and
15  * whether a port VLAN is configured on this VF. The VF is not aware whether
16  * it's in a port VLAN, so the PF needs to account for this both when
17  * checking the max frame size and when reporting it to the VF.
18  */
19 static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
20 {
21 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
22 	u16 max_frame_size;
23 
24 	max_frame_size = pi->phy.link_info.max_frame_size;
25 
26 	if (ice_vf_is_port_vlan_ena(vf))
27 		max_frame_size -= VLAN_HLEN;
28 
29 	return max_frame_size;
30 }
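
/* Illustrative example (not part of the driver): with the logic above, a
 * port reporting a 9728 byte max frame size would be reported to a VF in a
 * port VLAN as 9728 - VLAN_HLEN (4) = 9724 bytes, e.g.:
 *
 *	u16 vf_max = ice_vc_get_max_frame_size(vf);	// 9724 with a port VLAN
 *
 * The 9728 byte figure is only an assumed jumbo-frame value for the sketch;
 * the real number comes from pi->phy.link_info.max_frame_size.
 */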
31 
32 /**
33  * ice_vc_isvalid_q_id
34  * @vsi: VSI to check queue ID against
35  * @qid: VSI relative queue ID
36  *
37  * check for a valid queue ID
38  */
39 static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
40 {
41 	/* allocated Tx and Rx queues should always be equal for a VF VSI */
42 	return qid < vsi->alloc_txq;
43 }
44 
45 /**
46  * ice_vc_isvalid_ring_len
47  * @ring_len: length of ring
48  *
49  * check for a valid ring length; it must be zero, or a multiple of
50  * ICE_REQ_DESC_MULTIPLE within [ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC]
51  */
52 static bool ice_vc_isvalid_ring_len(u16 ring_len)
53 {
54 	return ring_len == 0 ||
55 	       (ring_len >= ICE_MIN_NUM_DESC &&
56 		ring_len <= ICE_MAX_NUM_DESC &&
57 		!(ring_len % ICE_REQ_DESC_MULTIPLE));
58 }
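
/* Sketch of the accepted ring lengths (illustrative only): assuming the
 * in-tree constants ICE_MIN_NUM_DESC = 64, ICE_MAX_NUM_DESC = 8160 and
 * ICE_REQ_DESC_MULTIPLE = 32, the helper behaves as:
 *
 *	ice_vc_isvalid_ring_len(0);	// true  - zero means the queue is not being configured
 *	ice_vc_isvalid_ring_len(512);	// true  - multiple of 32, within range
 *	ice_vc_isvalid_ring_len(100);	// false - not a multiple of 32
 *	ice_vc_isvalid_ring_len(16);	// false - below the minimum
 */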
59 
60 /**
61  * ice_vf_cfg_qs_bw - Configure per queue bandwidth
62  * @vf: pointer to the VF info
63  * @num_queues: number of queues to be configured
64  *
65  * Configure per queue bandwidth.
66  *
67  * Return: 0 on success or negative error value.
68  */
69 static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
70 {
71 	struct ice_hw *hw = &vf->pf->hw;
72 	struct ice_vsi *vsi;
73 	int ret;
74 	u16 i;
75 
76 	vsi = ice_get_vf_vsi(vf);
77 	if (!vsi)
78 		return -EINVAL;
79 
80 	for (i = 0; i < num_queues; i++) {
81 		u32 p_rate, min_rate;
82 		u8 tc;
83 
84 		p_rate = vf->qs_bw[i].peak;
85 		min_rate = vf->qs_bw[i].committed;
86 		tc = vf->qs_bw[i].tc;
87 		if (p_rate)
88 			ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
89 					       vf->qs_bw[i].queue_id,
90 					       ICE_MAX_BW, p_rate);
91 		else
92 			ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
93 						    vf->qs_bw[i].queue_id,
94 						    ICE_MAX_BW);
95 		if (ret)
96 			return ret;
97 
98 		if (min_rate)
99 			ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
100 					       vf->qs_bw[i].queue_id,
101 					       ICE_MIN_BW, min_rate);
102 		else
103 			ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
104 						    vf->qs_bw[i].queue_id,
105 						    ICE_MIN_BW);
106 
107 		if (ret)
108 			return ret;
109 	}
110 
111 	return 0;
112 }
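
/* Usage sketch (illustrative): callers are expected to fill vf->qs_bw[]
 * before invoking this helper, as ice_vc_cfg_q_bw() below does, e.g.:
 *
 *	vf->qs_bw[0].queue_id  = 0;
 *	vf->qs_bw[0].peak      = 100000;	// rate units as expected by ice_cfg_q_bw_lmt() (assumed Kbps)
 *	vf->qs_bw[0].committed = 50000;
 *	vf->qs_bw[0].tc        = 0;
 *	err = ice_vf_cfg_qs_bw(vf, 1);
 *
 * A zero peak or committed rate falls back to the scheduler default limit
 * via ice_cfg_q_bw_dflt_lmt().
 */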
113 
114 /**
115  * ice_vf_cfg_q_quanta_profile - Configure quanta profile
116  * @vf: pointer to the VF info
117  * @quanta_prof_idx: pointer to the quanta profile index
118  * @quanta_size: quanta size to be set
119  *
120  * This function chooses an available quanta profile and configures the
121  * register. The quanta profile table is divided evenly among the device's
122  * functions, and each slice is then available to that PF and its VFs. The
123  * first profile of each PF's slice is a reserved default profile; only the
124  * quanta size of the remaining, unused profiles can be modified.
125  *
126  * Return: 0 on success or negative error value.
127  */
128 static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
129 				       u16 *quanta_prof_idx)
130 {
131 	const u16 n_desc = calc_quanta_desc(quanta_size);
132 	struct ice_hw *hw = &vf->pf->hw;
133 	const u16 n_cmd = 2 * n_desc;
134 	struct ice_pf *pf = vf->pf;
135 	u16 per_pf, begin_id;
136 	u8 n_used;
137 	u32 reg;
138 
139 	begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs *
140 		   hw->logical_pf_id;
141 
142 	if (quanta_size == ICE_DFLT_QUANTA) {
143 		*quanta_prof_idx = begin_id;
144 	} else {
145 		per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) /
146 			 hw->dev_caps.num_funcs;
147 		n_used = pf->num_quanta_prof_used;
148 		if (n_used < per_pf) {
149 			*quanta_prof_idx = begin_id + 1 + n_used;
150 			pf->num_quanta_prof_used++;
151 		} else {
152 			return -EINVAL;
153 		}
154 	}
155 
156 	reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) |
157 	      FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) |
158 	      FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc);
159 	wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg);
160 
161 	return 0;
162 }
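
/* Worked example of the profile partitioning (illustrative): assuming 16
 * quanta profiles in total (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) and a device
 * with 4 functions, each PF owns 16 / 4 = 4 consecutive profiles, so
 * begin_id = 4 * hw->logical_pf_id. Profile begin_id is the reserved
 * default used for ICE_DFLT_QUANTA; other sizes take begin_id + 1 + n,
 * where n counts previously used profiles, until pf->num_quanta_prof_used
 * reaches the per-PF share and -EINVAL is returned. The "16 profiles /
 * 4 functions" figures are assumptions for the example only.
 */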
163 
164 /**
165  * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
166  * @vqs: virtchnl_queue_select structure containing bitmaps to validate
167  *
168  * Return true on successful validation, else false
169  */
170 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
171 {
172 	if ((!vqs->rx_queues && !vqs->tx_queues) ||
173 	    vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
174 	    vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
175 		return false;
176 
177 	return true;
178 }
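
/* Example bitmaps (illustrative), assuming ICE_MAX_RSS_QS_PER_VF is 16:
 *
 *	vqs->rx_queues = 0x0003;	// queues 0-1, accepted
 *	vqs->tx_queues = 0x0000;	// an Rx-only request is still valid
 *	vqs->rx_queues = 0x10000;	// bit 16 set, rejected (out of range)
 *
 * At least one of the two bitmaps must be non-zero for the request to be
 * accepted.
 */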
179 
180 /**
181  * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
182  * @vsi: VSI of the VF to configure
183  * @q_idx: VF queue index used to determine the queue in the PF's space
184  */
185 void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
186 {
187 	struct ice_hw *hw = &vsi->back->hw;
188 	u32 pfq = vsi->txq_map[q_idx];
189 	u32 reg;
190 
191 	reg = rd32(hw, QINT_TQCTL(pfq));
192 
193 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
194 	 * this is most likely a poll mode VF driver, so don't enable an
195 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
196 	 */
197 	if (!(reg & QINT_TQCTL_MSIX_INDX_M))
198 		return;
199 
200 	wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
201 }
202 
203 /**
204  * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
205  * @vsi: VSI of the VF to configure
206  * @q_idx: VF queue index used to determine the queue in the PF's space
207  */
208 void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
209 {
210 	struct ice_hw *hw = &vsi->back->hw;
211 	u32 pfq = vsi->rxq_map[q_idx];
212 	u32 reg;
213 
214 	reg = rd32(hw, QINT_RQCTL(pfq));
215 
216 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
217 	 * this is most likely a poll mode VF driver, so don't enable an
218 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
219 	 */
220 	if (!(reg & QINT_RQCTL_MSIX_INDX_M))
221 		return;
222 
223 	wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
224 }
225 
226 /**
227  * ice_vc_ena_qs_msg
228  * @vf: pointer to the VF info
229  * @msg: pointer to the msg buffer
230  *
231  * called from the VF to enable all or specific queue(s)
232  */
233 static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
234 {
235 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
236 	struct virtchnl_queue_select *vqs =
237 	    (struct virtchnl_queue_select *)msg;
238 	struct ice_vsi *vsi;
239 	unsigned long q_map;
240 	u16 vf_q_id;
241 
242 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
243 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
244 		goto error_param;
245 	}
246 
247 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
248 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
249 		goto error_param;
250 	}
251 
252 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
253 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
254 		goto error_param;
255 	}
256 
257 	vsi = ice_get_vf_vsi(vf);
258 	if (!vsi) {
259 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
260 		goto error_param;
261 	}
262 
263 	/* Enable only Rx rings; Tx rings were enabled by the FW when the
264 	 * Tx queue group list was configured and the context bits were
265 	 * programmed using ice_vsi_cfg_txqs
266 	 */
267 	q_map = vqs->rx_queues;
268 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
269 		if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
270 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
271 			goto error_param;
272 		}
273 
274 		/* Skip queue if already enabled */
275 		if (test_bit(vf_q_id, vf->rxq_ena))
276 			continue;
277 
278 		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
279 			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
280 				vf_q_id, vsi->vsi_num);
281 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
282 			goto error_param;
283 		}
284 
285 		ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
286 		set_bit(vf_q_id, vf->rxq_ena);
287 	}
288 
289 	q_map = vqs->tx_queues;
290 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
291 		if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
292 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
293 			goto error_param;
294 		}
295 
296 		/* Skip queue if already enabled */
297 		if (test_bit(vf_q_id, vf->txq_ena))
298 			continue;
299 
300 		ice_vf_ena_txq_interrupt(vsi, vf_q_id);
301 		set_bit(vf_q_id, vf->txq_ena);
302 	}
303 
304 	/* Set flag to indicate that queues are enabled */
305 	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
306 		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
307 
308 error_param:
309 	/* send the response to the VF */
310 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
311 				     NULL, 0);
312 }
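
/* Sketch of the VF-side request this handler services (illustrative): a VF
 * enabling its first four queue pairs would send something like
 *
 *	struct virtchnl_queue_select vqs = {
 *		.vsi_id    = <VSI id reported via VIRTCHNL_OP_GET_VF_RESOURCES>,
 *		.rx_queues = 0xF,
 *		.tx_queues = 0xF,
 *	};
 *
 * over VIRTCHNL_OP_ENABLE_QUEUES. The vsi_id is a placeholder here; it is
 * validated by ice_vc_isvalid_vsi_id() before anything is enabled.
 */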
313 
314 /**
315  * ice_vf_vsi_dis_single_txq - disable a single Tx queue
316  * @vf: VF to disable queue for
317  * @vsi: VSI for the VF
318  * @q_id: VF relative (0-based) queue ID
319  *
320  * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
321  * disabled then clear q_id bit in the enabled queues bitmap and return
322  * success. Otherwise return error.
323  */
324 int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
325 {
326 	struct ice_txq_meta txq_meta = { 0 };
327 	struct ice_tx_ring *ring;
328 	int err;
329 
330 	if (!test_bit(q_id, vf->txq_ena))
331 		dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
332 			q_id, vsi->vsi_num);
333 
334 	ring = vsi->tx_rings[q_id];
335 	if (!ring)
336 		return -EINVAL;
337 
338 	ice_fill_txq_meta(vsi, ring, &txq_meta);
339 
340 	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
341 	if (err) {
342 		dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
343 			q_id, vsi->vsi_num);
344 		return err;
345 	}
346 
347 	/* Clear enabled queues flag */
348 	clear_bit(q_id, vf->txq_ena);
349 
350 	return 0;
351 }
352 
353 /**
354  * ice_vc_dis_qs_msg
355  * @vf: pointer to the VF info
356  * @msg: pointer to the msg buffer
357  *
358  * called from the VF to disable all or specific queue(s)
359  */
360 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
361 {
362 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
363 	struct virtchnl_queue_select *vqs =
364 	    (struct virtchnl_queue_select *)msg;
365 	struct ice_vsi *vsi;
366 	unsigned long q_map;
367 	u16 vf_q_id;
368 
369 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
370 	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
371 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
372 		goto error_param;
373 	}
374 
375 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
376 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
377 		goto error_param;
378 	}
379 
380 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
381 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
382 		goto error_param;
383 	}
384 
385 	vsi = ice_get_vf_vsi(vf);
386 	if (!vsi) {
387 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
388 		goto error_param;
389 	}
390 
391 	if (vqs->tx_queues) {
392 		q_map = vqs->tx_queues;
393 
394 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
395 			if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
396 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
397 				goto error_param;
398 			}
399 
400 			if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
401 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
402 				goto error_param;
403 			}
404 		}
405 	}
406 
407 	q_map = vqs->rx_queues;
408 	/* speed up Rx queue disable by batching them if possible */
409 	if (q_map &&
410 	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
411 		if (ice_vsi_stop_all_rx_rings(vsi)) {
412 			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
413 				vsi->vsi_num);
414 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
415 			goto error_param;
416 		}
417 
418 		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
419 	} else if (q_map) {
420 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
421 			if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
422 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
423 				goto error_param;
424 			}
425 
426 			/* Skip queue if not enabled */
427 			if (!test_bit(vf_q_id, vf->rxq_ena))
428 				continue;
429 
430 			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
431 						     true)) {
432 				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
433 					vf_q_id, vsi->vsi_num);
434 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
435 				goto error_param;
436 			}
437 
438 			/* Clear enabled queues flag */
439 			clear_bit(vf_q_id, vf->rxq_ena);
440 		}
441 	}
442 
443 	/* Clear enabled queues flag */
444 	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
445 		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
446 
447 error_param:
448 	/* send the response to the VF */
449 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
450 				     NULL, 0);
451 }
452 
453 /**
454  * ice_cfg_interrupt
455  * @vf: pointer to the VF info
456  * @vsi: the VSI being configured
457  * @map: vector map for mapping vectors to queues
458  * @q_vector: structure for interrupt vector
459  * configure the IRQ to queue map
460  */
461 static enum virtchnl_status_code
462 ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
463 		  struct virtchnl_vector_map *map,
464 		  struct ice_q_vector *q_vector)
465 {
466 	u16 vsi_q_id, vsi_q_id_idx;
467 	unsigned long qmap;
468 
469 	q_vector->num_ring_rx = 0;
470 	q_vector->num_ring_tx = 0;
471 
472 	qmap = map->rxq_map;
473 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
474 		vsi_q_id = vsi_q_id_idx;
475 
476 		if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
477 			return VIRTCHNL_STATUS_ERR_PARAM;
478 
479 		q_vector->num_ring_rx++;
480 		q_vector->rx.itr_idx = map->rxitr_idx;
481 		vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
482 		ice_cfg_rxq_interrupt(vsi, vsi_q_id,
483 				      q_vector->vf_reg_idx,
484 				      q_vector->rx.itr_idx);
485 	}
486 
487 	qmap = map->txq_map;
488 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
489 		vsi_q_id = vsi_q_id_idx;
490 
491 		if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
492 			return VIRTCHNL_STATUS_ERR_PARAM;
493 
494 		q_vector->num_ring_tx++;
495 		q_vector->tx.itr_idx = map->txitr_idx;
496 		vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
497 		ice_cfg_txq_interrupt(vsi, vsi_q_id,
498 				      q_vector->vf_reg_idx,
499 				      q_vector->tx.itr_idx);
500 	}
501 
502 	return VIRTCHNL_STATUS_SUCCESS;
503 }
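
/* Example vector map (illustrative): a map with
 *
 *	map->rxq_map = 0x1;	// Rx queue 0
 *	map->txq_map = 0x3;	// Tx queues 0 and 1
 *
 * attaches Rx ring 0 and Tx rings 0-1 to this q_vector and programs the
 * corresponding queue interrupt registers, via ice_cfg_rxq_interrupt() and
 * ice_cfg_txq_interrupt(), with the vector's register index and the
 * requested ITR indexes.
 */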
504 
505 /**
506  * ice_vc_cfg_irq_map_msg
507  * @vf: pointer to the VF info
508  * @msg: pointer to the msg buffer
509  *
510  * called from the VF to configure the IRQ to queue map
511  */
512 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
513 {
514 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
515 	u16 num_q_vectors_mapped, vsi_id, vector_id;
516 	struct virtchnl_irq_map_info *irqmap_info;
517 	struct virtchnl_vector_map *map;
518 	struct ice_vsi *vsi;
519 	int i;
520 
521 	irqmap_info = (struct virtchnl_irq_map_info *)msg;
522 	num_q_vectors_mapped = irqmap_info->num_vectors;
523 
524 	/* Check to make sure number of VF vectors mapped is not greater than
525 	 * number of VF vectors originally allocated, and check that
526 	 * there is actually at least a single VF queue vector mapped
527 	 */
528 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
529 	    vf->num_msix < num_q_vectors_mapped ||
530 	    !num_q_vectors_mapped) {
531 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
532 		goto error_param;
533 	}
534 
535 	vsi = ice_get_vf_vsi(vf);
536 	if (!vsi) {
537 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
538 		goto error_param;
539 	}
540 
541 	for (i = 0; i < num_q_vectors_mapped; i++) {
542 		struct ice_q_vector *q_vector;
543 
544 		map = &irqmap_info->vecmap[i];
545 
546 		vector_id = map->vector_id;
547 		vsi_id = map->vsi_id;
548 		/* vector_id is always 0-based for each VF, and can never be
549 		 * larger than or equal to the max allowed interrupts per VF
550 		 */
551 		if (!(vector_id < vf->num_msix) ||
552 		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
553 		    (!vector_id && (map->rxq_map || map->txq_map))) {
554 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
555 			goto error_param;
556 		}
557 
558 		/* No need to map VF miscellaneous or rogue vector */
559 		if (!vector_id)
560 			continue;
561 
562 		/* Subtract the non-queue vector count from the vector_id passed by
563 		 * the VF to get the VSI queue vector array index
564 		 */
565 		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
566 		if (!q_vector) {
567 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
568 			goto error_param;
569 		}
570 
571 		/* look out for an invalid queue index */
572 		v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
573 		if (v_ret)
574 			goto error_param;
575 	}
576 
577 error_param:
578 	/* send the response to the VF */
579 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
580 				     NULL, 0);
581 }
582 
583 /**
584  * ice_vc_cfg_q_bw - Configure per queue bandwidth
585  * @vf: pointer to the VF info
586  * @msg: pointer to the msg buffer which holds the command descriptor
587  *
588  * Configure VF queues bandwidth.
589  *
590  * Return: 0 on success or negative error value.
591  */
592 static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
593 {
594 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
595 	struct virtchnl_queues_bw_cfg *qbw =
596 		(struct virtchnl_queues_bw_cfg *)msg;
597 	struct ice_vsi *vsi;
598 	u16 i;
599 
600 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
601 	    !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
602 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
603 		goto err;
604 	}
605 
606 	vsi = ice_get_vf_vsi(vf);
607 	if (!vsi) {
608 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
609 		goto err;
610 	}
611 
612 	if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
613 	    qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
614 		dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
615 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
616 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
617 		goto err;
618 	}
619 
620 	for (i = 0; i < qbw->num_queues; i++) {
621 		if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
622 		    qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
623 			dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
624 				 qbw->cfg[i].queue_id, vf->vf_id,
625 				 vf->max_tx_rate);
626 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
627 			goto err;
628 		}
629 		if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
630 		    qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
631 			dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
632 				 qbw->cfg[i].queue_id, vf->vf_id,
633 				 vf->min_tx_rate);
634 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
635 			goto err;
636 		}
637 		if (qbw->cfg[i].queue_id > vf->num_vf_qs) {
638 			dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
639 				 vf->vf_id);
640 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
641 			goto err;
642 		}
643 		if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
644 			dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
645 				 vf->vf_id);
646 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
647 			goto err;
648 		}
649 	}
650 
651 	for (i = 0; i < qbw->num_queues; i++) {
652 		vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
653 		vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
654 		vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
655 		vf->qs_bw[i].tc = qbw->cfg[i].tc;
656 	}
657 
658 	if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
659 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
660 
661 err:
662 	/* send the response to the VF */
663 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
664 				    v_ret, NULL, 0);
665 }
666 
667 /**
668  * ice_vc_cfg_q_quanta - Configure per queue quanta
669  * @vf: pointer to the VF info
670  * @msg: pointer to the msg buffer which holds the command descriptor
671  *
672  * Configure VF queues quanta.
673  *
674  * Return: 0 on success or negative error value.
675  */
676 static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
677 {
678 	u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
679 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
680 	struct virtchnl_quanta_cfg *qquanta =
681 		(struct virtchnl_quanta_cfg *)msg;
682 	struct ice_vsi *vsi;
683 	int ret;
684 
685 	start_qid = qquanta->queue_select.start_queue_id;
686 	num_queues = qquanta->queue_select.num_queues;
687 
688 	if (check_add_overflow(start_qid, num_queues, &end_qid)) {
689 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
690 		goto err;
691 	}
692 
693 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
694 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
695 		goto err;
696 	}
697 
698 	vsi = ice_get_vf_vsi(vf);
699 	if (!vsi) {
700 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
701 		goto err;
702 	}
703 
704 	if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
705 	    end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
706 		dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
707 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
708 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
709 		goto err;
710 	}
711 
712 	quanta_size = qquanta->quanta_size;
713 	if (quanta_size > ICE_MAX_QUANTA_SIZE ||
714 	    quanta_size < ICE_MIN_QUANTA_SIZE) {
715 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
716 		goto err;
717 	}
718 
719 	if (quanta_size % 64) {
720 		dev_err(ice_pf_to_dev(vf->pf), "quanta size should be a multiple of 64\n");
721 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
722 		goto err;
723 	}
724 
725 	ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size,
726 					  &quanta_prof_id);
727 	if (ret) {
728 		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
729 		goto err;
730 	}
731 
732 	for (i = start_qid; i < end_qid; i++)
733 		vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
734 
735 err:
736 	/* send the response to the VF */
737 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA,
738 				     v_ret, NULL, 0);
739 }
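
/* Example request (illustrative), with assumed bounds ICE_MIN_QUANTA_SIZE =
 * 256 and ICE_MAX_QUANTA_SIZE = 4096: a VF setting a 1024 byte quanta on
 * its first four Tx queues would send something like
 *
 *	struct virtchnl_quanta_cfg q = {
 *		.quanta_size  = 1024,
 *		.queue_select = { .start_queue_id = 0, .num_queues = 4 },
 *	};
 *
 * (partial initializer, for the sketch only). Sizes outside the bounds, or
 * not a multiple of 64, are rejected with VIRTCHNL_STATUS_ERR_PARAM before
 * any profile is programmed.
 */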
740 
741 /**
742  * ice_vc_cfg_qs_msg
743  * @vf: pointer to the VF info
744  * @msg: pointer to the msg buffer
745  *
746  * called from the VF to configure the Rx/Tx queues
747  */
748 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
749 {
750 	struct virtchnl_vsi_queue_config_info *qci =
751 	    (struct virtchnl_vsi_queue_config_info *)msg;
752 	struct virtchnl_queue_pair_info *qpi;
753 	struct ice_pf *pf = vf->pf;
754 	struct ice_vsi *vsi;
755 	int i = -1, q_idx;
756 	bool ena_ts;
757 	u8 act_prt;
758 
759 	mutex_lock(&pf->lag_mutex);
760 	act_prt = ice_lag_prepare_vf_reset(pf->lag);
761 
762 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
763 		goto error_param;
764 
765 	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
766 		goto error_param;
767 
768 	vsi = ice_get_vf_vsi(vf);
769 	if (!vsi)
770 		goto error_param;
771 
772 	if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
773 	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
774 		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
775 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
776 		goto error_param;
777 	}
778 
779 	for (i = 0; i < qci->num_queue_pairs; i++) {
780 		if (!qci->qpair[i].rxq.crc_disable)
781 			continue;
782 
783 		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
784 		    vf->vlan_strip_ena)
785 			goto error_param;
786 	}
787 
788 	for (i = 0; i < qci->num_queue_pairs; i++) {
789 		qpi = &qci->qpair[i];
790 		if (qpi->txq.vsi_id != qci->vsi_id ||
791 		    qpi->rxq.vsi_id != qci->vsi_id ||
792 		    qpi->rxq.queue_id != qpi->txq.queue_id ||
793 		    qpi->txq.headwb_enabled ||
794 		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
795 		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
796 		    !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
797 			goto error_param;
798 		}
799 
800 		q_idx = qpi->rxq.queue_id;
801 
802 		/* make sure selected "q_idx" is in valid range of queues
803 		 * for selected "vsi"
804 		 */
805 		if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
806 			goto error_param;
807 		}
808 
809 		/* copy Tx queue info from VF into VSI */
810 		if (qpi->txq.ring_len > 0) {
811 			vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
812 			vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
813 
814 			/* Disable any existing queue first */
815 			if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
816 				goto error_param;
817 
818 			/* Configure a queue with the requested settings */
819 			if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
820 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
821 					 vf->vf_id, q_idx);
822 				goto error_param;
823 			}
824 		}
825 
826 		/* copy Rx queue info from VF into VSI */
827 		if (qpi->rxq.ring_len > 0) {
828 			u16 max_frame_size = ice_vc_get_max_frame_size(vf);
829 			struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
830 			u32 rxdid;
831 
832 			ring->dma = qpi->rxq.dma_ring_addr;
833 			ring->count = qpi->rxq.ring_len;
834 
835 			if (qpi->rxq.crc_disable)
836 				ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
837 			else
838 				ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
839 
840 			if (qpi->rxq.databuffer_size != 0 &&
841 			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
842 			     qpi->rxq.databuffer_size < 1024))
843 				goto error_param;
844 			ring->rx_buf_len = qpi->rxq.databuffer_size;
845 			if (qpi->rxq.max_pkt_size > max_frame_size ||
846 			    qpi->rxq.max_pkt_size < 64)
847 				goto error_param;
848 
849 			ring->max_frame = qpi->rxq.max_pkt_size;
850 			/* add space for the port VLAN since the VF driver is
851 			 * not expected to account for it in the MTU
852 			 * calculation
853 			 */
854 			if (ice_vf_is_port_vlan_ena(vf))
855 				ring->max_frame += VLAN_HLEN;
856 
857 			if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
858 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
859 					 vf->vf_id, q_idx);
860 				goto error_param;
861 			}
862 
863 			/* If the Rx flex descriptor is supported, select the RXDID
864 			 * for the Rx queues. Otherwise, use the legacy 32-byte
865 			 * descriptor format. The legacy 16-byte descriptor is not
866 			 * supported; if that RXDID is selected, return an error.
867 			 */
868 			if (vf->driver_caps &
869 			    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
870 				rxdid = qpi->rxq.rxdid;
871 				if (!(BIT(rxdid) & pf->supported_rxdids))
872 					goto error_param;
873 			} else {
874 				rxdid = ICE_RXDID_LEGACY_1;
875 			}
876 
877 			ena_ts = ((vf->driver_caps &
878 				  VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
879 				  (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
880 				  (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
881 
882 			ice_write_qrxflxp_cntxt(&vsi->back->hw,
883 						vsi->rxq_map[q_idx], rxdid,
884 						ICE_RXDID_PRIO, ena_ts);
885 		}
886 	}
887 
888 	ice_lag_complete_vf_reset(pf->lag, act_prt);
889 	mutex_unlock(&pf->lag_mutex);
890 
891 	/* send the response to the VF */
892 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
893 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
894 error_param:
895 	/* disable whatever we can */
896 	for (; i >= 0; i--) {
897 		if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
898 			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
899 				vf->vf_id, i);
900 		if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
901 			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
902 				vf->vf_id, i);
903 	}
904 
905 	ice_lag_complete_vf_reset(pf->lag, act_prt);
906 	mutex_unlock(&pf->lag_mutex);
907 
908 	ice_lag_move_new_vf_nodes(vf);
909 
910 	/* send the response to the VF */
911 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
912 				     VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
913 }
914 
915 /**
916  * ice_vc_request_qs_msg
917  * @vf: pointer to the VF info
918  * @msg: pointer to the msg buffer
919  *
920  * VFs get a default number of queues but can use this message to request a
921  * different number. If the request is successful, the PF will reset the VF
922  * and return 0. If unsuccessful, the PF will send a virtchnl response
923  * informing the VF of the number of available queue pairs.
924  */
925 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
926 {
927 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
928 	struct virtchnl_vf_res_request *vfres =
929 		(struct virtchnl_vf_res_request *)msg;
930 	u16 req_queues = vfres->num_queue_pairs;
931 	struct ice_pf *pf = vf->pf;
932 	u16 max_allowed_vf_queues;
933 	u16 tx_rx_queue_left;
934 	struct device *dev;
935 	u16 cur_queues;
936 
937 	dev = ice_pf_to_dev(pf);
938 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
939 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
940 		goto error_param;
941 	}
942 
943 	cur_queues = vf->num_vf_qs;
944 	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
945 				 ice_get_avail_rxq_count(pf));
946 	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
947 	if (!req_queues) {
948 		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
949 			vf->vf_id);
950 	} else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
951 		dev_err(dev, "VF %d tried to request more than %d queues.\n",
952 			vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
953 		vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
954 	} else if (req_queues > cur_queues &&
955 		   req_queues - cur_queues > tx_rx_queue_left) {
956 		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
957 			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
958 		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
959 					       ICE_MAX_RSS_QS_PER_VF);
960 	} else {
961 		/* request is successful, then reset VF */
962 		vf->num_req_qs = req_queues;
963 		ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
964 		dev_info(dev, "VF %d granted request of %u queues.\n",
965 			 vf->vf_id, req_queues);
966 		return 0;
967 	}
968 
969 error_param:
970 	/* send the response to the VF */
971 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
972 				     v_ret, (u8 *)vfres, sizeof(*vfres));
973 }
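
/* Worked example of the capping logic above (illustrative numbers): a VF
 * currently owning 4 queue pairs that asks for 16 while only 8 Tx/Rx queues
 * remain in the PF pool gets its request trimmed, since 16 - 4 > 8; the
 * response then advertises min(4 + 8, ICE_MAX_RSS_QS_PER_VF) queue pairs
 * and no VF reset is triggered. Only a request that fits both the per-VF
 * maximum and the remaining pool leads to ice_reset_vf().
 */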
974 
975