xref: /linux/drivers/net/ethernet/intel/ice/virt/queues.c (revision 3061d214eead8a6fb652bf69135525f394a40e52)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3 
4 #include "virtchnl.h"
5 #include "ice_vf_lib_private.h"
6 #include "ice.h"
7 #include "ice_base.h"
8 #include "ice_lib.h"
9 
10 /**
11  * ice_vc_get_max_frame_size - get max frame size allowed for VF
12  * @vf: VF used to determine max frame size
13  *
14  * Max frame size is determined based on the current port's max frame size and
15  * whether a port VLAN is configured on this VF. The VF is not aware whether
 16  * it's in a port VLAN, so the PF must account for this both in max frame
 17  * size checks and when reporting the max frame size to the VF.
18  */
19 static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
20 {
21 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
22 	u16 max_frame_size;
23 
24 	max_frame_size = pi->phy.link_info.max_frame_size;
25 
26 	if (ice_vf_is_port_vlan_ena(vf))
27 		max_frame_size -= VLAN_HLEN;
28 
29 	return max_frame_size;
30 }
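
/* Worked example (illustrative numbers, not read from hardware): with a
 * port max_frame_size of 9728 bytes and a port VLAN enabled on the VF,
 * the VF is limited to 9728 - VLAN_HLEN (4) = 9724 bytes, leaving room
 * for the PF-inserted VLAN tag the VF does not know about.
 */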
31 
32 /**
33  * ice_vc_isvalid_q_id
34  * @vsi: VSI to check queue ID against
35  * @qid: VSI relative queue ID
36  *
 37  * Check that the VSI-relative queue ID is valid for this VF
38  */
39 static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
40 {
 41 	/* allocated Tx and Rx queues should always be equal for a VF VSI */
42 	return qid < vsi->alloc_txq;
43 }
44 
45 /**
46  * ice_vc_isvalid_ring_len
47  * @ring_len: length of ring
48  *
 49  * Check for a valid ring length: zero, or a multiple of ICE_REQ_DESC_MULTIPLE
 50  * within [ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC]
51  */
52 static bool ice_vc_isvalid_ring_len(u16 ring_len)
53 {
54 	return ring_len == 0 ||
55 	       (ring_len >= ICE_MIN_NUM_DESC &&
56 		ring_len <= ICE_MAX_NUM_DESC &&
57 		!(ring_len % ICE_REQ_DESC_MULTIPLE));
58 }
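
/* Example, assuming the driver's usual bounds of ICE_MIN_NUM_DESC = 64,
 * ICE_MAX_NUM_DESC = 8160 and ICE_REQ_DESC_MULTIPLE = 32: ring_len values
 * 0, 64, 512 and 8160 are accepted, while 100 (not a multiple of 32) and
 * 8192 (above the maximum) are rejected.
 */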
59 
60 /**
61  * ice_vf_cfg_qs_bw - Configure per queue bandwidth
62  * @vf: pointer to the VF info
63  * @num_queues: number of queues to be configured
64  *
65  * Configure per queue bandwidth.
66  *
67  * Return: 0 on success or negative error value.
68  */
69 static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
70 {
71 	struct ice_hw *hw = &vf->pf->hw;
72 	struct ice_vsi *vsi;
73 	int ret;
74 	u16 i;
75 
76 	vsi = ice_get_vf_vsi(vf);
77 	if (!vsi)
78 		return -EINVAL;
79 
80 	for (i = 0; i < num_queues; i++) {
81 		u32 p_rate, min_rate;
82 		u8 tc;
83 
84 		p_rate = vf->qs_bw[i].peak;
85 		min_rate = vf->qs_bw[i].committed;
86 		tc = vf->qs_bw[i].tc;
87 		if (p_rate)
88 			ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
89 					       vf->qs_bw[i].queue_id,
90 					       ICE_MAX_BW, p_rate);
91 		else
92 			ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
93 						    vf->qs_bw[i].queue_id,
94 						    ICE_MAX_BW);
95 		if (ret)
96 			return ret;
97 
98 		if (min_rate)
99 			ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
100 					       vf->qs_bw[i].queue_id,
101 					       ICE_MIN_BW, min_rate);
102 		else
103 			ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
104 						    vf->qs_bw[i].queue_id,
105 						    ICE_MIN_BW);
106 
107 		if (ret)
108 			return ret;
109 	}
110 
111 	return 0;
112 }
113 
114 /**
115  * ice_vf_cfg_q_quanta_profile - Configure quanta profile
116  * @vf: pointer to the VF info
117  * @quanta_prof_idx: pointer to the quanta profile index
118  * @quanta_size: quanta size to be set
119  *
 120  * This function chooses an available quanta profile and configures the
 121  * register. The quanta profiles are divided evenly among the device's
 122  * functions and are then available to that PF and its VFs. The first profile
 123  * for each PF is a reserved default profile; only the quanta size of the
 124  * remaining unused profiles can be modified.
125  *
126  * Return: 0 on success or negative error value.
127  */
128 static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
129 				       u16 *quanta_prof_idx)
130 {
131 	const u16 n_desc = calc_quanta_desc(quanta_size);
132 	struct ice_hw *hw = &vf->pf->hw;
133 	const u16 n_cmd = 2 * n_desc;
134 	struct ice_pf *pf = vf->pf;
135 	u16 per_pf, begin_id;
136 	u8 n_used;
137 	u32 reg;
138 
139 	begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs *
140 		   hw->logical_pf_id;
141 
142 	if (quanta_size == ICE_DFLT_QUANTA) {
143 		*quanta_prof_idx = begin_id;
144 	} else {
145 		per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) /
146 			 hw->dev_caps.num_funcs;
147 		n_used = pf->num_quanta_prof_used;
 148 		if (n_used < per_pf - 1) { /* slot 0 is the reserved default */
149 			*quanta_prof_idx = begin_id + 1 + n_used;
150 			pf->num_quanta_prof_used++;
151 		} else {
152 			return -EINVAL;
153 		}
154 	}
155 
156 	reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) |
157 	      FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) |
158 	      FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc);
159 	wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg);
160 
161 	return 0;
162 }
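
/* Worked example of the index math above, assuming a hypothetical device
 * with num_funcs = 8 and GLCOMM_QUANTA_PROF_MAX_INDEX = 15 (16 profiles):
 * per_pf = 16 / 8 = 2, so logical PF 3 owns profiles 6 and 7. Profile 6
 * (begin_id) is the reserved default returned for ICE_DFLT_QUANTA; the
 * first non-default request gets profile 6 + 1 + 0 = 7, and any further
 * request fails with -EINVAL since only per_pf - 1 slots are modifiable.
 */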
163 
164 /**
165  * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
166  * @vqs: virtchnl_queue_select structure containing bitmaps to validate
167  *
168  * Return true on successful validation, else false
169  */
170 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
171 {
172 	if ((!vqs->rx_queues && !vqs->tx_queues) ||
173 	    vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
174 	    vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
175 		return false;
176 
177 	return true;
178 }
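
/* Example, assuming ICE_MAX_RSS_QS_PER_VF is 16: rx_queues = 0x0005
 * (queues 0 and 2) validates, while a selection with any bit at or above
 * bit 16 is rejected, as is a request with both bitmaps empty.
 */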
179 
180 /**
181  * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
182  * @vsi: VSI of the VF to configure
183  * @q_idx: VF queue index used to determine the queue in the PF's space
184  */
185 void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
186 {
187 	struct ice_hw *hw = &vsi->back->hw;
188 	u32 pfq = vsi->txq_map[q_idx];
189 	u32 reg;
190 
191 	reg = rd32(hw, QINT_TQCTL(pfq));
192 
193 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
194 	 * this is most likely a poll mode VF driver, so don't enable an
195 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
196 	 */
197 	if (!(reg & QINT_TQCTL_MSIX_INDX_M))
198 		return;
199 
200 	wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
201 }
202 
203 /**
 204  * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
205  * @vsi: VSI of the VF to configure
206  * @q_idx: VF queue index used to determine the queue in the PF's space
207  */
208 void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
209 {
210 	struct ice_hw *hw = &vsi->back->hw;
211 	u32 pfq = vsi->rxq_map[q_idx];
212 	u32 reg;
213 
214 	reg = rd32(hw, QINT_RQCTL(pfq));
215 
216 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
217 	 * this is most likely a poll mode VF driver, so don't enable an
218 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
219 	 */
220 	if (!(reg & QINT_RQCTL_MSIX_INDX_M))
221 		return;
222 
223 	wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
224 }
225 
226 /**
227  * ice_vc_ena_qs_msg
228  * @vf: pointer to the VF info
229  * @msg: pointer to the msg buffer
230  *
231  * called from the VF to enable all or specific queue(s)
232  */
233 static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
234 {
235 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
236 	struct virtchnl_queue_select *vqs =
237 	    (struct virtchnl_queue_select *)msg;
238 	struct ice_vsi *vsi;
239 	unsigned long q_map;
240 	u16 vf_q_id;
241 
242 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
243 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
244 		goto error_param;
245 	}
246 
247 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
248 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
249 		goto error_param;
250 	}
251 
252 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
253 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
254 		goto error_param;
255 	}
256 
257 	vsi = ice_get_vf_vsi(vf);
258 	if (!vsi) {
259 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
260 		goto error_param;
261 	}
262 
263 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
264 	 * Tx queue group list was configured and the context bits were
265 	 * programmed using ice_vsi_cfg_txqs
266 	 */
267 	q_map = vqs->rx_queues;
268 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
269 		if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
270 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
271 			goto error_param;
272 		}
273 
 274 		/* Skip queue if already enabled */
275 		if (test_bit(vf_q_id, vf->rxq_ena))
276 			continue;
277 
278 		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
279 			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
280 				vf_q_id, vsi->vsi_num);
281 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
282 			goto error_param;
283 		}
284 
285 		ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
286 		set_bit(vf_q_id, vf->rxq_ena);
287 	}
288 
289 	q_map = vqs->tx_queues;
290 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
291 		if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
292 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
293 			goto error_param;
294 		}
295 
 296 		/* Skip queue if already enabled */
297 		if (test_bit(vf_q_id, vf->txq_ena))
298 			continue;
299 
300 		ice_vf_ena_txq_interrupt(vsi, vf_q_id);
301 		set_bit(vf_q_id, vf->txq_ena);
302 	}
303 
304 	/* Set flag to indicate that queues are enabled */
305 	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
306 		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
307 
308 error_param:
309 	/* send the response to the VF */
310 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
311 				     NULL, 0);
312 }
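
/* VF-side sketch of the message this handler consumes (illustrative
 * values; layout per virtchnl.h):
 *
 *	struct virtchnl_queue_select vqs = {
 *		.vsi_id    = vsi_id,		// from VF resource info
 *		.rx_queues = BIT(0) | BIT(1),	// enable Rx queues 0-1
 *		.tx_queues = BIT(0) | BIT(1),	// enable Tx queues 0-1
 *	};
 *
 * sent as VIRTCHNL_OP_ENABLE_QUEUES; leaving both bitmaps empty would
 * fail ice_vc_validate_vqs_bitmaps() above.
 */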
313 
314 /**
315  * ice_vf_vsi_dis_single_txq - disable a single Tx queue
316  * @vf: VF to disable queue for
317  * @vsi: VSI for the VF
318  * @q_id: VF relative (0-based) queue ID
319  *
320  * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
321  * disabled then clear q_id bit in the enabled queues bitmap and return
322  * success. Otherwise return error.
323  */
324 int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
325 {
326 	struct ice_txq_meta txq_meta = { 0 };
327 	struct ice_tx_ring *ring;
328 	int err;
329 
330 	if (!test_bit(q_id, vf->txq_ena))
331 		dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
332 			q_id, vsi->vsi_num);
333 
334 	ring = vsi->tx_rings[q_id];
335 	if (!ring)
336 		return -EINVAL;
337 
338 	ice_fill_txq_meta(vsi, ring, &txq_meta);
339 
340 	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
341 	if (err) {
342 		dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
343 			q_id, vsi->vsi_num);
344 		return err;
345 	}
346 
347 	/* Clear enabled queues flag */
348 	clear_bit(q_id, vf->txq_ena);
349 
350 	return 0;
351 }
352 
353 /**
354  * ice_vc_dis_qs_msg
355  * @vf: pointer to the VF info
356  * @msg: pointer to the msg buffer
357  *
358  * called from the VF to disable all or specific queue(s)
359  */
360 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
361 {
362 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
363 	struct virtchnl_queue_select *vqs =
364 	    (struct virtchnl_queue_select *)msg;
365 	struct ice_vsi *vsi;
366 	unsigned long q_map;
367 	u16 vf_q_id;
368 
369 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
370 	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
371 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
372 		goto error_param;
373 	}
374 
375 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
376 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
377 		goto error_param;
378 	}
379 
380 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
381 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
382 		goto error_param;
383 	}
384 
385 	vsi = ice_get_vf_vsi(vf);
386 	if (!vsi) {
387 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
388 		goto error_param;
389 	}
390 
391 	if (vqs->tx_queues) {
392 		q_map = vqs->tx_queues;
393 
394 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
395 			if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
396 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
397 				goto error_param;
398 			}
399 
400 			if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
401 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
402 				goto error_param;
403 			}
404 		}
405 	}
406 
407 	q_map = vqs->rx_queues;
408 	/* speed up Rx queue disable by batching them if possible */
409 	if (q_map &&
410 	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
411 		if (ice_vsi_stop_all_rx_rings(vsi)) {
412 			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
413 				vsi->vsi_num);
414 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
415 			goto error_param;
416 		}
417 
418 		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
419 	} else if (q_map) {
420 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
421 			if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
422 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
423 				goto error_param;
424 			}
425 
426 			/* Skip queue if not enabled */
427 			if (!test_bit(vf_q_id, vf->rxq_ena))
428 				continue;
429 
430 			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
431 						     true)) {
432 				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
433 					vf_q_id, vsi->vsi_num);
434 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
435 				goto error_param;
436 			}
437 
438 			/* Clear enabled queues flag */
439 			clear_bit(vf_q_id, vf->rxq_ena);
440 		}
441 	}
442 
 443 	/* Clear the queues-enabled state flag if no queues remain enabled */
444 	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
445 		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
446 
447 error_param:
448 	/* send the response to the VF */
449 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
450 				     NULL, 0);
451 }
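
/* Note on the Rx fast path above: when the VF asks to disable exactly
 * the set of Rx queues currently enabled (q_map equals vf->rxq_ena), a
 * single ice_vsi_stop_all_rx_rings() call replaces the per-queue
 * disables, e.g. one batched stop instead of 16 individual ring stops
 * for a 16-queue VF tearing down all of its queues.
 */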
452 
453 /**
454  * ice_cfg_interrupt
455  * @vf: pointer to the VF info
456  * @vsi: the VSI being configured
457  * @map: vector map for mapping vectors to queues
458  * @q_vector: structure for interrupt vector
 459  * Configure the IRQ to queue map.
460  */
461 static enum virtchnl_status_code
462 ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
463 		  struct virtchnl_vector_map *map,
464 		  struct ice_q_vector *q_vector)
465 {
466 	u16 vsi_q_id, vsi_q_id_idx;
467 	unsigned long qmap;
468 
469 	q_vector->num_ring_rx = 0;
470 	q_vector->num_ring_tx = 0;
471 
472 	qmap = map->rxq_map;
473 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
474 		vsi_q_id = vsi_q_id_idx;
475 
476 		if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
477 			return VIRTCHNL_STATUS_ERR_PARAM;
478 
479 		q_vector->num_ring_rx++;
480 		q_vector->rx.itr_idx = map->rxitr_idx;
481 		vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
482 		ice_cfg_rxq_interrupt(vsi, vsi_q_id,
483 				      q_vector->vf_reg_idx,
484 				      q_vector->rx.itr_idx);
485 	}
486 
487 	qmap = map->txq_map;
488 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
489 		vsi_q_id = vsi_q_id_idx;
490 
491 		if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
492 			return VIRTCHNL_STATUS_ERR_PARAM;
493 
494 		q_vector->num_ring_tx++;
495 		q_vector->tx.itr_idx = map->txitr_idx;
496 		vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
497 		ice_cfg_txq_interrupt(vsi, vsi_q_id,
498 				      q_vector->vf_reg_idx,
499 				      q_vector->tx.itr_idx);
500 	}
501 
502 	return VIRTCHNL_STATUS_SUCCESS;
503 }
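
/* Example vector map (illustrative): map->rxq_map = map->txq_map = 0x3
 * attaches Rx and Tx queues 0 and 1 to this q_vector, so num_ring_rx and
 * num_ring_tx both end up as 2 and all four rings raise the same MSI-X
 * vector (q_vector->vf_reg_idx) with the ITR indices the VF requested.
 */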
504 
505 /**
506  * ice_vc_cfg_irq_map_msg
507  * @vf: pointer to the VF info
508  * @msg: pointer to the msg buffer
509  *
510  * called from the VF to configure the IRQ to queue map
511  */
512 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
513 {
514 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
515 	u16 num_q_vectors_mapped, vsi_id, vector_id;
516 	struct virtchnl_irq_map_info *irqmap_info;
517 	struct virtchnl_vector_map *map;
518 	struct ice_vsi *vsi;
519 	int i;
520 
521 	irqmap_info = (struct virtchnl_irq_map_info *)msg;
522 	num_q_vectors_mapped = irqmap_info->num_vectors;
523 
524 	/* Check to make sure number of VF vectors mapped is not greater than
525 	 * number of VF vectors originally allocated, and check that
526 	 * there is actually at least a single VF queue vector mapped
527 	 */
528 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
529 	    vf->num_msix < num_q_vectors_mapped ||
530 	    !num_q_vectors_mapped) {
531 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
532 		goto error_param;
533 	}
534 
535 	vsi = ice_get_vf_vsi(vf);
536 	if (!vsi) {
537 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
538 		goto error_param;
539 	}
540 
541 	for (i = 0; i < num_q_vectors_mapped; i++) {
542 		struct ice_q_vector *q_vector;
543 
544 		map = &irqmap_info->vecmap[i];
545 
546 		vector_id = map->vector_id;
547 		vsi_id = map->vsi_id;
548 		/* vector_id is always 0-based for each VF, and can never be
549 		 * larger than or equal to the max allowed interrupts per VF
550 		 */
551 		if (!(vector_id < vf->num_msix) ||
552 		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
553 		    (!vector_id && (map->rxq_map || map->txq_map))) {
554 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
555 			goto error_param;
556 		}
557 
558 		/* No need to map VF miscellaneous or rogue vector */
559 		if (!vector_id)
560 			continue;
561 
 562 		/* Subtract the non-queue vector from the vector_id passed by
 563 		 * the VF to get the actual VSI queue vector array index
564 		 */
565 		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
566 		if (!q_vector) {
567 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
568 			goto error_param;
569 		}
570 
 571 		/* look out for an invalid queue index */
572 		v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
573 		if (v_ret)
574 			goto error_param;
575 	}
576 
577 error_param:
578 	/* send the response to the VF */
579 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
580 				     NULL, 0);
581 }
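
/* Example of the vector indexing above, with ICE_NONQ_VECS_VF = 1
 * (vector 0 is reserved for the OICR/mailbox): a VF mapping vector_id 1
 * lands on vsi->q_vectors[0], while vector_id 0 with a non-empty queue
 * map is rejected as a parameter error.
 */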
582 
583 /**
584  * ice_vc_cfg_q_bw - Configure per queue bandwidth
585  * @vf: pointer to the VF info
586  * @msg: pointer to the msg buffer which holds the command descriptor
587  *
588  * Configure VF queues bandwidth.
589  *
590  * Return: 0 on success or negative error value.
591  */
592 static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
593 {
594 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
595 	struct virtchnl_queues_bw_cfg *qbw =
596 		(struct virtchnl_queues_bw_cfg *)msg;
597 	struct ice_vsi *vsi;
598 	u16 i;
599 
600 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
601 	    !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
602 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
603 		goto err;
604 	}
605 
606 	vsi = ice_get_vf_vsi(vf);
607 	if (!vsi) {
608 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
609 		goto err;
610 	}
611 
612 	if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
613 	    qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
614 		dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
615 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
616 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
617 		goto err;
618 	}
619 
620 	for (i = 0; i < qbw->num_queues; i++) {
621 		if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
622 		    qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
623 			dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
624 				 qbw->cfg[i].queue_id, vf->vf_id,
625 				 vf->max_tx_rate);
626 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
627 			goto err;
628 		}
629 		if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
630 		    qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
631 			dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
632 				 qbw->cfg[i].queue_id, vf->vf_id,
633 				 vf->min_tx_rate);
634 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
635 			goto err;
636 		}
 637 		if (qbw->cfg[i].queue_id >= vf->num_vf_qs) { /* IDs are 0-based */
638 			dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
639 				 vf->vf_id);
640 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
641 			goto err;
642 		}
643 		if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
644 			dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
645 				 vf->vf_id);
646 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
647 			goto err;
648 		}
649 	}
650 
651 	for (i = 0; i < qbw->num_queues; i++) {
652 		vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
653 		vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
654 		vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
655 		vf->qs_bw[i].tc = qbw->cfg[i].tc;
656 	}
657 
658 	if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
659 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
660 
661 err:
662 	/* send the response to the VF */
663 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
664 				    v_ret, NULL, 0);
665 }
666 
667 /**
668  * ice_vc_cfg_q_quanta - Configure per queue quanta
669  * @vf: pointer to the VF info
670  * @msg: pointer to the msg buffer which holds the command descriptor
671  *
672  * Configure VF queues quanta.
673  *
674  * Return: 0 on success or negative error value.
675  */
676 static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
677 {
678 	u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
679 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
680 	struct virtchnl_quanta_cfg *qquanta =
681 		(struct virtchnl_quanta_cfg *)msg;
682 	struct ice_vsi *vsi;
683 	int ret;
684 
685 	start_qid = qquanta->queue_select.start_queue_id;
686 	num_queues = qquanta->queue_select.num_queues;
687 
688 	if (check_add_overflow(start_qid, num_queues, &end_qid)) {
689 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
690 		goto err;
691 	}
692 
693 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
694 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
695 		goto err;
696 	}
697 
698 	vsi = ice_get_vf_vsi(vf);
699 	if (!vsi) {
700 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
701 		goto err;
702 	}
703 
704 	if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
705 	    end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
706 		dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
707 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
708 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
709 		goto err;
710 	}
711 
712 	quanta_size = qquanta->quanta_size;
713 	if (quanta_size > ICE_MAX_QUANTA_SIZE ||
714 	    quanta_size < ICE_MIN_QUANTA_SIZE) {
715 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
716 		goto err;
717 	}
718 
719 	if (quanta_size % 64) {
 720 		dev_err(ice_pf_to_dev(vf->pf), "quanta size should be a multiple of 64\n");
721 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
722 		goto err;
723 	}
724 
725 	ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size,
726 					  &quanta_prof_id);
727 	if (ret) {
728 		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
729 		goto err;
730 	}
731 
732 	for (i = start_qid; i < end_qid; i++)
733 		vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
734 
735 err:
736 	/* send the response to the VF */
737 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA,
738 				     v_ret, NULL, 0);
739 }
740 
741 /**
742  * ice_vc_cfg_qs_msg
743  * @vf: pointer to the VF info
744  * @msg: pointer to the msg buffer
745  *
746  * called from the VF to configure the Rx/Tx queues
747  */
748 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
749 {
750 	struct virtchnl_vsi_queue_config_info *qci =
751 	    (struct virtchnl_vsi_queue_config_info *)msg;
752 	struct virtchnl_queue_pair_info *qpi;
753 	struct ice_pf *pf = vf->pf;
754 	struct ice_vsi *vsi;
755 	int i = -1, q_idx;
756 	bool ena_ts;
757 	u8 act_prt;
758 
759 	mutex_lock(&pf->lag_mutex);
760 	act_prt = ice_lag_prepare_vf_reset(pf->lag);
761 
762 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
763 		goto error_param;
764 
765 	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
766 		goto error_param;
767 
768 	vsi = ice_get_vf_vsi(vf);
769 	if (!vsi)
770 		goto error_param;
771 
772 	if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
773 	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
774 		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
775 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
776 		goto error_param;
777 	}
778 
779 	for (i = 0; i < qci->num_queue_pairs; i++) {
780 		if (!qci->qpair[i].rxq.crc_disable)
781 			continue;
782 
783 		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
784 		    vf->vlan_strip_ena)
785 			goto error_param;
786 	}
787 
788 	for (i = 0; i < qci->num_queue_pairs; i++) {
789 		qpi = &qci->qpair[i];
790 		if (qpi->txq.vsi_id != qci->vsi_id ||
791 		    qpi->rxq.vsi_id != qci->vsi_id ||
792 		    qpi->rxq.queue_id != qpi->txq.queue_id ||
793 		    qpi->txq.headwb_enabled ||
794 		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
795 		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
796 		    !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
797 			goto error_param;
798 		}
799 
800 		q_idx = qpi->rxq.queue_id;
801 
802 		/* make sure selected "q_idx" is in valid range of queues
803 		 * for selected "vsi"
804 		 */
805 		if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
806 			goto error_param;
807 		}
808 
809 		/* copy Tx queue info from VF into VSI */
810 		if (qpi->txq.ring_len > 0) {
811 			vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
812 			vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
813 
814 			/* Disable any existing queue first */
815 			if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
816 				goto error_param;
817 
818 			/* Configure a queue with the requested settings */
819 			if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
820 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
821 					 vf->vf_id, q_idx);
822 				goto error_param;
823 			}
824 		}
825 
826 		/* copy Rx queue info from VF into VSI */
827 		if (qpi->rxq.ring_len > 0) {
828 			u16 max_frame_size = ice_vc_get_max_frame_size(vf);
829 			struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
830 			u32 rxdid;
831 
832 			ring->dma = qpi->rxq.dma_ring_addr;
833 			ring->count = qpi->rxq.ring_len;
834 
835 			if (qpi->rxq.crc_disable)
836 				ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
837 			else
838 				ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
839 
840 			if (qpi->rxq.databuffer_size != 0 &&
841 			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
842 			     qpi->rxq.databuffer_size < 1024))
843 				goto error_param;
844 			ring->rx_buf_len = qpi->rxq.databuffer_size;
845 			if (qpi->rxq.max_pkt_size > max_frame_size ||
846 			    qpi->rxq.max_pkt_size < 64)
847 				goto error_param;
848 
849 			ring->max_frame = qpi->rxq.max_pkt_size;
850 			/* add space for the port VLAN since the VF driver is
851 			 * not expected to account for it in the MTU
852 			 * calculation
853 			 */
854 			if (ice_vf_is_port_vlan_ena(vf))
855 				ring->max_frame += VLAN_HLEN;
856 
857 			if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
858 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
859 					 vf->vf_id, q_idx);
860 				goto error_param;
861 			}
862 
863 			/* If Rx flex desc is supported, select RXDID for Rx
 864 			 * queues. Otherwise, use the legacy 32-byte descriptor
 865 			 * format; the legacy 16-byte descriptor is not
 866 			 * supported, so selecting that RXDID is an error.
867 			 */
868 			if (vf->driver_caps &
869 			    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
870 				rxdid = qpi->rxq.rxdid;
871 				if (!(BIT(rxdid) & pf->supported_rxdids))
872 					goto error_param;
873 			} else {
874 				rxdid = ICE_RXDID_LEGACY_1;
875 			}
876 
877 			ena_ts = ((vf->driver_caps &
878 				  VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
879 				  (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
880 				  (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
881 
882 			ice_write_qrxflxp_cntxt(&vsi->back->hw,
883 						vsi->rxq_map[q_idx], rxdid,
884 						ICE_RXDID_PRIO, ena_ts);
885 		}
886 	}
887 
888 	ice_lag_complete_vf_reset(pf->lag, act_prt);
889 	mutex_unlock(&pf->lag_mutex);
890 
891 	/* send the response to the VF */
892 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
893 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
894 error_param:
895 	/* disable whatever we can */
896 	for (; i >= 0; i--) {
897 		if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
898 			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
899 				vf->vf_id, i);
900 		if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
901 			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
902 				vf->vf_id, i);
903 	}
904 
905 	ice_lag_complete_vf_reset(pf->lag, act_prt);
906 	mutex_unlock(&pf->lag_mutex);
907 
908 	ice_lag_move_new_vf_nodes(vf);
909 
910 	/* send the response to the VF */
911 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
912 				     VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
913 }
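
/* VF-side sketch of one queue pair in this message (illustrative values;
 * layout per virtchnl.h, only fields this handler checks are shown):
 *
 *	struct virtchnl_queue_pair_info qpi = {
 *		.txq = { .vsi_id = vsi_id, .queue_id = 0,
 *			 .ring_len = 512, .dma_ring_addr = tx_dma },
 *		.rxq = { .vsi_id = vsi_id, .queue_id = 0,
 *			 .ring_len = 512, .dma_ring_addr = rx_dma,
 *			 .databuffer_size = 2048, .max_pkt_size = 1522 },
 *	};
 *
 * Tx and Rx queue_id must match, both ring lengths must pass
 * ice_vc_isvalid_ring_len(), and max_pkt_size must fit within
 * ice_vc_get_max_frame_size() for this VF.
 */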
914 
915 /**
916  * ice_can_vf_change_mac
917  * @vf: pointer to the VF info
918  *
919  * Return true if the VF is allowed to change its MAC filters, false otherwise
920  */
921 static bool ice_can_vf_change_mac(struct ice_vf *vf)
922 {
923 	/* If the VF MAC address has been set administratively (via the
924 	 * ndo_set_vf_mac command), then deny permission to the VF to
925 	 * add/delete unicast MAC addresses, unless the VF is trusted
926 	 */
927 	if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
928 		return false;
929 
930 	return true;
931 }
932 
933 /**
934  * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
935  * @vc_ether_addr: used to extract the type
936  */
937 static u8
938 ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
939 {
940 	return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
941 }
942 
943 /**
944  * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
945  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
946  */
947 static bool
948 ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
949 {
950 	u8 type = ice_vc_ether_addr_type(vc_ether_addr);
951 
952 	return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
953 }
954 
955 /**
956  * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
957  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
958  *
959  * This function should only be called when the MAC address in
960  * virtchnl_ether_addr is a valid unicast MAC
961  */
962 static bool
963 ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
964 {
965 	u8 type = ice_vc_ether_addr_type(vc_ether_addr);
966 
967 	return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
968 }
969 
970 /**
971  * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
972  * @vf: VF to update
973  * @vc_ether_addr: structure from VIRTCHNL with MAC to add
974  */
975 static void
976 ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
977 {
978 	u8 *mac_addr = vc_ether_addr->addr;
979 
980 	if (!is_valid_ether_addr(mac_addr))
981 		return;
982 
983 	/* only allow legacy VF drivers to set the device and hardware MAC if it
984 	 * is zero and allow new VF drivers to set the hardware MAC if the type
985 	 * was correctly specified over VIRTCHNL
986 	 */
987 	if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
988 	     is_zero_ether_addr(vf->hw_lan_addr)) ||
989 	    ice_is_vc_addr_primary(vc_ether_addr)) {
990 		ether_addr_copy(vf->dev_lan_addr, mac_addr);
991 		ether_addr_copy(vf->hw_lan_addr, mac_addr);
992 	}
993 
 994 	/* hardware and device MACs are already set, but it's possible that the
995 	 * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
996 	 * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
997 	 * away for the legacy VF driver case as it will be updated in the
998 	 * delete flow for this case
999 	 */
1000 	if (ice_is_vc_addr_legacy(vc_ether_addr)) {
1001 		ether_addr_copy(vf->legacy_last_added_umac.addr,
1002 				mac_addr);
1003 		vf->legacy_last_added_umac.time_modified = jiffies;
1004 	}
1005 }
1006 
1007 /**
1008  * ice_is_mc_lldp_eth_addr - check if the given MAC is a multicast LLDP address
1009  * @mac: address to check
1010  *
1011  * Return: true if the address is one of the three possible LLDP multicast
1012  *	   addresses, false otherwise.
1013  */
1014 static bool ice_is_mc_lldp_eth_addr(const u8 *mac)
1015 {
1016 	const u8 lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00};
1017 
1018 	if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base)))
1019 		return false;
1020 
1021 	return (mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00);
1022 }
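
/* The three addresses accepted above are 01:80:c2:00:00:0e (nearest
 * bridge), 01:80:c2:00:00:03 (nearest non-TPMR bridge) and
 * 01:80:c2:00:00:00 (nearest customer bridge), the IEEE 802.1AB LLDP
 * multicast destinations.
 */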
1023 
1024 /**
1025  * ice_vc_can_add_mac - check if the VF is allowed to add a given MAC
1026  * @vf: a VF to add the address to
1027  * @mac: address to check
1028  *
1029  * Return: true if the VF is allowed to add such MAC address, false otherwise.
1030  */
1031 static bool ice_vc_can_add_mac(const struct ice_vf *vf, const u8 *mac)
1032 {
1033 	struct device *dev = ice_pf_to_dev(vf->pf);
1034 
1035 	if (is_unicast_ether_addr(mac) &&
1036 	    !ice_can_vf_change_mac((struct ice_vf *)vf)) {
1037 		dev_err(dev,
1038 			"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
1039 		return false;
1040 	}
1041 
1042 	if (!vf->trusted && ice_is_mc_lldp_eth_addr(mac)) {
1043 		dev_warn(dev,
1044 			 "An untrusted VF %u is attempting to configure an LLDP multicast address\n",
1045 			 vf->vf_id);
1046 		return false;
1047 	}
1048 
1049 	return true;
1050 }
1051 
1052 /**
1053  * ice_vc_add_mac_addr - attempt to add the MAC address passed in
1054  * @vf: pointer to the VF info
1055  * @vsi: pointer to the VF's VSI
1056  * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
1057  */
1058 static int
1059 ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
1060 		    struct virtchnl_ether_addr *vc_ether_addr)
1061 {
1062 	struct device *dev = ice_pf_to_dev(vf->pf);
1063 	u8 *mac_addr = vc_ether_addr->addr;
1064 	int ret;
1065 
1066 	/* device MAC already added */
1067 	if (ether_addr_equal(mac_addr, vf->dev_lan_addr))
1068 		return 0;
1069 
1070 	if (!ice_vc_can_add_mac(vf, mac_addr))
1071 		return -EPERM;
1072 
1073 	ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
1074 	if (ret == -EEXIST) {
1075 		dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
1076 			vf->vf_id);
1077 		/* don't return since we might need to update
1078 		 * the primary MAC in ice_vfhw_mac_add() below
1079 		 */
1080 	} else if (ret) {
1081 		dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
1082 			mac_addr, vf->vf_id, ret);
1083 		return ret;
1084 	} else {
1085 		vf->num_mac++;
1086 		if (ice_is_mc_lldp_eth_addr(mac_addr))
1087 			ice_vf_update_mac_lldp_num(vf, vsi, true);
1088 	}
1089 
1090 	ice_vfhw_mac_add(vf, vc_ether_addr);
1091 
1092 	return ret;
1093 }
1094 
1095 /**
1096  * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
1097  * @last_added_umac: structure used to check expiration
1098  */
1099 static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
1100 {
1101 #define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME	msecs_to_jiffies(3000)
1102 	return time_is_before_jiffies(last_added_umac->time_modified +
1103 				      ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
1104 }
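
/* Example: a legacy VF that sent VIRTCHNL_OP_ADD_ETH_ADDR at time T has
 * until T + 3 seconds (ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME) for the
 * matching delete to be treated as part of a MAC change; after that the
 * cached address is stale and is not restored by the delete flow below.
 */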
1105 
1106 /**
1107  * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
1108  * @vf: VF to update
1109  * @vc_ether_addr: structure from VIRTCHNL with MAC to check
1110  *
1111  * only update cached hardware MAC for legacy VF drivers on delete
1112  * because we cannot guarantee order/type of MAC from the VF driver
1113  */
1114 static void
1115 ice_update_legacy_cached_mac(struct ice_vf *vf,
1116 			     struct virtchnl_ether_addr *vc_ether_addr)
1117 {
1118 	if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
1119 	    ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
1120 		return;
1121 
1122 	ether_addr_copy(vf->dev_lan_addr, vf->legacy_last_added_umac.addr);
1123 	ether_addr_copy(vf->hw_lan_addr, vf->legacy_last_added_umac.addr);
1124 }
1125 
1126 /**
1127  * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
1128  * @vf: VF to update
1129  * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
1130  */
1131 static void
1132 ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
1133 {
1134 	u8 *mac_addr = vc_ether_addr->addr;
1135 
1136 	if (!is_valid_ether_addr(mac_addr) ||
1137 	    !ether_addr_equal(vf->dev_lan_addr, mac_addr))
1138 		return;
1139 
1140 	/* allow the device MAC to be repopulated in the add flow and don't
1141 	 * clear the hardware MAC (i.e. hw_lan_addr) here as that is meant
1142 	 * to be persistent on VM reboot and across driver unload/load, which
1143 	 * won't work if we clear the hardware MAC here
1144 	 */
1145 	eth_zero_addr(vf->dev_lan_addr);
1146 
1147 	ice_update_legacy_cached_mac(vf, vc_ether_addr);
1148 }
1149 
1150 /**
1151  * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
1152  * @vf: pointer to the VF info
1153  * @vsi: pointer to the VF's VSI
1154  * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
1155  */
1156 static int
1157 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
1158 		    struct virtchnl_ether_addr *vc_ether_addr)
1159 {
1160 	struct device *dev = ice_pf_to_dev(vf->pf);
1161 	u8 *mac_addr = vc_ether_addr->addr;
1162 	int status;
1163 
1164 	if (!ice_can_vf_change_mac(vf) &&
1165 	    ether_addr_equal(vf->dev_lan_addr, mac_addr))
1166 		return 0;
1167 
1168 	status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
1169 	if (status == -ENOENT) {
1170 		dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
1171 			vf->vf_id);
1172 		return -ENOENT;
1173 	} else if (status) {
1174 		dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
1175 			mac_addr, vf->vf_id, status);
1176 		return -EIO;
1177 	}
1178 
1179 	ice_vfhw_mac_del(vf, vc_ether_addr);
1180 
1181 	vf->num_mac--;
1182 	if (ice_is_mc_lldp_eth_addr(mac_addr))
1183 		ice_vf_update_mac_lldp_num(vf, vsi, false);
1184 
1185 	return 0;
1186 }
1187 
1188 /**
1189  * ice_vc_handle_mac_addr_msg
1190  * @vf: pointer to the VF info
1191  * @msg: pointer to the msg buffer
1192  * @set: true if MAC filters are being set, false otherwise
1193  *
1194  * add guest MAC address filter
1195  */
1196 static int
1197 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
1198 {
1199 	int (*ice_vc_cfg_mac)
1200 		(struct ice_vf *vf, struct ice_vsi *vsi,
1201 		 struct virtchnl_ether_addr *virtchnl_ether_addr);
1202 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1203 	struct virtchnl_ether_addr_list *al =
1204 	    (struct virtchnl_ether_addr_list *)msg;
1205 	struct ice_pf *pf = vf->pf;
1206 	enum virtchnl_ops vc_op;
1207 	struct ice_vsi *vsi;
1208 	int i;
1209 
1210 	if (set) {
1211 		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
1212 		ice_vc_cfg_mac = ice_vc_add_mac_addr;
1213 	} else {
1214 		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
1215 		ice_vc_cfg_mac = ice_vc_del_mac_addr;
1216 	}
1217 
1218 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
1219 	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
1220 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1221 		goto handle_mac_exit;
1222 	}
1223 
1224 	/* If this VF is not privileged, then we can't add more than a
1225 	 * limited number of addresses. Check to make sure that the
1226 	 * additions do not push us over the limit.
1227 	 */
1228 	if (set && !ice_is_vf_trusted(vf) &&
1229 	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
 1230 		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more filters\n",
1231 			vf->vf_id);
1232 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1233 		goto handle_mac_exit;
1234 	}
1235 
1236 	vsi = ice_get_vf_vsi(vf);
1237 	if (!vsi) {
1238 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1239 		goto handle_mac_exit;
1240 	}
1241 
1242 	for (i = 0; i < al->num_elements; i++) {
1243 		u8 *mac_addr = al->list[i].addr;
1244 		int result;
1245 
1246 		if (is_broadcast_ether_addr(mac_addr) ||
1247 		    is_zero_ether_addr(mac_addr))
1248 			continue;
1249 
1250 		result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
1251 		if (result == -EEXIST || result == -ENOENT) {
1252 			continue;
1253 		} else if (result) {
1254 			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1255 			goto handle_mac_exit;
1256 		}
1257 	}
1258 
1259 handle_mac_exit:
1260 	/* send the response to the VF */
1261 	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
1262 }
1263 
1264 /**
1265  * ice_vc_add_mac_addr_msg
1266  * @vf: pointer to the VF info
1267  * @msg: pointer to the msg buffer
1268  *
1269  * add guest MAC address filter
1270  */
1271 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
1272 {
1273 	return ice_vc_handle_mac_addr_msg(vf, msg, true);
1274 }
1275 
1276 /**
1277  * ice_vc_del_mac_addr_msg
1278  * @vf: pointer to the VF info
1279  * @msg: pointer to the msg buffer
1280  *
1281  * remove guest MAC address filter
1282  */
1283 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
1284 {
1285 	return ice_vc_handle_mac_addr_msg(vf, msg, false);
1286 }
1287 
1288 /**
1289  * ice_vc_request_qs_msg
1290  * @vf: pointer to the VF info
1291  * @msg: pointer to the msg buffer
1292  *
1293  * VFs get a default number of queues but can use this message to request a
1294  * different number. If the request is successful, PF will reset the VF and
 1295  * return 0. If unsuccessful, the PF will inform the VF of the number of
 1296  * available queue pairs in the virtchnl message response.
1297  */
1298 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
1299 {
1300 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1301 	struct virtchnl_vf_res_request *vfres =
1302 		(struct virtchnl_vf_res_request *)msg;
1303 	u16 req_queues = vfres->num_queue_pairs;
1304 	struct ice_pf *pf = vf->pf;
1305 	u16 max_allowed_vf_queues;
1306 	u16 tx_rx_queue_left;
1307 	struct device *dev;
1308 	u16 cur_queues;
1309 
1310 	dev = ice_pf_to_dev(pf);
1311 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1312 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1313 		goto error_param;
1314 	}
1315 
1316 	cur_queues = vf->num_vf_qs;
1317 	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
1318 				 ice_get_avail_rxq_count(pf));
1319 	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
1320 	if (!req_queues) {
1321 		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
1322 			vf->vf_id);
1323 	} else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
1324 		dev_err(dev, "VF %d tried to request more than %d queues.\n",
1325 			vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
1326 		vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
1327 	} else if (req_queues > cur_queues &&
1328 		   req_queues - cur_queues > tx_rx_queue_left) {
1329 		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
1330 			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
1331 		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
1332 					       ICE_MAX_RSS_QS_PER_VF);
1333 	} else {
1334 		/* request is successful, then reset VF */
1335 		vf->num_req_qs = req_queues;
1336 		ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
1337 		dev_info(dev, "VF %d granted request of %u queues.\n",
1338 			 vf->vf_id, req_queues);
1339 		return 0;
1340 	}
1341 
1342 error_param:
1343 	/* send the response to the VF */
1344 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
1345 				     v_ret, (u8 *)vfres, sizeof(*vfres));
1346 }
1347 
1348 /**
1349  * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
1350  * @caps: VF driver negotiated capabilities
1351  *
1352  * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
1353  */
1354 static bool ice_vf_vlan_offload_ena(u32 caps)
1355 {
1356 	return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
1357 }
1358 
1359 /**
1360  * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
1361  * @vf: VF used to determine if VLAN promiscuous config is allowed
1362  */
1363 bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
1364 {
1365 	if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1366 	     test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
1367 	    test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
1368 		return true;
1369 
1370 	return false;
1371 }
1372 
1373 /**
1374  * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
1375  * @vf: VF to enable VLAN promisc on
1376  * @vsi: VF's VSI used to enable VLAN promiscuous mode
1377  * @vlan: VLAN used to enable VLAN promiscuous
1378  *
1379  * This function should only be called if VLAN promiscuous mode is allowed,
1380  * which can be determined via ice_is_vlan_promisc_allowed().
1381  */
1382 int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
1383 			    struct ice_vlan *vlan)
1384 {
1385 	u8 promisc_m = 0;
1386 	int status;
1387 
1388 	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
1389 		promisc_m |= ICE_UCAST_VLAN_PROMISC_BITS;
1390 	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
1391 		promisc_m |= ICE_MCAST_VLAN_PROMISC_BITS;
1392 
1393 	if (!promisc_m)
1394 		return 0;
1395 
1396 	status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
1397 					  vlan->vid);
1398 	if (status && status != -EEXIST)
1399 		return status;
1400 
1401 	return 0;
1402 }
1403 
1404 /**
1405  * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
1406  * @vsi: VF's VSI used to disable VLAN promiscuous mode for
1407  * @vlan: VLAN used to disable VLAN promiscuous
1408  *
1409  * This function should only be called if VLAN promiscuous mode is allowed,
1410  * which can be determined via ice_is_vlan_promisc_allowed().
1411  */
1412 static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
1413 {
1414 	u8 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS | ICE_MCAST_VLAN_PROMISC_BITS;
1415 	int status;
1416 
1417 	status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
1418 					    vlan->vid);
1419 	if (status && status != -ENOENT)
1420 		return status;
1421 
1422 	return 0;
1423 }
1424 
1425 /**
1426  * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
1427  * @vf: VF to check against
1428  * @vsi: VF's VSI
1429  *
1430  * If the VF is trusted then the VF is allowed to add as many VLANs as it
1431  * wants to, so return false.
1432  *
1433  * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max
1434  * allowed VLANs for an untrusted VF. Return the result of this comparison.
1435  */
1436 static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
1437 {
1438 	if (ice_is_vf_trusted(vf))
1439 		return false;
1440 
1441 #define ICE_VF_ADDED_VLAN_ZERO_FLTRS	1
1442 	return ((ice_vsi_num_non_zero_vlans(vsi) +
1443 		ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
1444 }
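
/* Example, assuming ICE_MAX_VLAN_PER_VF is 8: since VLAN 0 is always
 * programmed for each VF, an untrusted VF may add up to 7 additional
 * non-zero VLAN filters; the next request makes this check return true
 * and the caller silently reports success without adding the filter.
 */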
1445 
1446 /**
1447  * ice_vc_process_vlan_msg
1448  * @vf: pointer to the VF info
1449  * @msg: pointer to the msg buffer
1450  * @add_v: Add VLAN if true, otherwise delete VLAN
1451  *
1452  * Process virtchnl op to add or remove programmed guest VLAN ID
1453  */
1454 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
1455 {
1456 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1457 	struct virtchnl_vlan_filter_list *vfl =
1458 	    (struct virtchnl_vlan_filter_list *)msg;
1459 	struct ice_pf *pf = vf->pf;
1460 	bool vlan_promisc = false;
1461 	struct ice_vsi *vsi;
1462 	struct device *dev;
1463 	int status = 0;
1464 	int i;
1465 
1466 	dev = ice_pf_to_dev(pf);
1467 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1468 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1469 		goto error_param;
1470 	}
1471 
1472 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
1473 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1474 		goto error_param;
1475 	}
1476 
1477 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
1478 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1479 		goto error_param;
1480 	}
1481 
1482 	for (i = 0; i < vfl->num_elements; i++) {
1483 		if (vfl->vlan_id[i] >= VLAN_N_VID) {
1484 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1485 			dev_err(dev, "invalid VF VLAN id %d\n",
1486 				vfl->vlan_id[i]);
1487 			goto error_param;
1488 		}
1489 	}
1490 
1491 	vsi = ice_get_vf_vsi(vf);
1492 	if (!vsi) {
1493 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1494 		goto error_param;
1495 	}
1496 
1497 	if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
1498 		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
1499 			 vf->vf_id);
1500 		/* There is no need to let VF know about being not trusted,
1501 		 * so we can just return success message here
1502 		 */
1503 		goto error_param;
1504 	}
1505 
1506 	/* in DVM a VF can add/delete inner VLAN filters when
1507 	 * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
1508 	 */
1509 	if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
1510 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1511 		goto error_param;
1512 	}
1513 
1514 	/* in DVM VLAN promiscuous is based on the outer VLAN, which would be
1515 	 * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
1516 	 * allow vlan_promisc = true in SVM and if no port VLAN is configured
1517 	 */
1518 	vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
1519 		!ice_is_dvm_ena(&pf->hw) &&
1520 		!ice_vf_is_port_vlan_ena(vf);
1521 
1522 	if (add_v) {
1523 		for (i = 0; i < vfl->num_elements; i++) {
1524 			u16 vid = vfl->vlan_id[i];
1525 			struct ice_vlan vlan;
1526 
1527 			if (ice_vf_has_max_vlans(vf, vsi)) {
1528 				dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
1529 					 vf->vf_id);
1530 				/* There is no need to let VF know about being
1531 				 * not trusted, so we can just return success
1532 				 * message here as well.
1533 				 */
1534 				goto error_param;
1535 			}
1536 
1537 			/* we add VLAN 0 by default for each VF so we can enable
1538 			 * Tx VLAN anti-spoof without triggering MDD events so
1539 			 * we don't need to add it again here
1540 			 */
1541 			if (!vid)
1542 				continue;
1543 
1544 			vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
1545 			status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
1546 			if (status) {
1547 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1548 				goto error_param;
1549 			}
1550 
1551 			/* Enable VLAN filtering on first non-zero VLAN */
1552 			if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
1553 				if (vf->spoofchk) {
1554 					status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
1555 					if (status) {
1556 						v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1557 						dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
1558 							vid, status);
1559 						goto error_param;
1560 					}
1561 				}
1562 				if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
1563 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1564 					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
1565 						vid, status);
1566 					goto error_param;
1567 				}
1568 			} else if (vlan_promisc) {
1569 				status = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
1570 				if (status) {
1571 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1572 					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
1573 						vid, status);
1574 				}
1575 			}
1576 		}
1577 	} else {
 1578 		/* For an untrusted VF, the number of VLAN elements passed to
 1579 		 * the PF for removal might exceed the number of VLAN filters
 1580 		 * programmed for that VF, so use the actual number of VLANs
 1581 		 * added earlier with the add VLAN opcode to avoid removing a
 1582 		 * VLAN that doesn't exist and sending an erroneous failure
 1583 		 * message back to the VF
 1584 		 */
1584 		 */
1585 		int num_vf_vlan;
1586 
1587 		num_vf_vlan = vsi->num_vlan;
1588 		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
1589 			u16 vid = vfl->vlan_id[i];
1590 			struct ice_vlan vlan;
1591 
1592 			/* we add VLAN 0 by default for each VF so we can enable
1593 			 * Tx VLAN anti-spoof without triggering MDD events so
1594 			 * we don't want a VIRTCHNL request to remove it
1595 			 */
1596 			if (!vid)
1597 				continue;
1598 
1599 			vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
1600 			status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
1601 			if (status) {
1602 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1603 				goto error_param;
1604 			}
1605 
1606 			/* Disable VLAN filtering when only VLAN 0 is left */
1607 			if (!ice_vsi_has_non_zero_vlans(vsi)) {
1608 				vsi->inner_vlan_ops.dis_tx_filtering(vsi);
1609 				vsi->inner_vlan_ops.dis_rx_filtering(vsi);
1610 			}
1611 
1612 			if (vlan_promisc)
1613 				ice_vf_dis_vlan_promisc(vsi, &vlan);
1614 		}
1615 	}
1616 
1617 error_param:
1618 	/* send the response to the VF */
1619 	if (add_v)
1620 		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
1621 					     NULL, 0);
1622 	else
1623 		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
1624 					     NULL, 0);
1625 }
1626 
1627 /**
1628  * ice_vc_add_vlan_msg
1629  * @vf: pointer to the VF info
1630  * @msg: pointer to the msg buffer
1631  *
1632  * Add and program guest VLAN ID
1633  */
1634 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
1635 {
1636 	return ice_vc_process_vlan_msg(vf, msg, true);
1637 }
1638 
1639 /**
1640  * ice_vc_remove_vlan_msg
1641  * @vf: pointer to the VF info
1642  * @msg: pointer to the msg buffer
1643  *
1644  * remove programmed guest VLAN ID
1645  */
1646 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
1647 {
1648 	return ice_vc_process_vlan_msg(vf, msg, false);
1649 }
1650 
1651 /**
 1652  * ice_vsi_is_rxq_crc_strip_dis - check if CRC stripping is disabled on any Rx queue
1653  * @vsi: pointer to the VF VSI info
1654  */
1655 static bool ice_vsi_is_rxq_crc_strip_dis(struct ice_vsi *vsi)
1656 {
1657 	unsigned int i;
1658 
1659 	ice_for_each_alloc_rxq(vsi, i)
1660 		if (vsi->rx_rings[i]->flags & ICE_RX_FLAGS_CRC_STRIP_DIS)
1661 			return true;
1662 
1663 	return false;
1664 }
1665 
1666 /**
1667  * ice_vc_ena_vlan_stripping
1668  * @vf: pointer to the VF info
1669  *
1670  * Enable VLAN header stripping for a given VF
1671  */
1672 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
1673 {
1674 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1675 	struct ice_vsi *vsi;
1676 
1677 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1678 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1679 		goto error_param;
1680 	}
1681 
1682 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
1683 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1684 		goto error_param;
1685 	}
1686 
1687 	vsi = ice_get_vf_vsi(vf);
1688 	if (!vsi) {
1689 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1690 		goto error_param;
1691 	}
1692 
1693 	if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
1694 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1695 	else
1696 		vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
1697 
1698 error_param:
1699 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
1700 				     v_ret, NULL, 0);
1701 }
1702 
1703 /**
1704  * ice_vc_dis_vlan_stripping
1705  * @vf: pointer to the VF info
1706  *
1707  * Disable VLAN header stripping for a given VF
1708  */
1709 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
1710 {
1711 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1712 	struct ice_vsi *vsi;
1713 
1714 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1715 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1716 		goto error_param;
1717 	}
1718 
1719 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
1720 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1721 		goto error_param;
1722 	}
1723 
1724 	vsi = ice_get_vf_vsi(vf);
1725 	if (!vsi) {
1726 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1727 		goto error_param;
1728 	}
1729 
1730 	if (vsi->inner_vlan_ops.dis_stripping(vsi))
1731 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1732 	else
1733 		vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;
1734 
1735 error_param:
1736 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
1737 				     v_ret, NULL, 0);
1738 }
1739 
1740 /**
1741  * ice_vc_get_rss_hashcfg - return the RSS Hash configuration
1742  * @vf: pointer to the VF info
1743  */
1744 static int ice_vc_get_rss_hashcfg(struct ice_vf *vf)
1745 {
1746 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1747 	struct virtchnl_rss_hashcfg *vrh = NULL;
1748 	int len = 0, ret;
1749 
1750 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1751 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1752 		goto err;
1753 	}
1754 
1755 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1756 		dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
1757 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1758 		goto err;
1759 	}
1760 
1761 	len = sizeof(struct virtchnl_rss_hashcfg);
1762 	vrh = kzalloc(len, GFP_KERNEL);
1763 	if (!vrh) {
1764 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1765 		len = 0;
1766 		goto err;
1767 	}
1768 
1769 	vrh->hashcfg = ICE_DEFAULT_RSS_HASHCFG;
1770 err:
1771 	/* send the response back to the VF */
1772 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, v_ret,
1773 				    (u8 *)vrh, len);
1774 	kfree(vrh);
1775 	return ret;
1776 }
1777 
1778 /**
1779  * ice_vc_set_rss_hashcfg - set RSS Hash configuration bits for the VF
1780  * @vf: pointer to the VF info
1781  * @msg: pointer to the msg buffer
1782  */
1783 static int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg)
1784 {
1785 	struct virtchnl_rss_hashcfg *vrh = (struct virtchnl_rss_hashcfg *)msg;
1786 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1787 	struct ice_pf *pf = vf->pf;
1788 	struct ice_vsi *vsi;
1789 	struct device *dev;
1790 	int status;
1791 
1792 	dev = ice_pf_to_dev(pf);
1793 
1794 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1795 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1796 		goto err;
1797 	}
1798 
1799 	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
1800 		dev_err(dev, "RSS not supported by PF\n");
1801 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1802 		goto err;
1803 	}
1804 
1805 	vsi = ice_get_vf_vsi(vf);
1806 	if (!vsi) {
1807 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1808 		goto err;
1809 	}
1810 
1811 	/* clear all previously programmed RSS configuration to allow VF drivers
1812 	 * the ability to customize the RSS configuration and/or completely
1813 	 * disable RSS
1814 	 */
1815 	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
1816 	if (status && !vrh->hashcfg) {
1817 		/* only report failure to clear the current RSS configuration if
1818 		 * that was clearly the VF's intention (i.e. vrh->hashcfg = 0)
1819 		 */
1820 		v_ret = ice_err_to_virt_err(status);
1821 		goto err;
1822 	} else if (status) {
1823 		/* allow the VF to update the RSS configuration even on failure
	 * to clear the current RSS configuration in an attempt to keep
1825 		 * RSS in a working state
1826 		 */
1827 		dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
1828 			 vf->vf_id);
1829 	}
1830 
1831 	if (vrh->hashcfg) {
1832 		status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg);
1833 		v_ret = ice_err_to_virt_err(status);
1834 	}
1835 
1836 	/* save the requested VF configuration */
1837 	if (!v_ret)
1838 		vf->rss_hashcfg = vrh->hashcfg;
1839 
1840 	/* send the response to the VF */
1841 err:
1842 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret,
1843 				     NULL, 0);
1844 }
1845 
1846 /**
1847  * ice_vc_query_rxdid - query RXDID supported by DDP package
1848  * @vf: pointer to VF info
1849  *
1850  * Called from VF to query a bitmap of supported flexible
1851  * descriptor RXDIDs of a DDP package.
1852  */
1853 static int ice_vc_query_rxdid(struct ice_vf *vf)
1854 {
1855 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1856 	struct ice_pf *pf = vf->pf;
	u64 rxdid = 0;	/* avoid sending uninitialized stack data on error */
1858 
1859 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1860 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1861 		goto err;
1862 	}
1863 
1864 	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)) {
1865 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1866 		goto err;
1867 	}
1868 
1869 	rxdid = pf->supported_rxdids;
1870 
1871 err:
1872 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
1873 				     v_ret, (u8 *)&rxdid, sizeof(rxdid));
1874 }
1875 
1876 /**
1877  * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
1878  * @vf: VF to enable/disable VLAN stripping for on initialization
1879  *
1880  * Set the default for VLAN stripping based on whether a port VLAN is configured
1881  * and the current VLAN mode of the device.
1882  */
1883 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
1884 {
1885 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1886 
1887 	vf->vlan_strip_ena = 0;
1888 
1889 	if (!vsi)
1890 		return -EINVAL;
1891 
1892 	/* don't modify stripping if port VLAN is configured in SVM since the
1893 	 * port VLAN is based on the inner/single VLAN in SVM
1894 	 */
1895 	if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
1896 		return 0;
1897 
1898 	if (ice_vf_vlan_offload_ena(vf->driver_caps)) {
1899 		int err;
1900 
1901 		err = vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
1902 		if (!err)
1903 			vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
1904 		return err;
1905 	}
1906 
1907 	return vsi->inner_vlan_ops.dis_stripping(vsi);
1908 }
1909 
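/**
 * ice_vc_get_max_vlan_fltrs - get the maximum VLAN filters the VF may use
 * @vf: VF to get the maximum VLAN filter count for
 *
 * Trusted VFs may use the full VLAN ID space; untrusted VFs are limited to
 * ICE_MAX_VLAN_PER_VF filters.
 */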
1910 static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
1911 {
1912 	if (vf->trusted)
1913 		return VLAN_N_VID;

	return ICE_MAX_VLAN_PER_VF;
1916 }
1917 
1918 /**
1919  * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used
 * @vf: VF being checked
1921  *
1922  * When the device is in double VLAN mode, check whether or not the outer VLAN
1923  * is allowed.
1924  */
1925 static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
1926 {
	return ice_vf_is_port_vlan_ena(vf);
1931 }
1932 
1933 /**
1934  * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
1935  * @vf: VF that capabilities are being set for
1936  * @caps: VLAN capabilities to populate
1937  *
1938  * Determine VLAN capabilities support based on whether a port VLAN is
1939  * configured. If a port VLAN is configured then the VF should use the inner
1940  * filtering/offload capabilities since the port VLAN is using the outer VLAN
 * capabilities.
1942  */
1943 static void
1944 ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
1945 {
1946 	struct virtchnl_vlan_supported_caps *supported_caps;
1947 
1948 	if (ice_vf_outer_vlan_not_allowed(vf)) {
1949 		/* until support for inner VLAN filtering is added when a port
1950 		 * VLAN is configured, only support software offloaded inner
		 * VLANs when a port VLAN is configured in DVM
1952 		 */
1953 		supported_caps = &caps->filtering.filtering_support;
1954 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
1955 
1956 		supported_caps = &caps->offloads.stripping_support;
1957 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
1958 					VIRTCHNL_VLAN_TOGGLE |
1959 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
1960 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
1961 
1962 		supported_caps = &caps->offloads.insertion_support;
1963 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
1964 					VIRTCHNL_VLAN_TOGGLE |
1965 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
1966 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
1967 
1968 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
1969 		caps->offloads.ethertype_match =
1970 			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
1971 	} else {
1972 		supported_caps = &caps->filtering.filtering_support;
1973 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
1974 		supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
1975 					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
1976 					VIRTCHNL_VLAN_ETHERTYPE_9100 |
1977 					VIRTCHNL_VLAN_ETHERTYPE_AND;
1978 		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
1979 						 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
1980 						 VIRTCHNL_VLAN_ETHERTYPE_9100;
1981 
1982 		supported_caps = &caps->offloads.stripping_support;
1983 		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
1984 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
1985 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
1986 		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
1987 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
1988 					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
1989 					VIRTCHNL_VLAN_ETHERTYPE_9100 |
1990 					VIRTCHNL_VLAN_ETHERTYPE_XOR |
1991 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;
1992 
1993 		supported_caps = &caps->offloads.insertion_support;
1994 		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
1995 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
1996 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
1997 		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
1998 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
1999 					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2000 					VIRTCHNL_VLAN_ETHERTYPE_9100 |
2001 					VIRTCHNL_VLAN_ETHERTYPE_XOR |
2002 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;
2003 
2004 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2005 
2006 		caps->offloads.ethertype_match =
2007 			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2008 	}
2009 
2010 	caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
2011 }
2012 
2013 /**
2014  * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
2015  * @vf: VF that capabilities are being set for
2016  * @caps: VLAN capabilities to populate
2017  *
2018  * Determine VLAN capabilities support based on whether a port VLAN is
2019  * configured. If a port VLAN is configured then the VF does not have any VLAN
2020  * filtering or offload capabilities since the port VLAN is using the inner VLAN
2021  * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
 * VLAN filtering and offload capabilities.
2023  */
2024 static void
2025 ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
2026 {
2027 	struct virtchnl_vlan_supported_caps *supported_caps;
2028 
2029 	if (ice_vf_is_port_vlan_ena(vf)) {
2030 		supported_caps = &caps->filtering.filtering_support;
2031 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2032 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2033 
2034 		supported_caps = &caps->offloads.stripping_support;
2035 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2036 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2037 
2038 		supported_caps = &caps->offloads.insertion_support;
2039 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2040 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2041 
2042 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
2043 		caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
2044 		caps->filtering.max_filters = 0;
2045 	} else {
2046 		supported_caps = &caps->filtering.filtering_support;
2047 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
2048 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2049 		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2050 
2051 		supported_caps = &caps->offloads.stripping_support;
2052 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2053 					VIRTCHNL_VLAN_TOGGLE |
2054 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2055 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2056 
2057 		supported_caps = &caps->offloads.insertion_support;
2058 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2059 					VIRTCHNL_VLAN_TOGGLE |
2060 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2061 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2062 
2063 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2064 		caps->offloads.ethertype_match =
2065 			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2066 		caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
2067 	}
2068 }
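
/*
 * Rough summary of the VLAN capabilities negotiated by the two functions
 * above (illustrative only; the code remains authoritative):
 *
 *	Mode	Port VLAN	Filtering	Stripping/insertion
 *	DVM	yes		none		inner only (0x8100, L2TAG1)
 *	DVM	no		outer		inner and outer (multiple TPIDs)
 *	SVM	yes		none		none
 *	SVM	no		inner (0x8100)	inner only (0x8100, L2TAG1)
 */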
2069 
2070 /**
2071  * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
2072  * @vf: VF to determine VLAN capabilities for
2073  *
2074  * This will only be called if the VF and PF successfully negotiated
2075  * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2076  *
2077  * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
2078  * is configured or not.
2079  */
2080 static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
2081 {
2082 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2083 	struct virtchnl_vlan_caps *caps = NULL;
2084 	int err, len = 0;
2085 
2086 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2087 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2088 		goto out;
2089 	}
2090 
2091 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
2092 	if (!caps) {
2093 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2094 		goto out;
2095 	}
2096 	len = sizeof(*caps);
2097 
2098 	if (ice_is_dvm_ena(&vf->pf->hw))
2099 		ice_vc_set_dvm_caps(vf, caps);
2100 	else
2101 		ice_vc_set_svm_caps(vf, caps);
2102 
2103 	/* store negotiated caps to prevent invalid VF messages */
2104 	memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));
2105 
2106 out:
2107 	err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
2108 				    v_ret, (u8 *)caps, len);
2109 	kfree(caps);
2110 	return err;
2111 }
2112 
2113 /**
2114  * ice_vc_validate_vlan_tpid - validate VLAN TPID
2115  * @filtering_caps: negotiated/supported VLAN filtering capabilities
2116  * @tpid: VLAN TPID used for validation
2117  *
2118  * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
2119  * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
2120  */
2121 static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
2122 {
2123 	enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
2124 
2125 	switch (tpid) {
2126 	case ETH_P_8021Q:
2127 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
2128 		break;
2129 	case ETH_P_8021AD:
2130 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
2131 		break;
2132 	case ETH_P_QINQ1:
2133 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
2134 		break;
2135 	}
2136 
2137 	if (!(filtering_caps & vlan_ethertype))
2138 		return false;
2139 
2140 	return true;
2141 }
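
/*
 * Example (illustrative): if the negotiated filtering caps are
 * VIRTCHNL_VLAN_ETHERTYPE_8100 | VIRTCHNL_VLAN_ETHERTYPE_88A8, a TPID of
 * ETH_P_8021Q (0x8100) or ETH_P_8021AD (0x88A8) is accepted, while
 * ETH_P_QINQ1 (0x9100) maps to VIRTCHNL_VLAN_ETHERTYPE_9100 and is
 * rejected. Any other TPID leaves vlan_ethertype at
 * VIRTCHNL_VLAN_UNSUPPORTED (0) and always fails the check.
 */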
2142 
2143 /**
2144  * ice_vc_is_valid_vlan - validate the virtchnl_vlan
2145  * @vc_vlan: virtchnl_vlan to validate
2146  *
 * If the VLAN TCI or the VLAN TPID is 0, then this filter is invalid, so return
2148  * false. Otherwise return true.
2149  */
2150 static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
2151 {
2152 	if (!vc_vlan->tci || !vc_vlan->tpid)
2153 		return false;
2154 
2155 	return true;
2156 }
2157 
2158 /**
2159  * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
2160  * @vfc: negotiated/supported VLAN filtering capabilities
2161  * @vfl: VLAN filter list from VF to validate
2162  *
2163  * Validate all of the filters in the VLAN filter list from the VF. If any of
2164  * the checks fail then return false. Otherwise return true.
2165  */
2166 static bool
2167 ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
2168 				 struct virtchnl_vlan_filter_list_v2 *vfl)
2169 {
2170 	u16 i;
2171 
2172 	if (!vfl->num_elements)
2173 		return false;
2174 
2175 	for (i = 0; i < vfl->num_elements; i++) {
2176 		struct virtchnl_vlan_supported_caps *filtering_support =
2177 			&vfc->filtering_support;
2178 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2179 		struct virtchnl_vlan *outer = &vlan_fltr->outer;
2180 		struct virtchnl_vlan *inner = &vlan_fltr->inner;
2181 
2182 		if ((ice_vc_is_valid_vlan(outer) &&
2183 		     filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
2184 		    (ice_vc_is_valid_vlan(inner) &&
2185 		     filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
2186 			return false;
2187 
2188 		if ((outer->tci_mask &&
2189 		     !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
2190 		    (inner->tci_mask &&
2191 		     !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
2192 			return false;
2193 
2194 		if (((outer->tci & VLAN_PRIO_MASK) &&
2195 		     !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
2196 		    ((inner->tci & VLAN_PRIO_MASK) &&
2197 		     !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
2198 			return false;
2199 
2200 		if ((ice_vc_is_valid_vlan(outer) &&
2201 		     !ice_vc_validate_vlan_tpid(filtering_support->outer,
2202 						outer->tpid)) ||
2203 		    (ice_vc_is_valid_vlan(inner) &&
2204 		     !ice_vc_validate_vlan_tpid(filtering_support->inner,
2205 						inner->tpid)))
2206 			return false;
2207 	}
2208 
2209 	return true;
2210 }
2211 
2212 /**
2213  * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
2214  * @vc_vlan: struct virtchnl_vlan to transform
2215  */
2216 static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
2217 {
2218 	struct ice_vlan vlan = { 0 };
2219 
2220 	vlan.prio = FIELD_GET(VLAN_PRIO_MASK, vc_vlan->tci);
2221 	vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
2222 	vlan.tpid = vc_vlan->tpid;
2223 
2224 	return vlan;
2225 }
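
/*
 * Example (illustrative): a virtchnl_vlan with tci == 0xA064 and
 * tpid == ETH_P_8021Q decomposes into prio = (0xA064 & VLAN_PRIO_MASK) >> 13,
 * i.e. 5, and vid = 0xA064 & VLAN_VID_MASK, i.e. 0x064 (VLAN 100).
 */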
2226 
2227 /**
 * ice_vc_vlan_action - action to perform on the virtchnl_vlan
2229  * @vsi: VF's VSI used to perform the action
2230  * @vlan_action: function to perform the action with (i.e. add/del)
2231  * @vlan: VLAN filter to perform the action with
2232  */
2233 static int
2234 ice_vc_vlan_action(struct ice_vsi *vsi,
2235 		   int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
2236 		   struct ice_vlan *vlan)
2237 {
	return vlan_action(vsi, vlan);
2245 }
2246 
2247 /**
2248  * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
2249  * @vf: VF used to delete the VLAN(s)
2250  * @vsi: VF's VSI used to delete the VLAN(s)
 * @vfl: virtchnl filter list used to delete the filters
2252  */
2253 static int
2254 ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
2255 		 struct virtchnl_vlan_filter_list_v2 *vfl)
2256 {
2257 	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
2258 	int err;
2259 	u16 i;
2260 
2261 	for (i = 0; i < vfl->num_elements; i++) {
2262 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2263 		struct virtchnl_vlan *vc_vlan;
2264 
2265 		vc_vlan = &vlan_fltr->outer;
2266 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2267 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2268 
2269 			err = ice_vc_vlan_action(vsi,
2270 						 vsi->outer_vlan_ops.del_vlan,
2271 						 &vlan);
2272 			if (err)
2273 				return err;
2274 
2275 			if (vlan_promisc)
2276 				ice_vf_dis_vlan_promisc(vsi, &vlan);
2277 
2278 			/* Disable VLAN filtering when only VLAN 0 is left */
2279 			if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
2280 				err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
2281 				if (err)
2282 					return err;
2283 			}
2284 		}
2285 
2286 		vc_vlan = &vlan_fltr->inner;
2287 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2288 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2289 
2290 			err = ice_vc_vlan_action(vsi,
2291 						 vsi->inner_vlan_ops.del_vlan,
2292 						 &vlan);
2293 			if (err)
2294 				return err;
2295 
2296 			/* no support for VLAN promiscuous on inner VLAN unless
2297 			 * we are in Single VLAN Mode (SVM)
2298 			 */
2299 			if (!ice_is_dvm_ena(&vsi->back->hw)) {
2300 				if (vlan_promisc)
2301 					ice_vf_dis_vlan_promisc(vsi, &vlan);
2302 
2303 				/* Disable VLAN filtering when only VLAN 0 is left */
2304 				if (!ice_vsi_has_non_zero_vlans(vsi)) {
2305 					err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
2306 					if (err)
2307 						return err;
2308 				}
2309 			}
2310 		}
2311 	}
2312 
2313 	return 0;
2314 }
2315 
2316 /**
2317  * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
2318  * @vf: VF the message was received from
2319  * @msg: message received from the VF
2320  */
2321 static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
2322 {
2323 	struct virtchnl_vlan_filter_list_v2 *vfl =
2324 		(struct virtchnl_vlan_filter_list_v2 *)msg;
2325 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2326 	struct ice_vsi *vsi;
2327 
2328 	if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
2329 					      vfl)) {
2330 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2331 		goto out;
2332 	}
2333 
2334 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
2335 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2336 		goto out;
2337 	}
2338 
2339 	vsi = ice_get_vf_vsi(vf);
2340 	if (!vsi) {
2341 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2342 		goto out;
2343 	}
2344 
2345 	if (ice_vc_del_vlans(vf, vsi, vfl))
2346 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2347 
2348 out:
2349 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
2350 				     0);
2351 }
2352 
2353 /**
2354  * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
2355  * @vf: VF used to add the VLAN(s)
2356  * @vsi: VF's VSI used to add the VLAN(s)
 * @vfl: virtchnl filter list used to add the filters
2358  */
2359 static int
2360 ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
2361 		 struct virtchnl_vlan_filter_list_v2 *vfl)
2362 {
2363 	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
2364 	int err;
2365 	u16 i;
2366 
2367 	for (i = 0; i < vfl->num_elements; i++) {
2368 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2369 		struct virtchnl_vlan *vc_vlan;
2370 
2371 		vc_vlan = &vlan_fltr->outer;
2372 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2373 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2374 
2375 			err = ice_vc_vlan_action(vsi,
2376 						 vsi->outer_vlan_ops.add_vlan,
2377 						 &vlan);
2378 			if (err)
2379 				return err;
2380 
2381 			if (vlan_promisc) {
2382 				err = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
2383 				if (err)
2384 					return err;
2385 			}
2386 
2387 			/* Enable VLAN filtering on first non-zero VLAN */
2388 			if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
2389 				err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
2390 				if (err)
2391 					return err;
2392 			}
2393 		}
2394 
2395 		vc_vlan = &vlan_fltr->inner;
2396 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2397 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2398 
2399 			err = ice_vc_vlan_action(vsi,
2400 						 vsi->inner_vlan_ops.add_vlan,
2401 						 &vlan);
2402 			if (err)
2403 				return err;
2404 
2405 			/* no support for VLAN promiscuous on inner VLAN unless
2406 			 * we are in Single VLAN Mode (SVM)
2407 			 */
2408 			if (!ice_is_dvm_ena(&vsi->back->hw)) {
2409 				if (vlan_promisc) {
2410 					err = ice_vf_ena_vlan_promisc(vf, vsi,
2411 								      &vlan);
2412 					if (err)
2413 						return err;
2414 				}
2415 
2416 				/* Enable VLAN filtering on first non-zero VLAN */
2417 				if (vf->spoofchk && vlan.vid) {
2418 					err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
2419 					if (err)
2420 						return err;
2421 				}
2422 			}
2423 		}
2424 	}
2425 
2426 	return 0;
2427 }
2428 
2429 /**
2430  * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
2431  * @vsi: VF VSI used to get number of existing VLAN filters
2432  * @vfc: negotiated/supported VLAN filtering capabilities
2433  * @vfl: VLAN filter list from VF to validate
2434  *
2435  * Validate all of the filters in the VLAN filter list from the VF during the
2436  * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
2437  * Otherwise return true.
2438  */
2439 static bool
2440 ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
2441 				     struct virtchnl_vlan_filtering_caps *vfc,
2442 				     struct virtchnl_vlan_filter_list_v2 *vfl)
2443 {
2444 	u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
2445 		vfl->num_elements;
2446 
2447 	if (num_requested_filters > vfc->max_filters)
2448 		return false;
2449 
2450 	return ice_vc_validate_vlan_filter_list(vfc, vfl);
2451 }
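
/*
 * Example (illustrative): if the VSI already carries 3 non-zero VLAN
 * filters, the VF requests 2 more, and the negotiated max_filters is 4,
 * then 3 + 2 = 5 > 4 and the request is rejected before any filter is
 * programmed.
 */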
2452 
2453 /**
2454  * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
2455  * @vf: VF the message was received from
2456  * @msg: message received from the VF
2457  */
2458 static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
2459 {
2460 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2461 	struct virtchnl_vlan_filter_list_v2 *vfl =
2462 		(struct virtchnl_vlan_filter_list_v2 *)msg;
2463 	struct ice_vsi *vsi;
2464 
2465 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2466 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2467 		goto out;
2468 	}
2469 
2470 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
2471 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2472 		goto out;
2473 	}
2474 
2475 	vsi = ice_get_vf_vsi(vf);
2476 	if (!vsi) {
2477 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2478 		goto out;
2479 	}
2480 
2481 	if (!ice_vc_validate_add_vlan_filter_list(vsi,
2482 						  &vf->vlan_v2_caps.filtering,
2483 						  vfl)) {
2484 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2485 		goto out;
2486 	}
2487 
2488 	if (ice_vc_add_vlans(vf, vsi, vfl))
2489 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2490 
2491 out:
2492 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
2493 				     0);
2494 }
2495 
2496 /**
2497  * ice_vc_valid_vlan_setting - validate VLAN setting
2498  * @negotiated_settings: negotiated VLAN settings during VF init
2499  * @ethertype_setting: ethertype(s) requested for the VLAN setting
2500  */
2501 static bool
2502 ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
2503 {
2504 	if (ethertype_setting && !(negotiated_settings & ethertype_setting))
2505 		return false;
2506 
2507 	/* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
	 * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
2509 	 */
2510 	if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
2511 	    hweight32(ethertype_setting) > 1)
2512 		return false;
2513 
2514 	/* ability to modify the VLAN setting was not negotiated */
2515 	if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
2516 		return false;
2517 
2518 	return true;
2519 }
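
/*
 * Example (illustrative): with negotiated_settings ==
 * (VIRTCHNL_VLAN_TOGGLE | VIRTCHNL_VLAN_ETHERTYPE_8100), requesting
 * VIRTCHNL_VLAN_ETHERTYPE_8100 is valid. Requesting
 * (VIRTCHNL_VLAN_ETHERTYPE_8100 | VIRTCHNL_VLAN_ETHERTYPE_88A8) is rejected
 * by the hweight32() check, since more than one ethertype requires
 * VIRTCHNL_VLAN_ETHERTYPE_AND. Without VIRTCHNL_VLAN_TOGGLE no setting may
 * be toggled at all.
 */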
2520 
2521 /**
2522  * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
2523  * @caps: negotiated VLAN settings during VF init
2524  * @msg: message to validate
2525  *
2526  * Used to validate any VLAN virtchnl message sent as a
2527  * virtchnl_vlan_setting structure. Validates the message against the
2528  * negotiated/supported caps during VF driver init.
2529  */
2530 static bool
2531 ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
2532 			      struct virtchnl_vlan_setting *msg)
2533 {
2534 	if ((!msg->outer_ethertype_setting &&
2535 	     !msg->inner_ethertype_setting) ||
2536 	    (!caps->outer && !caps->inner))
2537 		return false;
2538 
2539 	if (msg->outer_ethertype_setting &&
2540 	    !ice_vc_valid_vlan_setting(caps->outer,
2541 				       msg->outer_ethertype_setting))
2542 		return false;
2543 
2544 	if (msg->inner_ethertype_setting &&
2545 	    !ice_vc_valid_vlan_setting(caps->inner,
2546 				       msg->inner_ethertype_setting))
2547 		return false;
2548 
2549 	return true;
2550 }
2551 
2552 /**
2553  * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
2554  * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
2555  * @tpid: VLAN TPID to populate
2556  */
2557 static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
2558 {
2559 	switch (ethertype_setting) {
2560 	case VIRTCHNL_VLAN_ETHERTYPE_8100:
2561 		*tpid = ETH_P_8021Q;
2562 		break;
2563 	case VIRTCHNL_VLAN_ETHERTYPE_88A8:
2564 		*tpid = ETH_P_8021AD;
2565 		break;
2566 	case VIRTCHNL_VLAN_ETHERTYPE_9100:
2567 		*tpid = ETH_P_QINQ1;
2568 		break;
2569 	default:
2570 		*tpid = 0;
2571 		return -EINVAL;
2572 	}
2573 
2574 	return 0;
2575 }
2576 
2577 /**
2578  * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
2579  * @vsi: VF's VSI used to enable the VLAN offload
2580  * @ena_offload: function used to enable the VLAN offload
2581  * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
2582  */
2583 static int
2584 ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
2585 			int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
2586 			u32 ethertype_setting)
2587 {
2588 	u16 tpid;
2589 	int err;
2590 
2591 	err = ice_vc_get_tpid(ethertype_setting, &tpid);
2592 	if (err)
2593 		return err;
2594 
	return ena_offload(vsi, tpid);
2600 }
2601 
2602 /**
2603  * ice_vc_ena_vlan_stripping_v2_msg
2604  * @vf: VF the message was received from
2605  * @msg: message received from the VF
2606  *
 * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
2608  */
2609 static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
2610 {
2611 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2612 	struct virtchnl_vlan_supported_caps *stripping_support;
2613 	struct virtchnl_vlan_setting *strip_msg =
2614 		(struct virtchnl_vlan_setting *)msg;
2615 	u32 ethertype_setting;
2616 	struct ice_vsi *vsi;
2617 
2618 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2619 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2620 		goto out;
2621 	}
2622 
2623 	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
2624 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2625 		goto out;
2626 	}
2627 
2628 	vsi = ice_get_vf_vsi(vf);
2629 	if (!vsi) {
2630 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2631 		goto out;
2632 	}
2633 
2634 	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
2635 	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
2636 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2637 		goto out;
2638 	}
2639 
2640 	if (ice_vsi_is_rxq_crc_strip_dis(vsi)) {
2641 		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
2642 		goto out;
2643 	}
2644 
2645 	ethertype_setting = strip_msg->outer_ethertype_setting;
2646 	if (ethertype_setting) {
2647 		if (ice_vc_ena_vlan_offload(vsi,
2648 					    vsi->outer_vlan_ops.ena_stripping,
2649 					    ethertype_setting)) {
2650 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2651 			goto out;
2652 		} else {
2653 			enum ice_l2tsel l2tsel =
2654 				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
2655 
2656 			/* PF tells the VF that the outer VLAN tag is always
2657 			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
2658 			 * inner is always extracted to
2659 			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
2660 			 * support outer stripping so the first tag always ends
2661 			 * up in L2TAG2_2ND and the second/inner tag, if
2662 			 * enabled, is extracted in L2TAG1.
2663 			 */
2664 			ice_vsi_update_l2tsel(vsi, l2tsel);
2665 
2666 			vf->vlan_strip_ena |= ICE_OUTER_VLAN_STRIP_ENA;
2667 		}
2668 	}
2669 
2670 	ethertype_setting = strip_msg->inner_ethertype_setting;
2671 	if (ethertype_setting &&
2672 	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
2673 				    ethertype_setting)) {
2674 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2675 		goto out;
2676 	}
2677 
2678 	if (ethertype_setting)
2679 		vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
2680 
2681 out:
2682 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
2683 				     v_ret, NULL, 0);
2684 }
2685 
2686 /**
2687  * ice_vc_dis_vlan_stripping_v2_msg
2688  * @vf: VF the message was received from
2689  * @msg: message received from the VF
2690  *
 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
2692  */
2693 static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
2694 {
2695 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2696 	struct virtchnl_vlan_supported_caps *stripping_support;
2697 	struct virtchnl_vlan_setting *strip_msg =
2698 		(struct virtchnl_vlan_setting *)msg;
2699 	u32 ethertype_setting;
2700 	struct ice_vsi *vsi;
2701 
2702 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2703 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2704 		goto out;
2705 	}
2706 
2707 	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
2708 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2709 		goto out;
2710 	}
2711 
2712 	vsi = ice_get_vf_vsi(vf);
2713 	if (!vsi) {
2714 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2715 		goto out;
2716 	}
2717 
2718 	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
2719 	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
2720 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2721 		goto out;
2722 	}
2723 
2724 	ethertype_setting = strip_msg->outer_ethertype_setting;
2725 	if (ethertype_setting) {
2726 		if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
2727 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2728 			goto out;
2729 		} else {
2730 			enum ice_l2tsel l2tsel =
2731 				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
2732 
2733 			/* PF tells the VF that the outer VLAN tag is always
2734 			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
2735 			 * inner is always extracted to
2736 			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
2737 			 * support inner stripping while outer stripping is
2738 			 * disabled so that the first and only tag is extracted
2739 			 * in L2TAG1.
2740 			 */
2741 			ice_vsi_update_l2tsel(vsi, l2tsel);
2742 
2743 			vf->vlan_strip_ena &= ~ICE_OUTER_VLAN_STRIP_ENA;
2744 		}
2745 	}
2746 
2747 	ethertype_setting = strip_msg->inner_ethertype_setting;
2748 	if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
2749 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2750 		goto out;
2751 	}
2752 
2753 	if (ethertype_setting)
2754 		vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;
2755 
2756 out:
2757 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
2758 				     v_ret, NULL, 0);
2759 }
2760 
2761 /**
2762  * ice_vc_ena_vlan_insertion_v2_msg
2763  * @vf: VF the message was received from
2764  * @msg: message received from the VF
2765  *
 * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
2767  */
2768 static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
2769 {
2770 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2771 	struct virtchnl_vlan_supported_caps *insertion_support;
2772 	struct virtchnl_vlan_setting *insertion_msg =
2773 		(struct virtchnl_vlan_setting *)msg;
2774 	u32 ethertype_setting;
2775 	struct ice_vsi *vsi;
2776 
2777 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2778 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2779 		goto out;
2780 	}
2781 
2782 	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
2783 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2784 		goto out;
2785 	}
2786 
2787 	vsi = ice_get_vf_vsi(vf);
2788 	if (!vsi) {
2789 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2790 		goto out;
2791 	}
2792 
2793 	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
2794 	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
2795 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2796 		goto out;
2797 	}
2798 
2799 	ethertype_setting = insertion_msg->outer_ethertype_setting;
2800 	if (ethertype_setting &&
2801 	    ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
2802 				    ethertype_setting)) {
2803 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2804 		goto out;
2805 	}
2806 
2807 	ethertype_setting = insertion_msg->inner_ethertype_setting;
2808 	if (ethertype_setting &&
2809 	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
2810 				    ethertype_setting)) {
2811 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2812 		goto out;
2813 	}
2814 
2815 out:
2816 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
2817 				     v_ret, NULL, 0);
2818 }
2819 
2820 /**
2821  * ice_vc_dis_vlan_insertion_v2_msg
2822  * @vf: VF the message was received from
2823  * @msg: message received from the VF
2824  *
 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
2826  */
2827 static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
2828 {
2829 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2830 	struct virtchnl_vlan_supported_caps *insertion_support;
2831 	struct virtchnl_vlan_setting *insertion_msg =
2832 		(struct virtchnl_vlan_setting *)msg;
2833 	u32 ethertype_setting;
2834 	struct ice_vsi *vsi;
2835 
2836 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2837 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2838 		goto out;
2839 	}
2840 
2841 	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
2842 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2843 		goto out;
2844 	}
2845 
2846 	vsi = ice_get_vf_vsi(vf);
2847 	if (!vsi) {
2848 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2849 		goto out;
2850 	}
2851 
2852 	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
2853 	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
2854 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2855 		goto out;
2856 	}
2857 
2858 	ethertype_setting = insertion_msg->outer_ethertype_setting;
2859 	if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
2860 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2861 		goto out;
2862 	}
2863 
2864 	ethertype_setting = insertion_msg->inner_ethertype_setting;
2865 	if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
2866 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2867 		goto out;
2868 	}
2869 
2870 out:
2871 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
2872 				     v_ret, NULL, 0);
2873 }
2874 
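/**
 * ice_vc_get_ptp_cap - negotiate 1588 PTP capabilities with a VF
 * @vf: VF the message was received from
 * @msg: PTP capabilities requested by the VF
 *
 * Handler for VIRTCHNL_OP_1588_PTP_GET_CAPS. If the VF requested any of the
 * PF-supported capabilities (Rx timestamping, direct PHC reads), store the
 * full supported set and report it back to the VF.
 */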
2875 static int ice_vc_get_ptp_cap(struct ice_vf *vf,
2876 			      const struct virtchnl_ptp_caps *msg)
2877 {
2878 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2879 	u32 caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
2880 		   VIRTCHNL_1588_PTP_CAP_READ_PHC;
2881 
2882 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
2883 		goto err;
2884 
2885 	v_ret = VIRTCHNL_STATUS_SUCCESS;
2886 
2887 	if (msg->caps & caps)
2888 		vf->ptp_caps = caps;
2889 
2890 err:
2891 	/* send the response back to the VF */
2892 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_CAPS, v_ret,
2893 				     (u8 *)&vf->ptp_caps,
2894 				     sizeof(struct virtchnl_ptp_caps));
2895 }
2896 
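/**
 * ice_vc_get_phc_time - respond to the VF with the current PHC time
 * @vf: VF the message was received from
 *
 * Handler for VIRTCHNL_OP_1588_PTP_GET_TIME. Read the current time from the
 * source clock and send it back to the VF.
 */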
2897 static int ice_vc_get_phc_time(struct ice_vf *vf)
2898 {
2899 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2900 	struct virtchnl_phc_time *phc_time = NULL;
2901 	struct ice_pf *pf = vf->pf;
2902 	u32 len = 0;
2903 	int ret;
2904 
2905 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
2906 		goto err;
2907 
2908 	v_ret = VIRTCHNL_STATUS_SUCCESS;
2909 
2910 	phc_time = kzalloc(sizeof(*phc_time), GFP_KERNEL);
2911 	if (!phc_time) {
2912 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2913 		goto err;
2914 	}
2915 
2916 	len = sizeof(*phc_time);
2917 
2918 	phc_time->time = ice_ptp_read_src_clk_reg(pf, NULL);
2919 
2920 err:
2921 	/* send the response back to the VF */
2922 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_TIME, v_ret,
2923 				    (u8 *)phc_time, len);
2924 	kfree(phc_time);
2925 	return ret;
2926 }
2927 
2928 static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
2929 	.get_ver_msg = ice_vc_get_ver_msg,
2930 	.get_vf_res_msg = ice_vc_get_vf_res_msg,
2931 	.reset_vf = ice_vc_reset_vf_msg,
2932 	.add_mac_addr_msg = ice_vc_add_mac_addr_msg,
2933 	.del_mac_addr_msg = ice_vc_del_mac_addr_msg,
2934 	.cfg_qs_msg = ice_vc_cfg_qs_msg,
2935 	.ena_qs_msg = ice_vc_ena_qs_msg,
2936 	.dis_qs_msg = ice_vc_dis_qs_msg,
2937 	.request_qs_msg = ice_vc_request_qs_msg,
2938 	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
2939 	.config_rss_key = ice_vc_config_rss_key,
2940 	.config_rss_lut = ice_vc_config_rss_lut,
2941 	.config_rss_hfunc = ice_vc_config_rss_hfunc,
2942 	.get_stats_msg = ice_vc_get_stats_msg,
2943 	.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
2944 	.add_vlan_msg = ice_vc_add_vlan_msg,
2945 	.remove_vlan_msg = ice_vc_remove_vlan_msg,
2946 	.query_rxdid = ice_vc_query_rxdid,
2947 	.get_rss_hashcfg = ice_vc_get_rss_hashcfg,
2948 	.set_rss_hashcfg = ice_vc_set_rss_hashcfg,
2949 	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
2950 	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
2951 	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
2952 	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
2953 	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
2954 	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
2955 	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
2956 	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
2957 	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
2958 	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
2959 	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
2960 	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
2961 	.get_qos_caps = ice_vc_get_qos_caps,
2962 	.cfg_q_bw = ice_vc_cfg_q_bw,
2963 	.cfg_q_quanta = ice_vc_cfg_q_quanta,
2964 	.get_ptp_cap = ice_vc_get_ptp_cap,
2965 	.get_phc_time = ice_vc_get_phc_time,
2966 	/* If you add a new op here please make sure to add it to
2967 	 * ice_virtchnl_repr_ops as well.
2968 	 */
2969 };
2970 
2971 /**
2972  * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
2973  * @vf: the VF to switch ops
2974  */
2975 void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
2976 {
2977 	vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
2978 }
2979 
2980 /**
2981  * ice_vc_repr_add_mac
2982  * @vf: pointer to VF
2983  * @msg: virtchannel message
2984  *
 * When port representors are created, we do not add a MAC rule
 * to firmware; instead, we store the address so that the PF can
 * report the same MAC as the VF.
2988  */
2989 static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
2990 {
2991 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2992 	struct virtchnl_ether_addr_list *al =
2993 	    (struct virtchnl_ether_addr_list *)msg;
2994 	struct ice_vsi *vsi;
2995 	struct ice_pf *pf;
2996 	int i;
2997 
2998 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2999 	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3000 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3001 		goto handle_mac_exit;
3002 	}
3003 
3004 	pf = vf->pf;
3005 
3006 	vsi = ice_get_vf_vsi(vf);
3007 	if (!vsi) {
3008 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3009 		goto handle_mac_exit;
3010 	}
3011 
3012 	for (i = 0; i < al->num_elements; i++) {
3013 		u8 *mac_addr = al->list[i].addr;
3014 
3015 		if (!is_unicast_ether_addr(mac_addr) ||
3016 		    ether_addr_equal(mac_addr, vf->hw_lan_addr))
3017 			continue;
3018 
3019 		if (vf->pf_set_mac) {
3020 			dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
3021 			v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
3022 			goto handle_mac_exit;
3023 		}
3024 
3025 		ice_vfhw_mac_add(vf, &al->list[i]);
3026 		break;
3027 	}
3028 
3029 handle_mac_exit:
3030 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
3031 				     v_ret, NULL, 0);
3032 }
3033 
3034 /**
3035  * ice_vc_repr_del_mac - response with success for deleting MAC
3036  * @vf: pointer to VF
3037  * @msg: virtchannel message
3038  *
 * Respond with success so that the normal VF flow is not broken.
 * For legacy VF drivers, try to update the cached MAC address.
3041  */
static int ice_vc_repr_del_mac(struct ice_vf *vf, u8 *msg)
3044 {
3045 	struct virtchnl_ether_addr_list *al =
3046 		(struct virtchnl_ether_addr_list *)msg;
3047 
3048 	ice_update_legacy_cached_mac(vf, &al->list[0]);
3049 
3050 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
3051 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
3052 }
3053 
3054 static int
3055 ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
3056 {
3057 	dev_dbg(ice_pf_to_dev(vf->pf),
3058 		"Can't config promiscuous mode in switchdev mode for VF %d\n",
3059 		vf->vf_id);
3060 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
3061 				     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3062 				     NULL, 0);
3063 }
3064 
3065 static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
3066 	.get_ver_msg = ice_vc_get_ver_msg,
3067 	.get_vf_res_msg = ice_vc_get_vf_res_msg,
3068 	.reset_vf = ice_vc_reset_vf_msg,
3069 	.add_mac_addr_msg = ice_vc_repr_add_mac,
3070 	.del_mac_addr_msg = ice_vc_repr_del_mac,
3071 	.cfg_qs_msg = ice_vc_cfg_qs_msg,
3072 	.ena_qs_msg = ice_vc_ena_qs_msg,
3073 	.dis_qs_msg = ice_vc_dis_qs_msg,
3074 	.request_qs_msg = ice_vc_request_qs_msg,
3075 	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
3076 	.config_rss_key = ice_vc_config_rss_key,
3077 	.config_rss_lut = ice_vc_config_rss_lut,
3078 	.config_rss_hfunc = ice_vc_config_rss_hfunc,
3079 	.get_stats_msg = ice_vc_get_stats_msg,
3080 	.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
3081 	.add_vlan_msg = ice_vc_add_vlan_msg,
3082 	.remove_vlan_msg = ice_vc_remove_vlan_msg,
3083 	.query_rxdid = ice_vc_query_rxdid,
3084 	.get_rss_hashcfg = ice_vc_get_rss_hashcfg,
3085 	.set_rss_hashcfg = ice_vc_set_rss_hashcfg,
3086 	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
3087 	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
3088 	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
3089 	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
3090 	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
3091 	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
3092 	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
3093 	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
3094 	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
3095 	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
3096 	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
3097 	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
3098 	.get_qos_caps = ice_vc_get_qos_caps,
3099 	.cfg_q_bw = ice_vc_cfg_q_bw,
3100 	.cfg_q_quanta = ice_vc_cfg_q_quanta,
3101 	.get_ptp_cap = ice_vc_get_ptp_cap,
3102 	.get_phc_time = ice_vc_get_phc_time,
3103 };
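
/* ice_virtchnl_repr_ops differs from ice_virtchnl_dflt_ops only in the MAC
 * add/del and promiscuous-mode handlers, which are replaced with
 * representor-specific versions for switchdev mode.
 */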
3104 
3105 /**
3106  * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops
3107  * @vf: the VF to switch ops
3108  */
3109 void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
3110 {
3111 	vf->virtchnl_ops = &ice_virtchnl_repr_ops;
3112 }
3113 
3114 /**
 * ice_is_malicious_vf - check if this VF might be overflowing the mailbox
3116  * @vf: the VF to check
3117  * @mbxdata: data about the state of the mailbox
3118  *
3119  * Detect if a given VF might be malicious and attempting to overflow the PF
3120  * mailbox. If so, log a warning message and ignore this event.
3121  */
3122 static bool
3123 ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata)
3124 {
3125 	bool report_malvf = false;
3126 	struct device *dev;
3127 	struct ice_pf *pf;
3128 	int status;
3129 
3130 	pf = vf->pf;
3131 	dev = ice_pf_to_dev(pf);
3132 
3133 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
3134 		return vf->mbx_info.malicious;
3135 
3136 	/* check to see if we have a newly malicious VF */
3137 	status = ice_mbx_vf_state_handler(&pf->hw, mbxdata, &vf->mbx_info,
3138 					  &report_malvf);
3139 	if (status)
3140 		dev_warn_ratelimited(dev, "Unable to check status of mailbox overflow for VF %u MAC %pM, status %d\n",
3141 				     vf->vf_id, vf->dev_lan_addr, status);
3142 
3143 	if (report_malvf) {
3144 		struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
3145 		u8 zero_addr[ETH_ALEN] = {};
3146 
3147 		dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
3148 			 vf->dev_lan_addr,
3149 			 pf_vsi ? pf_vsi->netdev->dev_addr : zero_addr);
3150 	}
3151 
3152 	return vf->mbx_info.malicious;
3153 }
3154 
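/*
 * Note on the dispatcher below: each virtchnl handler is expected to send
 * its own reply to the VF (typically via ice_vc_send_msg_to_vf()); the
 * dispatcher only validates and routes the message, replies itself on
 * validation failure or unknown opcodes, and logs handler errors.
 */
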
3155 /**
3156  * ice_vc_process_vf_msg - Process request from VF
3157  * @pf: pointer to the PF structure
3158  * @event: pointer to the AQ event
3159  * @mbxdata: information used to detect VF attempting mailbox overflow
3160  *
 * Called from the common asq/arq handler to process a request from a VF. When
 * this flow is used for devices with hardware VF to PF message queue overflow
 * support (ICE_F_MBX_LIMIT), mbxdata is set to NULL and the
 * ice_is_malicious_vf() check is skipped.
3165  */
3166 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
3167 			   struct ice_mbx_data *mbxdata)
3168 {
3169 	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3170 	s16 vf_id = le16_to_cpu(event->desc.retval);
3171 	const struct ice_virtchnl_ops *ops;
3172 	u16 msglen = event->msg_len;
3173 	u8 *msg = event->msg_buf;
3174 	struct ice_vf *vf = NULL;
3175 	struct device *dev;
3176 	int err = 0;
3177 
3178 	dev = ice_pf_to_dev(pf);
3179 
3180 	vf = ice_get_vf_by_id(pf, vf_id);
3181 	if (!vf) {
3182 		dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
3183 			vf_id, v_opcode, msglen);
3184 		return;
3185 	}
3186 
3187 	mutex_lock(&vf->cfg_lock);
3188 
3189 	/* Check if the VF is trying to overflow the mailbox */
3190 	if (mbxdata && ice_is_malicious_vf(vf, mbxdata))
3191 		goto finish;
3192 
3193 	/* Check if VF is disabled. */
3194 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3195 		err = -EPERM;
3196 		goto error_handler;
3197 	}
3198 
3199 	ops = vf->virtchnl_ops;
3200 
3201 	/* Perform basic checks on the msg */
3202 	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3203 	if (err) {
3204 		if (err == VIRTCHNL_STATUS_ERR_PARAM)
3205 			err = -EPERM;
3206 		else
3207 			err = -EINVAL;
3208 	}
3209 
3210 error_handler:
3211 	if (err) {
3212 		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
3213 				      NULL, 0);
3214 		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3215 			vf_id, v_opcode, msglen, err);
3216 		goto finish;
3217 	}
3218 
3219 	if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
3220 		ice_vc_send_msg_to_vf(vf, v_opcode,
3221 				      VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
3222 				      0);
3223 		goto finish;
3224 	}
3225 
3226 	switch (v_opcode) {
3227 	case VIRTCHNL_OP_VERSION:
3228 		err = ops->get_ver_msg(vf, msg);
3229 		break;
3230 	case VIRTCHNL_OP_GET_VF_RESOURCES:
3231 		err = ops->get_vf_res_msg(vf, msg);
3232 		if (ice_vf_init_vlan_stripping(vf))
3233 			dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
3234 				vf->vf_id);
3235 		ice_vc_notify_vf_link_state(vf);
3236 		break;
3237 	case VIRTCHNL_OP_RESET_VF:
3238 		ops->reset_vf(vf);
3239 		break;
3240 	case VIRTCHNL_OP_ADD_ETH_ADDR:
3241 		err = ops->add_mac_addr_msg(vf, msg);
3242 		break;
3243 	case VIRTCHNL_OP_DEL_ETH_ADDR:
3244 		err = ops->del_mac_addr_msg(vf, msg);
3245 		break;
3246 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3247 		err = ops->cfg_qs_msg(vf, msg);
3248 		break;
3249 	case VIRTCHNL_OP_ENABLE_QUEUES:
3250 		err = ops->ena_qs_msg(vf, msg);
3251 		ice_vc_notify_vf_link_state(vf);
3252 		break;
3253 	case VIRTCHNL_OP_DISABLE_QUEUES:
3254 		err = ops->dis_qs_msg(vf, msg);
3255 		break;
3256 	case VIRTCHNL_OP_REQUEST_QUEUES:
3257 		err = ops->request_qs_msg(vf, msg);
3258 		break;
3259 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3260 		err = ops->cfg_irq_map_msg(vf, msg);
3261 		break;
3262 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
3263 		err = ops->config_rss_key(vf, msg);
3264 		break;
3265 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
3266 		err = ops->config_rss_lut(vf, msg);
3267 		break;
3268 	case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
3269 		err = ops->config_rss_hfunc(vf, msg);
3270 		break;
3271 	case VIRTCHNL_OP_GET_STATS:
3272 		err = ops->get_stats_msg(vf, msg);
3273 		break;
3274 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3275 		err = ops->cfg_promiscuous_mode_msg(vf, msg);
3276 		break;
3277 	case VIRTCHNL_OP_ADD_VLAN:
3278 		err = ops->add_vlan_msg(vf, msg);
3279 		break;
3280 	case VIRTCHNL_OP_DEL_VLAN:
3281 		err = ops->remove_vlan_msg(vf, msg);
3282 		break;
3283 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
3284 		err = ops->query_rxdid(vf);
3285 		break;
3286 	case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
3287 		err = ops->get_rss_hashcfg(vf);
3288 		break;
3289 	case VIRTCHNL_OP_SET_RSS_HASHCFG:
3290 		err = ops->set_rss_hashcfg(vf, msg);
3291 		break;
3292 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3293 		err = ops->ena_vlan_stripping(vf);
3294 		break;
3295 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3296 		err = ops->dis_vlan_stripping(vf);
3297 		break;
3298 	case VIRTCHNL_OP_ADD_FDIR_FILTER:
3299 		err = ops->add_fdir_fltr_msg(vf, msg);
3300 		break;
3301 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
3302 		err = ops->del_fdir_fltr_msg(vf, msg);
3303 		break;
3304 	case VIRTCHNL_OP_ADD_RSS_CFG:
3305 		err = ops->handle_rss_cfg_msg(vf, msg, true);
3306 		break;
3307 	case VIRTCHNL_OP_DEL_RSS_CFG:
3308 		err = ops->handle_rss_cfg_msg(vf, msg, false);
3309 		break;
3310 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
3311 		err = ops->get_offload_vlan_v2_caps(vf);
3312 		break;
3313 	case VIRTCHNL_OP_ADD_VLAN_V2:
3314 		err = ops->add_vlan_v2_msg(vf, msg);
3315 		break;
3316 	case VIRTCHNL_OP_DEL_VLAN_V2:
3317 		err = ops->remove_vlan_v2_msg(vf, msg);
3318 		break;
3319 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
3320 		err = ops->ena_vlan_stripping_v2_msg(vf, msg);
3321 		break;
3322 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
3323 		err = ops->dis_vlan_stripping_v2_msg(vf, msg);
3324 		break;
3325 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
3326 		err = ops->ena_vlan_insertion_v2_msg(vf, msg);
3327 		break;
3328 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
3329 		err = ops->dis_vlan_insertion_v2_msg(vf, msg);
3330 		break;
3331 	case VIRTCHNL_OP_GET_QOS_CAPS:
3332 		err = ops->get_qos_caps(vf);
3333 		break;
3334 	case VIRTCHNL_OP_CONFIG_QUEUE_BW:
3335 		err = ops->cfg_q_bw(vf, msg);
3336 		break;
3337 	case VIRTCHNL_OP_CONFIG_QUANTA:
3338 		err = ops->cfg_q_quanta(vf, msg);
3339 		break;
3340 	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
3341 		err = ops->get_ptp_cap(vf, (const void *)msg);
3342 		break;
3343 	case VIRTCHNL_OP_1588_PTP_GET_TIME:
3344 		err = ops->get_phc_time(vf);
3345 		break;
3346 	case VIRTCHNL_OP_UNKNOWN:
3347 	default:
3348 		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3349 			vf_id);
3350 		err = ice_vc_send_msg_to_vf(vf, v_opcode,
3351 					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3352 					    NULL, 0);
3353 		break;
3354 	}
3355 	if (err) {
		/* The handlers reply to the VF themselves, so a failure is
		 * informational only and is simply logged here.
		 */
3359 		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3360 			 vf_id, v_opcode, err);
3361 	}
3362 
3363 finish:
3364 	mutex_unlock(&vf->cfg_lock);
3365 	ice_put_vf(vf);
3366 }
3367