1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3 
4 #include "virtchnl.h"
5 #include "ice_vf_lib_private.h"
6 #include "ice.h"
7 #include "ice_base.h"
8 #include "ice_lib.h"
9 
10 /**
11  * ice_vc_get_max_frame_size - get max frame size allowed for VF
12  * @vf: VF used to determine max frame size
13  *
14  * Max frame size is determined based on the current port's max frame size and
15  * whether a port VLAN is configured on this VF. The VF is not aware whether
16  * it's in a port VLAN, so the PF needs to account for this in max frame size
17  * checks and when sending the max frame size to the VF.
18  */
19 static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
20 {
21 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
22 	u16 max_frame_size;
23 
24 	max_frame_size = pi->phy.link_info.max_frame_size;
25 
26 	if (ice_vf_is_port_vlan_ena(vf))
27 		max_frame_size -= VLAN_HLEN;
28 
29 	return max_frame_size;
30 }
31 
32 /**
33  * ice_vc_isvalid_q_id
34  * @vsi: VSI to check queue ID against
35  * @qid: VSI relative queue ID
36  *
37  * Check that the given VSI-relative queue ID is valid.
38  */
39 static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
40 {
41 	/* allocated Tx and Rx queues should always be equal for a VF VSI */
42 	return qid < vsi->alloc_txq;
43 }
44 
45 /**
46  * ice_vc_isvalid_ring_len
47  * @ring_len: length of ring
48  *
49  * Check that the ring length is zero or a multiple of ICE_REQ_DESC_MULTIPLE
50  * within [ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC].
51  */
52 static bool ice_vc_isvalid_ring_len(u16 ring_len)
53 {
54 	return ring_len == 0 ||
55 	       (ring_len >= ICE_MIN_NUM_DESC &&
56 		ring_len <= ICE_MAX_NUM_DESC &&
57 		!(ring_len % ICE_REQ_DESC_MULTIPLE));
58 }
59 
60 /**
61  * ice_vf_cfg_qs_bw - Configure per queue bandwidth
62  * @vf: pointer to the VF info
63  * @num_queues: number of queues to be configured
64  *
65  * Configure per queue bandwidth.
66  *
67  * Return: 0 on success or negative error value.
68  */
69 static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
70 {
71 	struct ice_hw *hw = &vf->pf->hw;
72 	struct ice_vsi *vsi;
73 	int ret;
74 	u16 i;
75 
76 	vsi = ice_get_vf_vsi(vf);
77 	if (!vsi)
78 		return -EINVAL;
79 
80 	for (i = 0; i < num_queues; i++) {
81 		u32 p_rate, min_rate;
82 		u8 tc;
83 
84 		p_rate = vf->qs_bw[i].peak;
85 		min_rate = vf->qs_bw[i].committed;
86 		tc = vf->qs_bw[i].tc;
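		/* a zero rate means no limit was requested, so fall back to
		 * the default scheduler bandwidth limit for that direction
		 */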
87 		if (p_rate)
88 			ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
89 					       vf->qs_bw[i].queue_id,
90 					       ICE_MAX_BW, p_rate);
91 		else
92 			ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
93 						    vf->qs_bw[i].queue_id,
94 						    ICE_MAX_BW);
95 		if (ret)
96 			return ret;
97 
98 		if (min_rate)
99 			ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
100 					       vf->qs_bw[i].queue_id,
101 					       ICE_MIN_BW, min_rate);
102 		else
103 			ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
104 						    vf->qs_bw[i].queue_id,
105 						    ICE_MIN_BW);
106 
107 		if (ret)
108 			return ret;
109 	}
110 
111 	return 0;
112 }
113 
114 /**
115  * ice_vf_cfg_q_quanta_profile - Configure quanta profile
116  * @vf: pointer to the VF info
117  * @quanta_prof_idx: pointer to the quanta profile index
118  * @quanta_size: quanta size to be set
119  *
120  * This function chooses an available quanta profile and configures the
121  * register. The quanta profiles are divided evenly among the device's
122  * functions and are then available to that PF and its VFs. The first profile
123  * for each PF is a reserved default profile; only the quanta size of the
124  * remaining unused profiles can be modified.
125  *
126  * Return: 0 on success or negative error value.
127  */
128 static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
129 				       u16 *quanta_prof_idx)
130 {
131 	const u16 n_desc = calc_quanta_desc(quanta_size);
132 	struct ice_hw *hw = &vf->pf->hw;
133 	const u16 n_cmd = 2 * n_desc;
134 	struct ice_pf *pf = vf->pf;
135 	u16 per_pf, begin_id;
136 	u8 n_used;
137 	u32 reg;
138 
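
	/* the quanta profile space is split evenly among the device's
	 * functions; begin_id is the first profile of this PF's slice, which
	 * also serves as its reserved default profile
	 */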
139 	begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs *
140 		   hw->logical_pf_id;
141 
142 	if (quanta_size == ICE_DFLT_QUANTA) {
143 		*quanta_prof_idx = begin_id;
144 	} else {
145 		per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) /
146 			 hw->dev_caps.num_funcs;
147 		n_used = pf->num_quanta_prof_used;
148 		if (n_used < per_pf) {
149 			*quanta_prof_idx = begin_id + 1 + n_used;
150 			pf->num_quanta_prof_used++;
151 		} else {
152 			return -EINVAL;
153 		}
154 	}
155 
156 	reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) |
157 	      FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) |
158 	      FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc);
159 	wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg);
160 
161 	return 0;
162 }
163 
164 /**
165  * ice_vc_cfg_promiscuous_mode_msg
166  * @vf: pointer to the VF info
167  * @msg: pointer to the msg buffer
168  *
169  * called from the VF to configure the VF VSI's promiscuous mode
170  */
171 static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
172 {
173 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
174 	bool rm_promisc, alluni = false, allmulti = false;
175 	struct virtchnl_promisc_info *info =
176 	    (struct virtchnl_promisc_info *)msg;
177 	struct ice_vsi_vlan_ops *vlan_ops;
178 	int mcast_err = 0, ucast_err = 0;
179 	struct ice_pf *pf = vf->pf;
180 	struct ice_vsi *vsi;
181 	u8 mcast_m, ucast_m;
182 	struct device *dev;
183 	int ret = 0;
184 
185 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
186 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
187 		goto error_param;
188 	}
189 
190 	if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
191 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
192 		goto error_param;
193 	}
194 
195 	vsi = ice_get_vf_vsi(vf);
196 	if (!vsi) {
197 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
198 		goto error_param;
199 	}
200 
201 	dev = ice_pf_to_dev(pf);
202 	if (!ice_is_vf_trusted(vf)) {
203 		dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
204 			vf->vf_id);
205 		/* Leave v_ret alone, lie to the VF on purpose. */
206 		goto error_param;
207 	}
208 
209 	if (info->flags & FLAG_VF_UNICAST_PROMISC)
210 		alluni = true;
211 
212 	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
213 		allmulti = true;
214 
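	/* promiscuous mode is being removed entirely when the VF requests
	 * neither unicast nor multicast promiscuous
	 */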
215 	rm_promisc = !allmulti && !alluni;
216 
217 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
218 	if (rm_promisc)
219 		ret = vlan_ops->ena_rx_filtering(vsi);
220 	else
221 		ret = vlan_ops->dis_rx_filtering(vsi);
222 	if (ret) {
223 		dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
224 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
225 		goto error_param;
226 	}
227 
228 	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
229 
230 	if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
231 		if (alluni) {
232 			/* in this case we're turning on promiscuous mode */
233 			ret = ice_set_dflt_vsi(vsi);
234 		} else {
235 			/* in this case we're turning off promiscuous mode */
236 			if (ice_is_dflt_vsi_in_use(vsi->port_info))
237 				ret = ice_clear_dflt_vsi(vsi);
238 		}
239 
240 		/* in this case we're turning on/off only
241 		 * allmulticast
242 		 */
243 		if (allmulti)
244 			mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
245 		else
246 			mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
247 
248 		if (ret) {
249 			dev_err(dev, "Turning on/off promiscuous mode for VF %d failed, error: %d\n",
250 				vf->vf_id, ret);
251 			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
252 			goto error_param;
253 		}
254 	} else {
255 		if (alluni)
256 			ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
257 		else
258 			ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
259 
260 		if (allmulti)
261 			mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
262 		else
263 			mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
264 
265 		if (ucast_err || mcast_err)
266 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
267 	}
268 
269 	if (!mcast_err) {
270 		if (allmulti &&
271 		    !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
272 			dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
273 				 vf->vf_id);
274 		else if (!allmulti &&
275 			 test_and_clear_bit(ICE_VF_STATE_MC_PROMISC,
276 					    vf->vf_states))
277 			dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
278 				 vf->vf_id);
279 	} else {
280 		dev_err(dev, "Error while modifying multicast promiscuous mode for VF %u, error: %d\n",
281 			vf->vf_id, mcast_err);
282 	}
283 
284 	if (!ucast_err) {
285 		if (alluni &&
286 		    !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
287 			dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
288 				 vf->vf_id);
289 		else if (!alluni &&
290 			 test_and_clear_bit(ICE_VF_STATE_UC_PROMISC,
291 					    vf->vf_states))
292 			dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
293 				 vf->vf_id);
294 	} else {
295 		dev_err(dev, "Error while modifying unicast promiscuous mode for VF %u, error: %d\n",
296 			vf->vf_id, ucast_err);
297 	}
298 
299 error_param:
300 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
301 				     v_ret, NULL, 0);
302 }
303 
304 /**
305  * ice_vc_get_stats_msg
306  * @vf: pointer to the VF info
307  * @msg: pointer to the msg buffer
308  *
309  * called from the VF to get VSI stats
310  */
311 static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
312 {
313 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
314 	struct virtchnl_queue_select *vqs =
315 		(struct virtchnl_queue_select *)msg;
316 	struct ice_eth_stats stats = { 0 };
317 	struct ice_vsi *vsi;
318 
319 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
320 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
321 		goto error_param;
322 	}
323 
324 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
325 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
326 		goto error_param;
327 	}
328 
329 	vsi = ice_get_vf_vsi(vf);
330 	if (!vsi) {
331 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
332 		goto error_param;
333 	}
334 
335 	ice_update_eth_stats(vsi);
336 
337 	stats = vsi->eth_stats;
338 
339 error_param:
340 	/* send the response to the VF */
341 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
342 				     (u8 *)&stats, sizeof(stats));
343 }
344 
345 /**
346  * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
347  * @vqs: virtchnl_queue_select structure containing bitmaps to validate
348  *
349  * Return true on successful validation, else false
350  */
351 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
352 {
353 	if ((!vqs->rx_queues && !vqs->tx_queues) ||
354 	    vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
355 	    vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
356 		return false;
357 
358 	return true;
359 }
360 
361 /**
362  * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
363  * @vsi: VSI of the VF to configure
364  * @q_idx: VF queue index used to determine the queue in the PF's space
365  */
366 void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
367 {
368 	struct ice_hw *hw = &vsi->back->hw;
369 	u32 pfq = vsi->txq_map[q_idx];
370 	u32 reg;
371 
372 	reg = rd32(hw, QINT_TQCTL(pfq));
373 
374 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
375 	 * this is most likely a poll mode VF driver, so don't enable an
376 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
377 	 */
378 	if (!(reg & QINT_TQCTL_MSIX_INDX_M))
379 		return;
380 
381 	wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
382 }
383 
384 /**
385  * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
386  * @vsi: VSI of the VF to configure
387  * @q_idx: VF queue index used to determine the queue in the PF's space
388  */
389 void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
390 {
391 	struct ice_hw *hw = &vsi->back->hw;
392 	u32 pfq = vsi->rxq_map[q_idx];
393 	u32 reg;
394 
395 	reg = rd32(hw, QINT_RQCTL(pfq));
396 
397 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
398 	 * this is most likely a poll mode VF driver, so don't enable an
399 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
400 	 */
401 	if (!(reg & QINT_RQCTL_MSIX_INDX_M))
402 		return;
403 
404 	wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
405 }
406 
407 /**
408  * ice_vc_ena_qs_msg
409  * @vf: pointer to the VF info
410  * @msg: pointer to the msg buffer
411  *
412  * called from the VF to enable all or specific queue(s)
413  */
414 static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
415 {
416 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
417 	struct virtchnl_queue_select *vqs =
418 	    (struct virtchnl_queue_select *)msg;
419 	struct ice_vsi *vsi;
420 	unsigned long q_map;
421 	u16 vf_q_id;
422 
423 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
424 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
425 		goto error_param;
426 	}
427 
428 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
429 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
430 		goto error_param;
431 	}
432 
433 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
434 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
435 		goto error_param;
436 	}
437 
438 	vsi = ice_get_vf_vsi(vf);
439 	if (!vsi) {
440 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
441 		goto error_param;
442 	}
443 
444 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
445 	 * Tx queue group list was configured and the context bits were
446 	 * programmed using ice_vsi_cfg_txqs
447 	 */
448 	q_map = vqs->rx_queues;
449 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
450 		if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
451 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
452 			goto error_param;
453 		}
454 
455 		/* Skip queue if already enabled */
456 		if (test_bit(vf_q_id, vf->rxq_ena))
457 			continue;
458 
459 		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
460 			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
461 				vf_q_id, vsi->vsi_num);
462 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
463 			goto error_param;
464 		}
465 
466 		ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
467 		set_bit(vf_q_id, vf->rxq_ena);
468 	}
469 
470 	q_map = vqs->tx_queues;
471 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
472 		if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
473 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
474 			goto error_param;
475 		}
476 
477 		/* Skip queue if already enabled */
478 		if (test_bit(vf_q_id, vf->txq_ena))
479 			continue;
480 
481 		ice_vf_ena_txq_interrupt(vsi, vf_q_id);
482 		set_bit(vf_q_id, vf->txq_ena);
483 	}
484 
485 	/* Set flag to indicate that queues are enabled */
486 	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
487 		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
488 
489 error_param:
490 	/* send the response to the VF */
491 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
492 				     NULL, 0);
493 }
494 
495 /**
496  * ice_vf_vsi_dis_single_txq - disable a single Tx queue
497  * @vf: VF to disable queue for
498  * @vsi: VSI for the VF
499  * @q_id: VF relative (0-based) queue ID
500  *
501  * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
502  * disabled, then clear the q_id bit in the enabled queues bitmap and return
503  * success. Otherwise return error.
504  */
505 int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
506 {
507 	struct ice_txq_meta txq_meta = { 0 };
508 	struct ice_tx_ring *ring;
509 	int err;
510 
511 	if (!test_bit(q_id, vf->txq_ena))
512 		dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
513 			q_id, vsi->vsi_num);
514 
515 	ring = vsi->tx_rings[q_id];
516 	if (!ring)
517 		return -EINVAL;
518 
519 	ice_fill_txq_meta(vsi, ring, &txq_meta);
520 
521 	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
522 	if (err) {
523 		dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
524 			q_id, vsi->vsi_num);
525 		return err;
526 	}
527 
528 	/* Clear enabled queues flag */
529 	clear_bit(q_id, vf->txq_ena);
530 
531 	return 0;
532 }
533 
534 /**
535  * ice_vc_dis_qs_msg
536  * @vf: pointer to the VF info
537  * @msg: pointer to the msg buffer
538  *
539  * called from the VF to disable all or specific queue(s)
540  */
541 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
542 {
543 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
544 	struct virtchnl_queue_select *vqs =
545 	    (struct virtchnl_queue_select *)msg;
546 	struct ice_vsi *vsi;
547 	unsigned long q_map;
548 	u16 vf_q_id;
549 
550 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
551 	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
552 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
553 		goto error_param;
554 	}
555 
556 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
557 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
558 		goto error_param;
559 	}
560 
561 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
562 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
563 		goto error_param;
564 	}
565 
566 	vsi = ice_get_vf_vsi(vf);
567 	if (!vsi) {
568 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
569 		goto error_param;
570 	}
571 
572 	if (vqs->tx_queues) {
573 		q_map = vqs->tx_queues;
574 
575 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
576 			if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
577 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
578 				goto error_param;
579 			}
580 
581 			if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
582 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
583 				goto error_param;
584 			}
585 		}
586 	}
587 
588 	q_map = vqs->rx_queues;
589 	/* speed up Rx queue disable by batching them if possible */
590 	if (q_map &&
591 	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
592 		if (ice_vsi_stop_all_rx_rings(vsi)) {
593 			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
594 				vsi->vsi_num);
595 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
596 			goto error_param;
597 		}
598 
599 		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
600 	} else if (q_map) {
601 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
602 			if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
603 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
604 				goto error_param;
605 			}
606 
607 			/* Skip queue if not enabled */
608 			if (!test_bit(vf_q_id, vf->rxq_ena))
609 				continue;
610 
611 			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
612 						     true)) {
613 				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
614 					vf_q_id, vsi->vsi_num);
615 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
616 				goto error_param;
617 			}
618 
619 			/* Clear enabled queues flag */
620 			clear_bit(vf_q_id, vf->rxq_ena);
621 		}
622 	}
623 
624 	/* Clear enabled queues flag */
625 	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
626 		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
627 
628 error_param:
629 	/* send the response to the VF */
630 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
631 				     NULL, 0);
632 }
633 
634 /**
635  * ice_cfg_interrupt - configure the IRQ to queue map
636  * @vf: pointer to the VF info
637  * @vsi: the VSI being configured
638  * @map: vector map for mapping vectors to queues
639  * @q_vector: structure for interrupt vector
640  *
641  * Return: VIRTCHNL_STATUS_SUCCESS on success, VIRTCHNL_STATUS_ERR_PARAM otherwise.
641  */
642 static enum virtchnl_status_code
643 ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
644 		  struct virtchnl_vector_map *map,
645 		  struct ice_q_vector *q_vector)
646 {
647 	u16 vsi_q_id, vsi_q_id_idx;
648 	unsigned long qmap;
649 
650 	q_vector->num_ring_rx = 0;
651 	q_vector->num_ring_tx = 0;
652 
653 	qmap = map->rxq_map;
654 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
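		/* for a VF VSI the queue bitmap index is itself the
		 * VSI-relative queue ID
		 */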
655 		vsi_q_id = vsi_q_id_idx;
656 
657 		if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
658 			return VIRTCHNL_STATUS_ERR_PARAM;
659 
660 		q_vector->num_ring_rx++;
661 		q_vector->rx.itr_idx = map->rxitr_idx;
662 		vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
663 		ice_cfg_rxq_interrupt(vsi, vsi_q_id,
664 				      q_vector->vf_reg_idx,
665 				      q_vector->rx.itr_idx);
666 	}
667 
668 	qmap = map->txq_map;
669 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
670 		vsi_q_id = vsi_q_id_idx;
671 
672 		if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
673 			return VIRTCHNL_STATUS_ERR_PARAM;
674 
675 		q_vector->num_ring_tx++;
676 		q_vector->tx.itr_idx = map->txitr_idx;
677 		vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
678 		ice_cfg_txq_interrupt(vsi, vsi_q_id,
679 				      q_vector->vf_reg_idx,
680 				      q_vector->tx.itr_idx);
681 	}
682 
683 	return VIRTCHNL_STATUS_SUCCESS;
684 }
685 
686 /**
687  * ice_vc_cfg_irq_map_msg
688  * @vf: pointer to the VF info
689  * @msg: pointer to the msg buffer
690  *
691  * called from the VF to configure the IRQ to queue map
692  */
693 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
694 {
695 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
696 	u16 num_q_vectors_mapped, vsi_id, vector_id;
697 	struct virtchnl_irq_map_info *irqmap_info;
698 	struct virtchnl_vector_map *map;
699 	struct ice_vsi *vsi;
700 	int i;
701 
702 	irqmap_info = (struct virtchnl_irq_map_info *)msg;
703 	num_q_vectors_mapped = irqmap_info->num_vectors;
704 
705 	/* Check to make sure the number of VF vectors mapped is not greater
706 	 * than the number of VF vectors originally allocated, and that at
707 	 * least a single VF queue vector is mapped
708 	 */
709 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
710 	    vf->num_msix < num_q_vectors_mapped ||
711 	    !num_q_vectors_mapped) {
712 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
713 		goto error_param;
714 	}
715 
716 	vsi = ice_get_vf_vsi(vf);
717 	if (!vsi) {
718 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
719 		goto error_param;
720 	}
721 
722 	for (i = 0; i < num_q_vectors_mapped; i++) {
723 		struct ice_q_vector *q_vector;
724 
725 		map = &irqmap_info->vecmap[i];
726 
727 		vector_id = map->vector_id;
728 		vsi_id = map->vsi_id;
729 		/* vector_id is always 0-based for each VF, and can never be
730 		 * larger than or equal to the max allowed interrupts per VF
731 		 */
732 		if (!(vector_id < vf->num_msix) ||
733 		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
734 		    (!vector_id && (map->rxq_map || map->txq_map))) {
735 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
736 			goto error_param;
737 		}
738 
739 		/* No need to map VF miscellaneous or rogue vector */
740 		if (!vector_id)
741 			continue;
742 
743 		/* Subtract the non-queue vector from the vector_id passed by
744 		 * the VF to get the actual VSI queue vector array index
745 		 */
746 		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
747 		if (!q_vector) {
748 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
749 			goto error_param;
750 		}
751 
752 		/* look out for invalid queue indexes */
753 		v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
754 		if (v_ret)
755 			goto error_param;
756 	}
757 
758 error_param:
759 	/* send the response to the VF */
760 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
761 				     NULL, 0);
762 }
763 
764 /**
765  * ice_vc_cfg_q_bw - Configure per queue bandwidth
766  * @vf: pointer to the VF info
767  * @msg: pointer to the msg buffer which holds the command descriptor
768  *
769  * Configure VF queues bandwidth.
770  *
771  * Return: 0 on success or negative error value.
772  */
773 static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
774 {
775 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
776 	struct virtchnl_queues_bw_cfg *qbw =
777 		(struct virtchnl_queues_bw_cfg *)msg;
778 	struct ice_vsi *vsi;
779 	u16 i;
780 
781 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
782 	    !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
783 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
784 		goto err;
785 	}
786 
787 	vsi = ice_get_vf_vsi(vf);
788 	if (!vsi) {
789 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
790 		goto err;
791 	}
792 
793 	if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
794 	    qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
795 		dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
796 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
797 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
798 		goto err;
799 	}
800 
801 	for (i = 0; i < qbw->num_queues; i++) {
802 		if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
803 		    qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
804 			dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
805 				 qbw->cfg[i].queue_id, vf->vf_id,
806 				 vf->max_tx_rate);
807 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
808 			goto err;
809 		}
810 		if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
811 		    qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
812 			dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
813 				 qbw->cfg[i].queue_id, vf->vf_id,
814 				 vf->min_tx_rate);
815 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
816 			goto err;
817 		}
818 		if (qbw->cfg[i].queue_id >= vf->num_vf_qs) {
819 			dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
820 				 vf->vf_id);
821 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
822 			goto err;
823 		}
824 		if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
825 			dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
826 				 vf->vf_id);
827 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
828 			goto err;
829 		}
830 	}
831 
832 	for (i = 0; i < qbw->num_queues; i++) {
833 		vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
834 		vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
835 		vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
836 		vf->qs_bw[i].tc = qbw->cfg[i].tc;
837 	}
838 
839 	if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
840 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
841 
842 err:
843 	/* send the response to the VF */
844 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
845 				    v_ret, NULL, 0);
846 }
847 
848 /**
849  * ice_vc_cfg_q_quanta - Configure per queue quanta
850  * @vf: pointer to the VF info
851  * @msg: pointer to the msg buffer which holds the command descriptor
852  *
853  * Configure VF queues quanta.
854  *
855  * Return: 0 on success or negative error value.
856  */
857 static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
858 {
859 	u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
860 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
861 	struct virtchnl_quanta_cfg *qquanta =
862 		(struct virtchnl_quanta_cfg *)msg;
863 	struct ice_vsi *vsi;
864 	int ret;
865 
866 	start_qid = qquanta->queue_select.start_queue_id;
867 	num_queues = qquanta->queue_select.num_queues;
868 
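	/* reject ranges whose end would wrap past the 16-bit queue ID space */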
869 	if (check_add_overflow(start_qid, num_queues, &end_qid)) {
870 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
871 		goto err;
872 	}
873 
874 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
875 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
876 		goto err;
877 	}
878 
879 	vsi = ice_get_vf_vsi(vf);
880 	if (!vsi) {
881 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
882 		goto err;
883 	}
884 
885 	if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
886 	    end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
887 		dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
888 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
889 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
890 		goto err;
891 	}
892 
893 	quanta_size = qquanta->quanta_size;
894 	if (quanta_size > ICE_MAX_QUANTA_SIZE ||
895 	    quanta_size < ICE_MIN_QUANTA_SIZE) {
896 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
897 		goto err;
898 	}
899 
900 	if (quanta_size % 64) {
901 		dev_err(ice_pf_to_dev(vf->pf), "quanta size should be a multiple of 64\n");
902 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
903 		goto err;
904 	}
905 
906 	ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size,
907 					  &quanta_prof_id);
908 	if (ret) {
909 		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
910 		goto err;
911 	}
912 
913 	for (i = start_qid; i < end_qid; i++)
914 		vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
915 
916 err:
917 	/* send the response to the VF */
918 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA,
919 				     v_ret, NULL, 0);
920 }
921 
922 /**
923  * ice_vc_cfg_qs_msg
924  * @vf: pointer to the VF info
925  * @msg: pointer to the msg buffer
926  *
927  * called from the VF to configure the Rx/Tx queues
928  */
929 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
930 {
931 	struct virtchnl_vsi_queue_config_info *qci =
932 	    (struct virtchnl_vsi_queue_config_info *)msg;
933 	struct virtchnl_queue_pair_info *qpi;
934 	struct ice_pf *pf = vf->pf;
935 	struct ice_vsi *vsi;
936 	int i = -1, q_idx;
937 	bool ena_ts;
938 	u8 act_prt;
939 
940 	mutex_lock(&pf->lag_mutex);
941 	act_prt = ice_lag_prepare_vf_reset(pf->lag);
942 
943 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
944 		goto error_param;
945 
946 	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
947 		goto error_param;
948 
949 	vsi = ice_get_vf_vsi(vf);
950 	if (!vsi)
951 		goto error_param;
952 
953 	if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
954 	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
955 		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
956 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
957 		goto error_param;
958 	}
959 
960 	for (i = 0; i < qci->num_queue_pairs; i++) {
961 		if (!qci->qpair[i].rxq.crc_disable)
962 			continue;
963 
964 		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
965 		    vf->vlan_strip_ena)
966 			goto error_param;
967 	}
968 
969 	for (i = 0; i < qci->num_queue_pairs; i++) {
970 		qpi = &qci->qpair[i];
971 		if (qpi->txq.vsi_id != qci->vsi_id ||
972 		    qpi->rxq.vsi_id != qci->vsi_id ||
973 		    qpi->rxq.queue_id != qpi->txq.queue_id ||
974 		    qpi->txq.headwb_enabled ||
975 		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
976 		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
977 		    !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
978 			goto error_param;
979 		}
980 
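		/* Rx and Tx queue IDs were verified identical above, so q_idx
		 * addresses both rings of the pair
		 */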
981 		q_idx = qpi->rxq.queue_id;
982 
983 		/* make sure selected "q_idx" is in valid range of queues
984 		 * for selected "vsi"
985 		 */
986 		if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
987 			goto error_param;
988 		}
989 
990 		/* copy Tx queue info from VF into VSI */
991 		if (qpi->txq.ring_len > 0) {
992 			vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
993 			vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
994 
995 			/* Disable any existing queue first */
996 			if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
997 				goto error_param;
998 
999 			/* Configure a queue with the requested settings */
1000 			if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
1001 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
1002 					 vf->vf_id, q_idx);
1003 				goto error_param;
1004 			}
1005 		}
1006 
1007 		/* copy Rx queue info from VF into VSI */
1008 		if (qpi->rxq.ring_len > 0) {
1009 			u16 max_frame_size = ice_vc_get_max_frame_size(vf);
1010 			struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
1011 			u32 rxdid;
1012 
1013 			ring->dma = qpi->rxq.dma_ring_addr;
1014 			ring->count = qpi->rxq.ring_len;
1015 
1016 			if (qpi->rxq.crc_disable)
1017 				ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
1018 			else
1019 				ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
1020 
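
			/* a non-zero Rx data buffer must be between 1024 bytes
			 * and 16 KB minus 128 bytes
			 */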
1021 			if (qpi->rxq.databuffer_size != 0 &&
1022 			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
1023 			     qpi->rxq.databuffer_size < 1024))
1024 				goto error_param;
1025 			ring->rx_buf_len = qpi->rxq.databuffer_size;
1026 			if (qpi->rxq.max_pkt_size > max_frame_size ||
1027 			    qpi->rxq.max_pkt_size < 64)
1028 				goto error_param;
1029 
1030 			ring->max_frame = qpi->rxq.max_pkt_size;
1031 			/* add space for the port VLAN since the VF driver is
1032 			 * not expected to account for it in the MTU
1033 			 * calculation
1034 			 */
1035 			if (ice_vf_is_port_vlan_ena(vf))
1036 				ring->max_frame += VLAN_HLEN;
1037 
1038 			if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
1039 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
1040 					 vf->vf_id, q_idx);
1041 				goto error_param;
1042 			}
1043 
1044 			/* If Rx flex desc is supported, select RXDID for Rx
1045 			 * queues. Otherwise, use the legacy 32-byte descriptor
1046 			 * format. The legacy 16-byte descriptor is not supported;
1047 			 * if that RXDID is selected, return an error.
1048 			 */
1049 			if (vf->driver_caps &
1050 			    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
1051 				rxdid = qpi->rxq.rxdid;
1052 				if (!(BIT(rxdid) & pf->supported_rxdids))
1053 					goto error_param;
1054 			} else {
1055 				rxdid = ICE_RXDID_LEGACY_1;
1056 			}
1057 
1058 			ena_ts = ((vf->driver_caps &
1059 				  VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
1060 				  (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
1061 				  (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
1062 
1063 			ice_write_qrxflxp_cntxt(&vsi->back->hw,
1064 						vsi->rxq_map[q_idx], rxdid,
1065 						ICE_RXDID_PRIO, ena_ts);
1066 		}
1067 	}
1068 
1069 	ice_lag_complete_vf_reset(pf->lag, act_prt);
1070 	mutex_unlock(&pf->lag_mutex);
1071 
1072 	/* send the response to the VF */
1073 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
1074 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
1075 error_param:
1076 	/* disable whatever we can */
1077 	for (; i >= 0; i--) {
1078 		if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
1079 			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
1080 				vf->vf_id, i);
1081 		if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
1082 			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
1083 				vf->vf_id, i);
1084 	}
1085 
1086 	ice_lag_complete_vf_reset(pf->lag, act_prt);
1087 	mutex_unlock(&pf->lag_mutex);
1088 
1089 	ice_lag_move_new_vf_nodes(vf);
1090 
1091 	/* send the response to the VF */
1092 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
1093 				     VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
1094 }
1095 
1096 /**
1097  * ice_can_vf_change_mac
1098  * @vf: pointer to the VF info
1099  *
1100  * Return true if the VF is allowed to change its MAC filters, false otherwise
1101  */
1102 static bool ice_can_vf_change_mac(struct ice_vf *vf)
1103 {
1104 	/* If the VF MAC address has been set administratively (via the
1105 	 * ndo_set_vf_mac command), then deny permission to the VF to
1106 	 * add/delete unicast MAC addresses, unless the VF is trusted
1107 	 */
1108 	if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
1109 		return false;
1110 
1111 	return true;
1112 }
1113 
1114 /**
1115  * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
1116  * @vc_ether_addr: used to extract the type
1117  */
1118 static u8
1119 ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
1120 {
1121 	return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
1122 }
1123 
1124 /**
1125  * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
1126  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
1127  */
1128 static bool
1129 ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
1130 {
1131 	u8 type = ice_vc_ether_addr_type(vc_ether_addr);
1132 
1133 	return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
1134 }
1135 
1136 /**
1137  * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
1138  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
1139  *
1140  * This function should only be called when the MAC address in
1141  * virtchnl_ether_addr is a valid unicast MAC
1142  */
1143 static bool
1144 ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
1145 {
1146 	u8 type = ice_vc_ether_addr_type(vc_ether_addr);
1147 
1148 	return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
1149 }
1150 
1151 /**
1152  * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
1153  * @vf: VF to update
1154  * @vc_ether_addr: structure from VIRTCHNL with MAC to add
1155  */
1156 static void
1157 ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
1158 {
1159 	u8 *mac_addr = vc_ether_addr->addr;
1160 
1161 	if (!is_valid_ether_addr(mac_addr))
1162 		return;
1163 
1164 	/* only allow legacy VF drivers to set the device and hardware MAC if it
1165 	 * is zero and allow new VF drivers to set the hardware MAC if the type
1166 	 * was correctly specified over VIRTCHNL
1167 	 */
1168 	if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
1169 	     is_zero_ether_addr(vf->hw_lan_addr)) ||
1170 	    ice_is_vc_addr_primary(vc_ether_addr)) {
1171 		ether_addr_copy(vf->dev_lan_addr, mac_addr);
1172 		ether_addr_copy(vf->hw_lan_addr, mac_addr);
1173 	}
1174 
1175 	/* hardware and device MACs are already set, but it's possible that the
1176 	 * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
1177 	 * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
1178 	 * away for the legacy VF driver case as it will be updated in the
1179 	 * delete flow for this case
1180 	 */
1181 	if (ice_is_vc_addr_legacy(vc_ether_addr)) {
1182 		ether_addr_copy(vf->legacy_last_added_umac.addr,
1183 				mac_addr);
1184 		vf->legacy_last_added_umac.time_modified = jiffies;
1185 	}
1186 }
1187 
1188 /**
1189  * ice_is_mc_lldp_eth_addr - check if the given MAC is a multicast LLDP address
1190  * @mac: address to check
1191  *
1192  * Return: true if the address is one of the three possible LLDP multicast
1193  *	   addresses, false otherwise.
1194  */
1195 static bool ice_is_mc_lldp_eth_addr(const u8 *mac)
1196 {
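	/* IEEE 802.1AB LLDP group addresses: 01:80:c2:00:00:00 is the nearest
	 * customer bridge, :03 the nearest non-TPMR bridge and :0e the
	 * nearest bridge address
	 */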
1197 	const u8 lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00};
1198 
1199 	if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base)))
1200 		return false;
1201 
1202 	return (mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00);
1203 }
1204 
1205 /**
1206  * ice_vc_can_add_mac - check if the VF is allowed to add a given MAC
1207  * @vf: a VF to add the address to
1208  * @mac: address to check
1209  *
1210  * Return: true if the VF is allowed to add such MAC address, false otherwise.
1211  */
1212 static bool ice_vc_can_add_mac(const struct ice_vf *vf, const u8 *mac)
1213 {
1214 	struct device *dev = ice_pf_to_dev(vf->pf);
1215 
1216 	if (is_unicast_ether_addr(mac) &&
1217 	    !ice_can_vf_change_mac((struct ice_vf *)vf)) {
1218 		dev_err(dev,
1219 			"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
1220 		return false;
1221 	}
1222 
1223 	if (!vf->trusted && ice_is_mc_lldp_eth_addr(mac)) {
1224 		dev_warn(dev,
1225 			 "An untrusted VF %u is attempting to configure an LLDP multicast address\n",
1226 			 vf->vf_id);
1227 		return false;
1228 	}
1229 
1230 	return true;
1231 }
1232 
1233 /**
1234  * ice_vc_add_mac_addr - attempt to add the MAC address passed in
1235  * @vf: pointer to the VF info
1236  * @vsi: pointer to the VF's VSI
1237  * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
1238  */
1239 static int
1240 ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
1241 		    struct virtchnl_ether_addr *vc_ether_addr)
1242 {
1243 	struct device *dev = ice_pf_to_dev(vf->pf);
1244 	u8 *mac_addr = vc_ether_addr->addr;
1245 	int ret;
1246 
1247 	/* device MAC already added */
1248 	if (ether_addr_equal(mac_addr, vf->dev_lan_addr))
1249 		return 0;
1250 
1251 	if (!ice_vc_can_add_mac(vf, mac_addr))
1252 		return -EPERM;
1253 
1254 	ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
1255 	if (ret == -EEXIST) {
1256 		dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
1257 			vf->vf_id);
1258 		/* don't return since we might need to update
1259 		 * the primary MAC in ice_vfhw_mac_add() below
1260 		 */
1261 	} else if (ret) {
1262 		dev_err(dev, "Failed to add MAC %pM for VF %d, error %d\n",
1263 			mac_addr, vf->vf_id, ret);
1264 		return ret;
1265 	} else {
1266 		vf->num_mac++;
1267 		if (ice_is_mc_lldp_eth_addr(mac_addr))
1268 			ice_vf_update_mac_lldp_num(vf, vsi, true);
1269 	}
1270 
1271 	ice_vfhw_mac_add(vf, vc_ether_addr);
1272 
1273 	return ret;
1274 }
1275 
1276 /**
1277  * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
1278  * @last_added_umac: structure used to check expiration
1279  */
1280 static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
1281 {
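	/* the cached legacy MAC is only honored for 3 seconds after it was
	 * last modified
	 */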
1282 #define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME	msecs_to_jiffies(3000)
1283 	return time_is_before_jiffies(last_added_umac->time_modified +
1284 				      ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
1285 }
1286 
1287 /**
1288  * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
1289  * @vf: VF to update
1290  * @vc_ether_addr: structure from VIRTCHNL with MAC to check
1291  *
1292  * only update cached hardware MAC for legacy VF drivers on delete
1293  * because we cannot guarantee order/type of MAC from the VF driver
1294  */
1295 static void
1296 ice_update_legacy_cached_mac(struct ice_vf *vf,
1297 			     struct virtchnl_ether_addr *vc_ether_addr)
1298 {
1299 	if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
1300 	    ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
1301 		return;
1302 
1303 	ether_addr_copy(vf->dev_lan_addr, vf->legacy_last_added_umac.addr);
1304 	ether_addr_copy(vf->hw_lan_addr, vf->legacy_last_added_umac.addr);
1305 }
1306 
1307 /**
1308  * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
1309  * @vf: VF to update
1310  * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
1311  */
1312 static void
1313 ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
1314 {
1315 	u8 *mac_addr = vc_ether_addr->addr;
1316 
1317 	if (!is_valid_ether_addr(mac_addr) ||
1318 	    !ether_addr_equal(vf->dev_lan_addr, mac_addr))
1319 		return;
1320 
1321 	/* allow the device MAC to be repopulated in the add flow and don't
1322 	 * clear the hardware MAC (i.e. hw_lan_addr) here as that is meant
1323 	 * to be persistent on VM reboot and across driver unload/load, which
1324 	 * won't work if we clear the hardware MAC here
1325 	 */
1326 	eth_zero_addr(vf->dev_lan_addr);
1327 
1328 	ice_update_legacy_cached_mac(vf, vc_ether_addr);
1329 }
1330 
1331 /**
1332  * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
1333  * @vf: pointer to the VF info
1334  * @vsi: pointer to the VF's VSI
1335  * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
1336  */
1337 static int
1338 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
1339 		    struct virtchnl_ether_addr *vc_ether_addr)
1340 {
1341 	struct device *dev = ice_pf_to_dev(vf->pf);
1342 	u8 *mac_addr = vc_ether_addr->addr;
1343 	int status;
1344 
1345 	if (!ice_can_vf_change_mac(vf) &&
1346 	    ether_addr_equal(vf->dev_lan_addr, mac_addr))
1347 		return 0;
1348 
1349 	status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
1350 	if (status == -ENOENT) {
1351 		dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
1352 			vf->vf_id);
1353 		return -ENOENT;
1354 	} else if (status) {
1355 		dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
1356 			mac_addr, vf->vf_id, status);
1357 		return -EIO;
1358 	}
1359 
1360 	ice_vfhw_mac_del(vf, vc_ether_addr);
1361 
1362 	vf->num_mac--;
1363 	if (ice_is_mc_lldp_eth_addr(mac_addr))
1364 		ice_vf_update_mac_lldp_num(vf, vsi, false);
1365 
1366 	return 0;
1367 }
1368 
1369 /**
1370  * ice_vc_handle_mac_addr_msg
1371  * @vf: pointer to the VF info
1372  * @msg: pointer to the msg buffer
1373  * @set: true if MAC filters are being set, false otherwise
1374  *
1375  * add guest MAC address filter
1376  */
1377 static int
1378 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
1379 {
1380 	int (*ice_vc_cfg_mac)
1381 		(struct ice_vf *vf, struct ice_vsi *vsi,
1382 		 struct virtchnl_ether_addr *virtchnl_ether_addr);
1383 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1384 	struct virtchnl_ether_addr_list *al =
1385 	    (struct virtchnl_ether_addr_list *)msg;
1386 	struct ice_pf *pf = vf->pf;
1387 	enum virtchnl_ops vc_op;
1388 	struct ice_vsi *vsi;
1389 	int i;
1390 
1391 	if (set) {
1392 		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
1393 		ice_vc_cfg_mac = ice_vc_add_mac_addr;
1394 	} else {
1395 		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
1396 		ice_vc_cfg_mac = ice_vc_del_mac_addr;
1397 	}
1398 
1399 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
1400 	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
1401 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1402 		goto handle_mac_exit;
1403 	}
1404 
1405 	/* If this VF is not privileged, then we can't add more than a
1406 	 * limited number of addresses. Check to make sure that the
1407 	 * additions do not push us over the limit.
1408 	 */
1409 	if (set && !ice_is_vf_trusted(vf) &&
1410 	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
1411 		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
1412 			vf->vf_id);
1413 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1414 		goto handle_mac_exit;
1415 	}
1416 
1417 	vsi = ice_get_vf_vsi(vf);
1418 	if (!vsi) {
1419 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1420 		goto handle_mac_exit;
1421 	}
1422 
1423 	for (i = 0; i < al->num_elements; i++) {
1424 		u8 *mac_addr = al->list[i].addr;
1425 		int result;
1426 
1427 		if (is_broadcast_ether_addr(mac_addr) ||
1428 		    is_zero_ether_addr(mac_addr))
1429 			continue;
1430 
1431 		result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
1432 		if (result == -EEXIST || result == -ENOENT) {
1433 			continue;
1434 		} else if (result) {
1435 			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1436 			goto handle_mac_exit;
1437 		}
1438 	}
1439 
1440 handle_mac_exit:
1441 	/* send the response to the VF */
1442 	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
1443 }
1444 
1445 /**
1446  * ice_vc_add_mac_addr_msg
1447  * @vf: pointer to the VF info
1448  * @msg: pointer to the msg buffer
1449  *
1450  * add guest MAC address filter
1451  */
1452 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
1453 {
1454 	return ice_vc_handle_mac_addr_msg(vf, msg, true);
1455 }
1456 
1457 /**
1458  * ice_vc_del_mac_addr_msg
1459  * @vf: pointer to the VF info
1460  * @msg: pointer to the msg buffer
1461  *
1462  * remove guest MAC address filter
1463  */
1464 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
1465 {
1466 	return ice_vc_handle_mac_addr_msg(vf, msg, false);
1467 }
1468 
1469 /**
1470  * ice_vc_request_qs_msg
1471  * @vf: pointer to the VF info
1472  * @msg: pointer to the msg buffer
1473  *
1474  * VFs get a default number of queues but can use this message to request a
1475  * different number. If the request is successful, PF will reset the VF and
1476  * return 0. If unsuccessful, PF will send message informing VF of number of
1477  * available queue pairs via virtchnl message response to VF.
1478  */
1479 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
1480 {
1481 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1482 	struct virtchnl_vf_res_request *vfres =
1483 		(struct virtchnl_vf_res_request *)msg;
1484 	u16 req_queues = vfres->num_queue_pairs;
1485 	struct ice_pf *pf = vf->pf;
1486 	u16 max_allowed_vf_queues;
1487 	u16 tx_rx_queue_left;
1488 	struct device *dev;
1489 	u16 cur_queues;
1490 
1491 	dev = ice_pf_to_dev(pf);
1492 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1493 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1494 		goto error_param;
1495 	}
1496 
1497 	cur_queues = vf->num_vf_qs;
1498 	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
1499 				 ice_get_avail_rxq_count(pf));
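	/* the VF can grow to its current queue count plus whatever is still
	 * unclaimed in the PF's Tx/Rx queue pools
	 */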
1500 	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
1501 	if (!req_queues) {
1502 		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
1503 			vf->vf_id);
1504 	} else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
1505 		dev_err(dev, "VF %d tried to request more than %d queues.\n",
1506 			vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
1507 		vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
1508 	} else if (req_queues > cur_queues &&
1509 		   req_queues - cur_queues > tx_rx_queue_left) {
1510 		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
1511 			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
1512 		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
1513 					       ICE_MAX_RSS_QS_PER_VF);
1514 	} else {
1515 		/* request is successful, then reset VF */
1516 		vf->num_req_qs = req_queues;
1517 		ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
1518 		dev_info(dev, "VF %d granted request of %u queues.\n",
1519 			 vf->vf_id, req_queues);
1520 		return 0;
1521 	}
1522 
1523 error_param:
1524 	/* send the response to the VF */
1525 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
1526 				     v_ret, (u8 *)vfres, sizeof(*vfres));
1527 }
1528 
1529 /**
1530  * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
1531  * @caps: VF driver negotiated capabilities
1532  *
1533  * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
1534  */
1535 static bool ice_vf_vlan_offload_ena(u32 caps)
1536 {
1537 	return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
1538 }
1539 
1540 /**
1541  * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
1542  * @vf: VF used to determine if VLAN promiscuous config is allowed
1543  */
1544 bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
1545 {
1546 	if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1547 	     test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
1548 	    test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
1549 		return true;
1550 
1551 	return false;
1552 }
1553 
1554 /**
1555  * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
1556  * @vf: VF to enable VLAN promisc on
1557  * @vsi: VF's VSI used to enable VLAN promiscuous mode
1558  * @vlan: VLAN used to enable VLAN promiscuous
1559  *
1560  * This function should only be called if VLAN promiscuous mode is allowed,
1561  * which can be determined via ice_is_vlan_promisc_allowed().
1562  */
1563 int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
1564 			    struct ice_vlan *vlan)
1565 {
1566 	u8 promisc_m = 0;
1567 	int status;
1568 
1569 	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
1570 		promisc_m |= ICE_UCAST_VLAN_PROMISC_BITS;
1571 	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
1572 		promisc_m |= ICE_MCAST_VLAN_PROMISC_BITS;
1573 
1574 	if (!promisc_m)
1575 		return 0;
1576 
1577 	status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
1578 					  vlan->vid);
1579 	if (status && status != -EEXIST)
1580 		return status;
1581 
1582 	return 0;
1583 }
1584 
1585 /**
1586  * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
1587  * @vsi: VF's VSI used to disable VLAN promiscuous mode for
1588  * @vlan: VLAN used to disable VLAN promiscuous
1589  *
1590  * This function should only be called if VLAN promiscuous mode is allowed,
1591  * which can be determined via ice_is_vlan_promisc_allowed().
1592  */
1593 static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
1594 {
1595 	u8 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS | ICE_MCAST_VLAN_PROMISC_BITS;
1596 	int status;
1597 
1598 	status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
1599 					    vlan->vid);
1600 	if (status && status != -ENOENT)
1601 		return status;
1602 
1603 	return 0;
1604 }
1605 
1606 /**
1607  * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
1608  * @vf: VF to check against
1609  * @vsi: VF's VSI
1610  *
1611  * If the VF is trusted then the VF is allowed to add as many VLANs as it
1612  * wants to, so return false.
1613  *
1614  * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max
1615  * allowed VLANs for an untrusted VF. Return the result of this comparison.
1616  */
1617 static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
1618 {
1619 	if (ice_is_vf_trusted(vf))
1620 		return false;
1621 
1622 #define ICE_VF_ADDED_VLAN_ZERO_FLTRS	1
1623 	return ((ice_vsi_num_non_zero_vlans(vsi) +
1624 		ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
1625 }
1626 
1627 /**
1628  * ice_vc_process_vlan_msg
1629  * @vf: pointer to the VF info
1630  * @msg: pointer to the msg buffer
1631  * @add_v: Add VLAN if true, otherwise delete VLAN
1632  *
1633  * Process virtchnl op to add or remove programmed guest VLAN ID
1634  */
1635 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
1636 {
1637 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1638 	struct virtchnl_vlan_filter_list *vfl =
1639 	    (struct virtchnl_vlan_filter_list *)msg;
1640 	struct ice_pf *pf = vf->pf;
1641 	bool vlan_promisc = false;
1642 	struct ice_vsi *vsi;
1643 	struct device *dev;
1644 	int status = 0;
1645 	int i;
1646 
1647 	dev = ice_pf_to_dev(pf);
1648 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1649 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1650 		goto error_param;
1651 	}
1652 
1653 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
1654 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1655 		goto error_param;
1656 	}
1657 
1658 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
1659 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1660 		goto error_param;
1661 	}
1662 
1663 	for (i = 0; i < vfl->num_elements; i++) {
1664 		if (vfl->vlan_id[i] >= VLAN_N_VID) {
1665 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1666 			dev_err(dev, "invalid VF VLAN id %d\n",
1667 				vfl->vlan_id[i]);
1668 			goto error_param;
1669 		}
1670 	}
1671 
1672 	vsi = ice_get_vf_vsi(vf);
1673 	if (!vsi) {
1674 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1675 		goto error_param;
1676 	}
1677 
1678 	if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
1679 		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
1680 			 vf->vf_id);
1681 		/* There is no need to let the VF know that it is not trusted,
1682 		 * so we can just return a success message here
1683 		 */
1684 		goto error_param;
1685 	}
1686 
1687 	/* in DVM a VF can add/delete inner VLAN filters when
1688 	 * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
1689 	 */
1690 	if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
1691 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1692 		goto error_param;
1693 	}
1694 
1695 	/* in DVM VLAN promiscuous is based on the outer VLAN, which would be
1696 	 * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
1697 	 * allow vlan_promisc = true in SVM and if no port VLAN is configured
1698 	 */
1699 	vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
1700 		!ice_is_dvm_ena(&pf->hw) &&
1701 		!ice_vf_is_port_vlan_ena(vf);
1702 
1703 	if (add_v) {
1704 		for (i = 0; i < vfl->num_elements; i++) {
1705 			u16 vid = vfl->vlan_id[i];
1706 			struct ice_vlan vlan;
1707 
1708 			if (ice_vf_has_max_vlans(vf, vsi)) {
1709 				dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
1710 					 vf->vf_id);
1711 				/* There is no need to let the VF know that it
1712 				 * is not trusted, so we can just return a
1713 				 * success message here as well.
1714 				 */
1715 				goto error_param;
1716 			}
1717 
1718 			/* we add VLAN 0 by default for each VF so we can enable
1719 			 * Tx VLAN anti-spoof without triggering MDD events, so
1720 			 * we don't need to add it again here
1721 			 */
1722 			if (!vid)
1723 				continue;
1724 
1725 			vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
1726 			status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
1727 			if (status) {
1728 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1729 				goto error_param;
1730 			}
1731 
1732 			/* Enable VLAN filtering on first non-zero VLAN */
1733 			if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
1734 				if (vf->spoofchk) {
1735 					status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
1736 					if (status) {
1737 						v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1738 						dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
1739 							vid, status);
1740 						goto error_param;
1741 					}
1742 				}
1743 				status = vsi->inner_vlan_ops.ena_rx_filtering(vsi);
1744 				if (status) {
1745 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1746 					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n", vid, status);
1747 					goto error_param;
1748 				}
1749 			} else if (vlan_promisc) {
1750 				status = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
1751 				if (status) {
1752 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1753 					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
1754 						vid, status);
1755 				}
1756 			}
1757 		}
1758 	} else {
1759 		/* For a non-trusted VF, the number of VLAN elements passed to
1760 		 * the PF for removal might be greater than the number of VLAN
1761 		 * filters programmed for that VF, so cap the loop at the count
1762 		 * of VLANs actually added earlier via the add VLAN opcode.
1763 		 * This avoids removing a VLAN that doesn't exist and sending
1764 		 * an erroneous failure message back to the VF.
1765 		 */
1766 		int num_vf_vlan;
1767 
1768 		num_vf_vlan = vsi->num_vlan;
1769 		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
1770 			u16 vid = vfl->vlan_id[i];
1771 			struct ice_vlan vlan;
1772 
1773 			/* we add VLAN 0 by default for each VF so we can enable
1774 			 * Tx VLAN anti-spoof without triggering MDD events so
1775 			 * we don't want a VIRTCHNL request to remove it
1776 			 */
1777 			if (!vid)
1778 				continue;
1779 
1780 			vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
1781 			status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
1782 			if (status) {
1783 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1784 				goto error_param;
1785 			}
1786 
1787 			/* Disable VLAN filtering when only VLAN 0 is left */
1788 			if (!ice_vsi_has_non_zero_vlans(vsi)) {
1789 				vsi->inner_vlan_ops.dis_tx_filtering(vsi);
1790 				vsi->inner_vlan_ops.dis_rx_filtering(vsi);
1791 			}
1792 
1793 			if (vlan_promisc)
1794 				ice_vf_dis_vlan_promisc(vsi, &vlan);
1795 		}
1796 	}
1797 
1798 error_param:
1799 	/* send the response to the VF */
1800 	if (add_v)
1801 		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
1802 					     NULL, 0);
1803 	else
1804 		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
1805 					     NULL, 0);
1806 }
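
/* A minimal standalone sketch (plain C, not driver code) of the
 * vlan_promisc gate computed above: VLAN promiscuous handling is
 * applied only when the VF negotiated promiscuous support, the device
 * is in single VLAN mode (SVM), and no port VLAN is configured.
 *
 *	#include <stdbool.h>
 *
 *	static bool vlan_promisc_allowed(bool promisc_negotiated,
 *					 bool dvm_ena, bool port_vlan_ena)
 *	{
 *		// mirrors the three-way AND used by ice_vc_process_vlan_msg()
 *		return promisc_negotiated && !dvm_ena && !port_vlan_ena;
 *	}
 *
 *	// vlan_promisc_allowed(true, false, false) -> true
 *	// vlan_promisc_allowed(true, true, false)  -> false (DVM)
 */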
1807 
1808 /**
1809  * ice_vc_add_vlan_msg
1810  * @vf: pointer to the VF info
1811  * @msg: pointer to the msg buffer
1812  *
1813  * Add and program guest VLAN ID
1814  */
1815 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
1816 {
1817 	return ice_vc_process_vlan_msg(vf, msg, true);
1818 }
1819 
1820 /**
1821  * ice_vc_remove_vlan_msg
1822  * @vf: pointer to the VF info
1823  * @msg: pointer to the msg buffer
1824  *
1825  * remove programmed guest VLAN ID
1826  */
1827 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
1828 {
1829 	return ice_vc_process_vlan_msg(vf, msg, false);
1830 }
1831 
1832 /**
1833  * ice_vsi_is_rxq_crc_strip_dis - check if Rx queue CRC stripping is disabled
1834  * @vsi: pointer to the VF VSI info
1835  */
1836 static bool ice_vsi_is_rxq_crc_strip_dis(struct ice_vsi *vsi)
1837 {
1838 	unsigned int i;
1839 
1840 	ice_for_each_alloc_rxq(vsi, i)
1841 		if (vsi->rx_rings[i]->flags & ICE_RX_FLAGS_CRC_STRIP_DIS)
1842 			return true;
1843 
1844 	return false;
1845 }
1846 
1847 /**
1848  * ice_vc_ena_vlan_stripping
1849  * @vf: pointer to the VF info
1850  *
1851  * Enable VLAN header stripping for a given VF
1852  */
1853 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
1854 {
1855 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1856 	struct ice_vsi *vsi;
1857 
1858 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1859 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1860 		goto error_param;
1861 	}
1862 
1863 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
1864 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1865 		goto error_param;
1866 	}
1867 
1868 	vsi = ice_get_vf_vsi(vf);
1869 	if (!vsi) {
1870 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1871 		goto error_param;
1872 	}
1873 
1874 	if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
1875 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1876 	else
1877 		vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
1878 
1879 error_param:
1880 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
1881 				     v_ret, NULL, 0);
1882 }
1883 
1884 /**
1885  * ice_vc_dis_vlan_stripping
1886  * @vf: pointer to the VF info
1887  *
1888  * Disable VLAN header stripping for a given VF
1889  */
1890 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
1891 {
1892 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1893 	struct ice_vsi *vsi;
1894 
1895 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1896 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1897 		goto error_param;
1898 	}
1899 
1900 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
1901 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1902 		goto error_param;
1903 	}
1904 
1905 	vsi = ice_get_vf_vsi(vf);
1906 	if (!vsi) {
1907 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1908 		goto error_param;
1909 	}
1910 
1911 	if (vsi->inner_vlan_ops.dis_stripping(vsi))
1912 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1913 	else
1914 		vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;
1915 
1916 error_param:
1917 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
1918 				     v_ret, NULL, 0);
1919 }
1920 
1921 /**
1922  * ice_vc_get_rss_hashcfg - return the RSS Hash configuration
1923  * @vf: pointer to the VF info
1924  */
1925 static int ice_vc_get_rss_hashcfg(struct ice_vf *vf)
1926 {
1927 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1928 	struct virtchnl_rss_hashcfg *vrh = NULL;
1929 	int len = 0, ret;
1930 
1931 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1932 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1933 		goto err;
1934 	}
1935 
1936 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1937 		dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
1938 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1939 		goto err;
1940 	}
1941 
1942 	len = sizeof(struct virtchnl_rss_hashcfg);
1943 	vrh = kzalloc(len, GFP_KERNEL);
1944 	if (!vrh) {
1945 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1946 		len = 0;
1947 		goto err;
1948 	}
1949 
1950 	vrh->hashcfg = ICE_DEFAULT_RSS_HASHCFG;
1951 err:
1952 	/* send the response back to the VF */
1953 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, v_ret,
1954 				    (u8 *)vrh, len);
1955 	kfree(vrh);
1956 	return ret;
1957 }
1958 
1959 /**
1960  * ice_vc_set_rss_hashcfg - set RSS Hash configuration bits for the VF
1961  * @vf: pointer to the VF info
1962  * @msg: pointer to the msg buffer
1963  */
1964 static int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg)
1965 {
1966 	struct virtchnl_rss_hashcfg *vrh = (struct virtchnl_rss_hashcfg *)msg;
1967 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1968 	struct ice_pf *pf = vf->pf;
1969 	struct ice_vsi *vsi;
1970 	struct device *dev;
1971 	int status;
1972 
1973 	dev = ice_pf_to_dev(pf);
1974 
1975 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1976 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1977 		goto err;
1978 	}
1979 
1980 	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
1981 		dev_err(dev, "RSS not supported by PF\n");
1982 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1983 		goto err;
1984 	}
1985 
1986 	vsi = ice_get_vf_vsi(vf);
1987 	if (!vsi) {
1988 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1989 		goto err;
1990 	}
1991 
1992 	/* clear all previously programmed RSS configuration to allow VF
1993 	 * drivers to customize the RSS configuration and/or completely
1994 	 * disable RSS
1995 	 */
1996 	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
1997 	if (status && !vrh->hashcfg) {
1998 		/* only report failure to clear the current RSS configuration if
1999 		 * that was clearly the VF's intention (i.e. vrh->hashcfg == 0)
2000 		 */
2001 		v_ret = ice_err_to_virt_err(status);
2002 		goto err;
2003 	} else if (status) {
2004 		/* allow the VF to update the RSS configuration even on failure
2005 		 * to clear the current RSS configuration in an attempt to keep
2006 		 * RSS in a working state
2007 		 */
2008 		dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
2009 			 vf->vf_id);
2010 	}
2011 
2012 	if (vrh->hashcfg) {
2013 		status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg);
2014 		v_ret = ice_err_to_virt_err(status);
2015 	}
2016 
2017 	/* save the requested VF configuration */
2018 	if (!v_ret)
2019 		vf->rss_hashcfg = vrh->hashcfg;
2020 
2021 	/* send the response to the VF */
2022 err:
2023 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret,
2024 				     NULL, 0);
2025 }
2026 
2027 /**
2028  * ice_vc_query_rxdid - query RXDID supported by DDP package
2029  * @vf: pointer to VF info
2030  *
2031  * Called from VF to query a bitmap of supported flexible
2032  * descriptor RXDIDs of a DDP package.
2033  */
2034 static int ice_vc_query_rxdid(struct ice_vf *vf)
2035 {
2036 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2037 	struct ice_pf *pf = vf->pf;
2038 	u64 rxdid = 0;
2039 
2040 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2041 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2042 		goto err;
2043 	}
2044 
2045 	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)) {
2046 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2047 		goto err;
2048 	}
2049 
2050 	rxdid = pf->supported_rxdids;
2051 
2052 err:
2053 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
2054 				     v_ret, (u8 *)&rxdid, sizeof(rxdid));
2055 }
2056 
2057 /**
2058  * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
2059  * @vf: VF to enable/disable VLAN stripping for on initialization
2060  *
2061  * Set the default for VLAN stripping based on whether a port VLAN is configured
2062  * and the current VLAN mode of the device.
2063  */
2064 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
2065 {
2066 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
2067 
2068 	vf->vlan_strip_ena = 0;
2069 
2070 	if (!vsi)
2071 		return -EINVAL;
2072 
2073 	/* don't modify stripping if port VLAN is configured in SVM since the
2074 	 * port VLAN is based on the inner/single VLAN in SVM
2075 	 */
2076 	if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
2077 		return 0;
2078 
2079 	if (ice_vf_vlan_offload_ena(vf->driver_caps)) {
2080 		int err;
2081 
2082 		err = vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
2083 		if (!err)
2084 			vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
2085 		return err;
2086 	}
2087 
2088 	return vsi->inner_vlan_ops.dis_stripping(vsi);
2089 }
2090 
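
/* The default-stripping decision above, restated as a standalone
 * sketch (plain C, not driver code): leave stripping untouched for a
 * port VLAN in SVM, enable it when the VLAN offload was negotiated,
 * and disable it otherwise.
 *
 *	#include <stdbool.h>
 *
 *	enum strip_action { STRIP_LEAVE, STRIP_ENABLE, STRIP_DISABLE };
 *
 *	static enum strip_action init_strip(bool port_vlan_ena,
 *					    bool dvm_ena,
 *					    bool offload_negotiated)
 *	{
 *		if (port_vlan_ena && !dvm_ena)
 *			return STRIP_LEAVE;	// SVM port VLAN owns the tag
 *		return offload_negotiated ? STRIP_ENABLE : STRIP_DISABLE;
 *	}
 */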
2091 static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
2092 {
2093 	if (vf->trusted)
2094 		return VLAN_N_VID;
2095 	else
2096 		return ICE_MAX_VLAN_PER_VF;
2097 }
2098 
2099 /**
2100  * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used
2101  * @vf: VF being checked
2102  *
2103  * When the device is in double VLAN mode, check whether or not the outer VLAN
2104  * is allowed.
2105  */
2106 static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
2107 {
2108 	if (ice_vf_is_port_vlan_ena(vf))
2109 		return true;
2110 
2111 	return false;
2112 }
2113 
2114 /**
2115  * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
2116  * @vf: VF that capabilities are being set for
2117  * @caps: VLAN capabilities to populate
2118  *
2119  * Determine VLAN capabilities support based on whether a port VLAN is
2120  * configured. If a port VLAN is configured then the VF should use the inner
2121  * filtering/offload capabilities since the port VLAN is using the outer VLAN
2122  * capabilities.
2123  */
2124 static void
2125 ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
2126 {
2127 	struct virtchnl_vlan_supported_caps *supported_caps;
2128 
2129 	if (ice_vf_outer_vlan_not_allowed(vf)) {
2130 		/* until support for inner VLAN filtering is added when a port
2131 		 * VLAN is configured, only support software offloaded inner
2132 		 * VLANs when a port VLAN is configured in DVM
2133 		 */
2134 		supported_caps = &caps->filtering.filtering_support;
2135 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2136 
2137 		supported_caps = &caps->offloads.stripping_support;
2138 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2139 					VIRTCHNL_VLAN_TOGGLE |
2140 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2141 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2142 
2143 		supported_caps = &caps->offloads.insertion_support;
2144 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2145 					VIRTCHNL_VLAN_TOGGLE |
2146 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2147 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2148 
2149 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2150 		caps->offloads.ethertype_match =
2151 			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2152 	} else {
2153 		supported_caps = &caps->filtering.filtering_support;
2154 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2155 		supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2156 					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2157 					VIRTCHNL_VLAN_ETHERTYPE_9100 |
2158 					VIRTCHNL_VLAN_ETHERTYPE_AND;
2159 		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2160 						 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2161 						 VIRTCHNL_VLAN_ETHERTYPE_9100;
2162 
2163 		supported_caps = &caps->offloads.stripping_support;
2164 		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
2165 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
2166 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2167 		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
2168 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
2169 					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2170 					VIRTCHNL_VLAN_ETHERTYPE_9100 |
2171 					VIRTCHNL_VLAN_ETHERTYPE_XOR |
2172 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;
2173 
2174 		supported_caps = &caps->offloads.insertion_support;
2175 		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
2176 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
2177 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2178 		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
2179 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
2180 					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2181 					VIRTCHNL_VLAN_ETHERTYPE_9100 |
2182 					VIRTCHNL_VLAN_ETHERTYPE_XOR |
2183 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;
2184 
2185 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2186 
2187 		caps->offloads.ethertype_match =
2188 			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2189 	}
2190 
2191 	caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
2192 }
2193 
2194 /**
2195  * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
2196  * @vf: VF that capabilities are being set for
2197  * @caps: VLAN capabilities to populate
2198  *
2199  * Determine VLAN capabilities support based on whether a port VLAN is
2200  * configured. If a port VLAN is configured then the VF does not have any VLAN
2201  * filtering or offload capabilities since the port VLAN is using the inner VLAN
2202  * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
2203  * VLAN filtering and offload capabilities.
2204  */
2205 static void
2206 ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
2207 {
2208 	struct virtchnl_vlan_supported_caps *supported_caps;
2209 
2210 	if (ice_vf_is_port_vlan_ena(vf)) {
2211 		supported_caps = &caps->filtering.filtering_support;
2212 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2213 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2214 
2215 		supported_caps = &caps->offloads.stripping_support;
2216 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2217 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2218 
2219 		supported_caps = &caps->offloads.insertion_support;
2220 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2221 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2222 
2223 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
2224 		caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
2225 		caps->filtering.max_filters = 0;
2226 	} else {
2227 		supported_caps = &caps->filtering.filtering_support;
2228 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
2229 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2230 		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2231 
2232 		supported_caps = &caps->offloads.stripping_support;
2233 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2234 					VIRTCHNL_VLAN_TOGGLE |
2235 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2236 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2237 
2238 		supported_caps = &caps->offloads.insertion_support;
2239 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2240 					VIRTCHNL_VLAN_TOGGLE |
2241 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2242 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2243 
2244 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2245 		caps->offloads.ethertype_match =
2246 			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2247 		caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
2248 	}
2249 }
2250 
2251 /**
2252  * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
2253  * @vf: VF to determine VLAN capabilities for
2254  *
2255  * This will only be called if the VF and PF successfully negotiated
2256  * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2257  *
2258  * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
2259  * is configured or not.
2260  */
2261 static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
2262 {
2263 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2264 	struct virtchnl_vlan_caps *caps = NULL;
2265 	int err, len = 0;
2266 
2267 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2268 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2269 		goto out;
2270 	}
2271 
2272 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
2273 	if (!caps) {
2274 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2275 		goto out;
2276 	}
2277 	len = sizeof(*caps);
2278 
2279 	if (ice_is_dvm_ena(&vf->pf->hw))
2280 		ice_vc_set_dvm_caps(vf, caps);
2281 	else
2282 		ice_vc_set_svm_caps(vf, caps);
2283 
2284 	/* store negotiated caps to prevent invalid VF messages */
2285 	memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));
2286 
2287 out:
2288 	err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
2289 				    v_ret, (u8 *)caps, len);
2290 	kfree(caps);
2291 	return err;
2292 }
2293 
2294 /**
2295  * ice_vc_validate_vlan_tpid - validate VLAN TPID
2296  * @filtering_caps: negotiated/supported VLAN filtering capabilities
2297  * @tpid: VLAN TPID used for validation
2298  *
2299  * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
2300  * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
2301  */
2302 static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
2303 {
2304 	enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
2305 
2306 	switch (tpid) {
2307 	case ETH_P_8021Q:
2308 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
2309 		break;
2310 	case ETH_P_8021AD:
2311 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
2312 		break;
2313 	case ETH_P_QINQ1:
2314 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
2315 		break;
2316 	}
2317 
2318 	if (!(filtering_caps & vlan_ethertype))
2319 		return false;
2320 
2321 	return true;
2322 }
2323 
2324 /**
2325  * ice_vc_is_valid_vlan - validate the virtchnl_vlan
2326  * @vc_vlan: virtchnl_vlan to validate
2327  *
2328  * If either the VLAN TCI or the VLAN TPID is 0, then this filter is invalid,
2329  * so return false. Otherwise return true.
2330  */
2331 static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
2332 {
2333 	if (!vc_vlan->tci || !vc_vlan->tpid)
2334 		return false;
2335 
2336 	return true;
2337 }
2338 
2339 /**
2340  * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
2341  * @vfc: negotiated/supported VLAN filtering capabilities
2342  * @vfl: VLAN filter list from VF to validate
2343  *
2344  * Validate all of the filters in the VLAN filter list from the VF. If any of
2345  * the checks fail then return false. Otherwise return true.
2346  */
2347 static bool
2348 ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
2349 				 struct virtchnl_vlan_filter_list_v2 *vfl)
2350 {
2351 	u16 i;
2352 
2353 	if (!vfl->num_elements)
2354 		return false;
2355 
2356 	for (i = 0; i < vfl->num_elements; i++) {
2357 		struct virtchnl_vlan_supported_caps *filtering_support =
2358 			&vfc->filtering_support;
2359 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2360 		struct virtchnl_vlan *outer = &vlan_fltr->outer;
2361 		struct virtchnl_vlan *inner = &vlan_fltr->inner;
2362 
2363 		if ((ice_vc_is_valid_vlan(outer) &&
2364 		     filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
2365 		    (ice_vc_is_valid_vlan(inner) &&
2366 		     filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
2367 			return false;
2368 
2369 		if ((outer->tci_mask &&
2370 		     !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
2371 		    (inner->tci_mask &&
2372 		     !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
2373 			return false;
2374 
2375 		if (((outer->tci & VLAN_PRIO_MASK) &&
2376 		     !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
2377 		    ((inner->tci & VLAN_PRIO_MASK) &&
2378 		     !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
2379 			return false;
2380 
2381 		if ((ice_vc_is_valid_vlan(outer) &&
2382 		     !ice_vc_validate_vlan_tpid(filtering_support->outer,
2383 						outer->tpid)) ||
2384 		    (ice_vc_is_valid_vlan(inner) &&
2385 		     !ice_vc_validate_vlan_tpid(filtering_support->inner,
2386 						inner->tpid)))
2387 			return false;
2388 	}
2389 
2390 	return true;
2391 }
2392 
2393 /**
2394  * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
2395  * @vc_vlan: struct virtchnl_vlan to transform
2396  */
2397 static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
2398 {
2399 	struct ice_vlan vlan = { 0 };
2400 
2401 	vlan.prio = FIELD_GET(VLAN_PRIO_MASK, vc_vlan->tci);
2402 	vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
2403 	vlan.tpid = vc_vlan->tpid;
2404 
2405 	return vlan;
2406 }
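
/* The TCI decomposition above follows the 802.1Q tag layout: PCP in
 * bits 15:13 (VLAN_PRIO_MASK, 0xe000), DEI in bit 12, and VID in bits
 * 11:0 (VLAN_VID_MASK, 0x0fff). A standalone sketch (plain C, not
 * driver code) of the same unpacking with a worked value:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define VLAN_PRIO_MASK	0xe000
 *	#define VLAN_PRIO_SHIFT	13
 *	#define VLAN_VID_MASK	0x0fff
 *
 *	int main(void)
 *	{
 *		uint16_t tci = 0xa00a;	// PCP 5, VID 10
 *		uint8_t prio = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 *		uint16_t vid = tci & VLAN_VID_MASK;
 *
 *		printf("prio=%u vid=%u\n", prio, vid);	// prio=5 vid=10
 *		return 0;
 *	}
 */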
2407 
2408 /**
2409  * ice_vc_vlan_action - action to perform on the virtchnl_vlan
2410  * @vsi: VF's VSI used to perform the action
2411  * @vlan_action: function to perform the action with (i.e. add/del)
2412  * @vlan: VLAN filter to perform the action with
2413  */
2414 static int
2415 ice_vc_vlan_action(struct ice_vsi *vsi,
2416 		   int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
2417 		   struct ice_vlan *vlan)
2418 {
2419 	return vlan_action(vsi, vlan);
2426 }
2427 
2428 /**
2429  * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
2430  * @vf: VF used to delete the VLAN(s)
2431  * @vsi: VF's VSI used to delete the VLAN(s)
2432  * @vfl: virtchnl filter list used to delete the filters
2433  */
2434 static int
2435 ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
2436 		 struct virtchnl_vlan_filter_list_v2 *vfl)
2437 {
2438 	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
2439 	int err;
2440 	u16 i;
2441 
2442 	for (i = 0; i < vfl->num_elements; i++) {
2443 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2444 		struct virtchnl_vlan *vc_vlan;
2445 
2446 		vc_vlan = &vlan_fltr->outer;
2447 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2448 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2449 
2450 			err = ice_vc_vlan_action(vsi,
2451 						 vsi->outer_vlan_ops.del_vlan,
2452 						 &vlan);
2453 			if (err)
2454 				return err;
2455 
2456 			if (vlan_promisc)
2457 				ice_vf_dis_vlan_promisc(vsi, &vlan);
2458 
2459 			/* Disable VLAN filtering when only VLAN 0 is left */
2460 			if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
2461 				err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
2462 				if (err)
2463 					return err;
2464 			}
2465 		}
2466 
2467 		vc_vlan = &vlan_fltr->inner;
2468 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2469 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2470 
2471 			err = ice_vc_vlan_action(vsi,
2472 						 vsi->inner_vlan_ops.del_vlan,
2473 						 &vlan);
2474 			if (err)
2475 				return err;
2476 
2477 			/* no support for VLAN promiscuous on inner VLAN unless
2478 			 * we are in Single VLAN Mode (SVM)
2479 			 */
2480 			if (!ice_is_dvm_ena(&vsi->back->hw)) {
2481 				if (vlan_promisc)
2482 					ice_vf_dis_vlan_promisc(vsi, &vlan);
2483 
2484 				/* Disable VLAN filtering when only VLAN 0 is left */
2485 				if (!ice_vsi_has_non_zero_vlans(vsi)) {
2486 					err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
2487 					if (err)
2488 						return err;
2489 				}
2490 			}
2491 		}
2492 	}
2493 
2494 	return 0;
2495 }
2496 
2497 /**
2498  * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
2499  * @vf: VF the message was received from
2500  * @msg: message received from the VF
2501  */
2502 static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
2503 {
2504 	struct virtchnl_vlan_filter_list_v2 *vfl =
2505 		(struct virtchnl_vlan_filter_list_v2 *)msg;
2506 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2507 	struct ice_vsi *vsi;
2508 
2509 	if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
2510 					      vfl)) {
2511 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2512 		goto out;
2513 	}
2514 
2515 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
2516 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2517 		goto out;
2518 	}
2519 
2520 	vsi = ice_get_vf_vsi(vf);
2521 	if (!vsi) {
2522 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2523 		goto out;
2524 	}
2525 
2526 	if (ice_vc_del_vlans(vf, vsi, vfl))
2527 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2528 
2529 out:
2530 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
2531 				     0);
2532 }
2533 
2534 /**
2535  * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
2536  * @vf: VF used to add the VLAN(s)
2537  * @vsi: VF's VSI used to add the VLAN(s)
2538  * @vfl: virtchnl filter list used to add the filters
2539  */
2540 static int
2541 ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
2542 		 struct virtchnl_vlan_filter_list_v2 *vfl)
2543 {
2544 	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
2545 	int err;
2546 	u16 i;
2547 
2548 	for (i = 0; i < vfl->num_elements; i++) {
2549 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2550 		struct virtchnl_vlan *vc_vlan;
2551 
2552 		vc_vlan = &vlan_fltr->outer;
2553 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2554 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2555 
2556 			err = ice_vc_vlan_action(vsi,
2557 						 vsi->outer_vlan_ops.add_vlan,
2558 						 &vlan);
2559 			if (err)
2560 				return err;
2561 
2562 			if (vlan_promisc) {
2563 				err = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
2564 				if (err)
2565 					return err;
2566 			}
2567 
2568 			/* Enable VLAN filtering on first non-zero VLAN */
2569 			if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
2570 				err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
2571 				if (err)
2572 					return err;
2573 			}
2574 		}
2575 
2576 		vc_vlan = &vlan_fltr->inner;
2577 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2578 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2579 
2580 			err = ice_vc_vlan_action(vsi,
2581 						 vsi->inner_vlan_ops.add_vlan,
2582 						 &vlan);
2583 			if (err)
2584 				return err;
2585 
2586 			/* no support for VLAN promiscuous on inner VLAN unless
2587 			 * we are in Single VLAN Mode (SVM)
2588 			 */
2589 			if (!ice_is_dvm_ena(&vsi->back->hw)) {
2590 				if (vlan_promisc) {
2591 					err = ice_vf_ena_vlan_promisc(vf, vsi,
2592 								      &vlan);
2593 					if (err)
2594 						return err;
2595 				}
2596 
2597 				/* Enable VLAN filtering on first non-zero VLAN */
2598 				if (vf->spoofchk && vlan.vid) {
2599 					err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
2600 					if (err)
2601 						return err;
2602 				}
2603 			}
2604 		}
2605 	}
2606 
2607 	return 0;
2608 }
2609 
2610 /**
2611  * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
2612  * @vsi: VF VSI used to get number of existing VLAN filters
2613  * @vfc: negotiated/supported VLAN filtering capabilities
2614  * @vfl: VLAN filter list from VF to validate
2615  *
2616  * Validate all of the filters in the VLAN filter list from the VF during the
2617  * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
2618  * Otherwise return true.
2619  */
2620 static bool
2621 ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
2622 				     struct virtchnl_vlan_filtering_caps *vfc,
2623 				     struct virtchnl_vlan_filter_list_v2 *vfl)
2624 {
2625 	u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
2626 		vfl->num_elements;
2627 
2628 	if (num_requested_filters > vfc->max_filters)
2629 		return false;
2630 
2631 	return ice_vc_validate_vlan_filter_list(vfc, vfl);
2632 }
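
/* The capacity test above is simple saturation arithmetic: filters
 * already programmed plus filters in this request must stay within the
 * negotiated max_filters (VLAN_N_VID for a trusted VF, otherwise
 * ICE_MAX_VLAN_PER_VF). A standalone sketch (plain C, not driver code;
 * the cap of 8 below is illustrative only):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool vlan_filters_fit(uint16_t existing, uint16_t requested,
 *				     uint16_t max_filters)
 *	{
 *		// widen before adding so the sum cannot wrap in u16
 *		return (uint32_t)existing + requested <= max_filters;
 *	}
 *
 *	// vlan_filters_fit(6, 2, 8) -> true
 *	// vlan_filters_fit(6, 3, 8) -> false
 */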
2633 
2634 /**
2635  * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
2636  * @vf: VF the message was received from
2637  * @msg: message received from the VF
2638  */
2639 static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
2640 {
2641 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2642 	struct virtchnl_vlan_filter_list_v2 *vfl =
2643 		(struct virtchnl_vlan_filter_list_v2 *)msg;
2644 	struct ice_vsi *vsi;
2645 
2646 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2647 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2648 		goto out;
2649 	}
2650 
2651 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
2652 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2653 		goto out;
2654 	}
2655 
2656 	vsi = ice_get_vf_vsi(vf);
2657 	if (!vsi) {
2658 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2659 		goto out;
2660 	}
2661 
2662 	if (!ice_vc_validate_add_vlan_filter_list(vsi,
2663 						  &vf->vlan_v2_caps.filtering,
2664 						  vfl)) {
2665 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2666 		goto out;
2667 	}
2668 
2669 	if (ice_vc_add_vlans(vf, vsi, vfl))
2670 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2671 
2672 out:
2673 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
2674 				     0);
2675 }
2676 
2677 /**
2678  * ice_vc_valid_vlan_setting - validate VLAN setting
2679  * @negotiated_settings: negotiated VLAN settings during VF init
2680  * @ethertype_setting: ethertype(s) requested for the VLAN setting
2681  */
2682 static bool
2683 ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
2684 {
2685 	if (ethertype_setting && !(negotiated_settings & ethertype_setting))
2686 		return false;
2687 
2688 	/* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
2689 	 * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
2690 	 */
2691 	if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
2692 	    hweight32(ethertype_setting) > 1)
2693 		return false;
2694 
2695 	/* ability to modify the VLAN setting was not negotiated */
2696 	if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
2697 		return false;
2698 
2699 	return true;
2700 }
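
/* hweight32() above is a population count, so the middle check reads:
 * requesting more than one VIRTCHNL_VLAN_ETHERTYPE_* bit at once is
 * only legal when VIRTCHNL_VLAN_ETHERTYPE_AND was negotiated. A
 * standalone sketch (plain C, not driver code; the flag values are
 * illustrative stand-ins for the virtchnl.h definitions):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	#define VC_ETYPE_8100	0x1	// stand-in flag values
 *	#define VC_ETYPE_88A8	0x2
 *	#define VC_ETYPE_AND	0x8
 *
 *	static bool etype_count_ok(uint32_t negotiated, uint32_t requested)
 *	{
 *		if (!(negotiated & VC_ETYPE_AND) &&
 *		    __builtin_popcount(requested) > 1)
 *			return false;	// one TPID at a time without _AND
 *		return true;
 *	}
 *
 *	// etype_count_ok(VC_ETYPE_8100, VC_ETYPE_8100 | VC_ETYPE_88A8)
 *	//	-> false
 */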
2701 
2702 /**
2703  * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
2704  * @caps: negotiated VLAN settings during VF init
2705  * @msg: message to validate
2706  *
2707  * Used to validate any VLAN virtchnl message sent as a
2708  * virtchnl_vlan_setting structure. Validates the message against the
2709  * negotiated/supported caps during VF driver init.
2710  */
2711 static bool
2712 ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
2713 			      struct virtchnl_vlan_setting *msg)
2714 {
2715 	if ((!msg->outer_ethertype_setting &&
2716 	     !msg->inner_ethertype_setting) ||
2717 	    (!caps->outer && !caps->inner))
2718 		return false;
2719 
2720 	if (msg->outer_ethertype_setting &&
2721 	    !ice_vc_valid_vlan_setting(caps->outer,
2722 				       msg->outer_ethertype_setting))
2723 		return false;
2724 
2725 	if (msg->inner_ethertype_setting &&
2726 	    !ice_vc_valid_vlan_setting(caps->inner,
2727 				       msg->inner_ethertype_setting))
2728 		return false;
2729 
2730 	return true;
2731 }
2732 
2733 /**
2734  * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
2735  * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
2736  * @tpid: VLAN TPID to populate
2737  */
2738 static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
2739 {
2740 	switch (ethertype_setting) {
2741 	case VIRTCHNL_VLAN_ETHERTYPE_8100:
2742 		*tpid = ETH_P_8021Q;
2743 		break;
2744 	case VIRTCHNL_VLAN_ETHERTYPE_88A8:
2745 		*tpid = ETH_P_8021AD;
2746 		break;
2747 	case VIRTCHNL_VLAN_ETHERTYPE_9100:
2748 		*tpid = ETH_P_QINQ1;
2749 		break;
2750 	default:
2751 		*tpid = 0;
2752 		return -EINVAL;
2753 	}
2754 
2755 	return 0;
2756 }
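
/* ice_vc_get_tpid() is the inverse of the mapping used by
 * ice_vc_validate_vlan_tpid(): exactly one VIRTCHNL_VLAN_ETHERTYPE_*
 * flag selects one TPID (0x8100, 0x88a8, or 0x9100). A standalone
 * sketch (plain C, not driver code; the flag values are illustrative
 * stand-ins for the virtchnl.h definitions):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define VC_ETYPE_8100	0x1	// stand-in flag values
 *	#define VC_ETYPE_88A8	0x2
 *	#define VC_ETYPE_9100	0x4
 *
 *	static int get_tpid(uint32_t setting, uint16_t *tpid)
 *	{
 *		switch (setting) {
 *		case VC_ETYPE_8100: *tpid = 0x8100; return 0;
 *		case VC_ETYPE_88A8: *tpid = 0x88a8; return 0;
 *		case VC_ETYPE_9100: *tpid = 0x9100; return 0;
 *		default: *tpid = 0; return -1;	// ambiguous or unknown
 *		}
 *	}
 *
 *	int main(void)
 *	{
 *		uint16_t tpid;
 *
 *		if (!get_tpid(VC_ETYPE_88A8, &tpid))
 *			printf("tpid=0x%04x\n", tpid);	// tpid=0x88a8
 *		return 0;
 *	}
 */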
2757 
2758 /**
2759  * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
2760  * @vsi: VF's VSI used to enable the VLAN offload
2761  * @ena_offload: function used to enable the VLAN offload
2762  * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
2763  */
2764 static int
2765 ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
2766 			int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
2767 			u32 ethertype_setting)
2768 {
2769 	u16 tpid;
2770 	int err;
2771 
2772 	err = ice_vc_get_tpid(ethertype_setting, &tpid);
2773 	if (err)
2774 		return err;
2775 
2776 	return ena_offload(vsi, tpid);
2781 }
2782 
2783 /**
2784  * ice_vc_ena_vlan_stripping_v2_msg
2785  * @vf: VF the message was received from
2786  * @msg: message received from the VF
2787  *
2788  * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
2789  */
2790 static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
2791 {
2792 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2793 	struct virtchnl_vlan_supported_caps *stripping_support;
2794 	struct virtchnl_vlan_setting *strip_msg =
2795 		(struct virtchnl_vlan_setting *)msg;
2796 	u32 ethertype_setting;
2797 	struct ice_vsi *vsi;
2798 
2799 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2800 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2801 		goto out;
2802 	}
2803 
2804 	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
2805 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2806 		goto out;
2807 	}
2808 
2809 	vsi = ice_get_vf_vsi(vf);
2810 	if (!vsi) {
2811 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2812 		goto out;
2813 	}
2814 
2815 	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
2816 	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
2817 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2818 		goto out;
2819 	}
2820 
2821 	if (ice_vsi_is_rxq_crc_strip_dis(vsi)) {
2822 		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
2823 		goto out;
2824 	}
2825 
2826 	ethertype_setting = strip_msg->outer_ethertype_setting;
2827 	if (ethertype_setting) {
2828 		if (ice_vc_ena_vlan_offload(vsi,
2829 					    vsi->outer_vlan_ops.ena_stripping,
2830 					    ethertype_setting)) {
2831 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2832 			goto out;
2833 		} else {
2834 			enum ice_l2tsel l2tsel =
2835 				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
2836 
2837 			/* PF tells the VF that the outer VLAN tag is always
2838 			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
2839 			 * inner is always extracted to
2840 			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
2841 			 * support outer stripping so the first tag always ends
2842 			 * up in L2TAG2_2ND and the second/inner tag, if
2843 			 * enabled, is extracted in L2TAG1.
2844 			 */
2845 			ice_vsi_update_l2tsel(vsi, l2tsel);
2846 
2847 			vf->vlan_strip_ena |= ICE_OUTER_VLAN_STRIP_ENA;
2848 		}
2849 	}
2850 
2851 	ethertype_setting = strip_msg->inner_ethertype_setting;
2852 	if (ethertype_setting &&
2853 	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
2854 				    ethertype_setting)) {
2855 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2856 		goto out;
2857 	}
2858 
2859 	if (ethertype_setting)
2860 		vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
2861 
2862 out:
2863 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
2864 				     v_ret, NULL, 0);
2865 }
2866 
2867 /**
2868  * ice_vc_dis_vlan_stripping_v2_msg
2869  * @vf: VF the message was received from
2870  * @msg: message received from the VF
2871  *
2872  * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
2873  */
2874 static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
2875 {
2876 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2877 	struct virtchnl_vlan_supported_caps *stripping_support;
2878 	struct virtchnl_vlan_setting *strip_msg =
2879 		(struct virtchnl_vlan_setting *)msg;
2880 	u32 ethertype_setting;
2881 	struct ice_vsi *vsi;
2882 
2883 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2884 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2885 		goto out;
2886 	}
2887 
2888 	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
2889 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2890 		goto out;
2891 	}
2892 
2893 	vsi = ice_get_vf_vsi(vf);
2894 	if (!vsi) {
2895 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2896 		goto out;
2897 	}
2898 
2899 	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
2900 	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
2901 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2902 		goto out;
2903 	}
2904 
2905 	ethertype_setting = strip_msg->outer_ethertype_setting;
2906 	if (ethertype_setting) {
2907 		if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
2908 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2909 			goto out;
2910 		} else {
2911 			enum ice_l2tsel l2tsel =
2912 				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
2913 
2914 			/* PF tells the VF that the outer VLAN tag is always
2915 			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
2916 			 * inner is always extracted to
2917 			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
2918 			 * support inner stripping while outer stripping is
2919 			 * disabled so that the first and only tag is extracted
2920 			 * in L2TAG1.
2921 			 */
2922 			ice_vsi_update_l2tsel(vsi, l2tsel);
2923 
2924 			vf->vlan_strip_ena &= ~ICE_OUTER_VLAN_STRIP_ENA;
2925 		}
2926 	}
2927 
2928 	ethertype_setting = strip_msg->inner_ethertype_setting;
2929 	if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
2930 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2931 		goto out;
2932 	}
2933 
2934 	if (ethertype_setting)
2935 		vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;
2936 
2937 out:
2938 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
2939 				     v_ret, NULL, 0);
2940 }
2941 
2942 /**
2943  * ice_vc_ena_vlan_insertion_v2_msg
2944  * @vf: VF the message was received from
2945  * @msg: message received from the VF
2946  * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
2947  * virthcnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
2948  */
2949 static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
2950 {
2951 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2952 	struct virtchnl_vlan_supported_caps *insertion_support;
2953 	struct virtchnl_vlan_setting *insertion_msg =
2954 		(struct virtchnl_vlan_setting *)msg;
2955 	u32 ethertype_setting;
2956 	struct ice_vsi *vsi;
2957 
2958 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2959 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2960 		goto out;
2961 	}
2962 
2963 	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
2964 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2965 		goto out;
2966 	}
2967 
2968 	vsi = ice_get_vf_vsi(vf);
2969 	if (!vsi) {
2970 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2971 		goto out;
2972 	}
2973 
2974 	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
2975 	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
2976 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2977 		goto out;
2978 	}
2979 
2980 	ethertype_setting = insertion_msg->outer_ethertype_setting;
2981 	if (ethertype_setting &&
2982 	    ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
2983 				    ethertype_setting)) {
2984 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2985 		goto out;
2986 	}
2987 
2988 	ethertype_setting = insertion_msg->inner_ethertype_setting;
2989 	if (ethertype_setting &&
2990 	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
2991 				    ethertype_setting)) {
2992 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2993 		goto out;
2994 	}
2995 
2996 out:
2997 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
2998 				     v_ret, NULL, 0);
2999 }
3000 
3001 /**
3002  * ice_vc_dis_vlan_insertion_v2_msg
3003  * @vf: VF the message was received from
3004  * @msg: message received from the VF
3005  * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
3006  * virthcnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
3007  */
3008 static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
3009 {
3010 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3011 	struct virtchnl_vlan_supported_caps *insertion_support;
3012 	struct virtchnl_vlan_setting *insertion_msg =
3013 		(struct virtchnl_vlan_setting *)msg;
3014 	u32 ethertype_setting;
3015 	struct ice_vsi *vsi;
3016 
3017 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3018 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3019 		goto out;
3020 	}
3021 
3022 	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
3023 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3024 		goto out;
3025 	}
3026 
3027 	vsi = ice_get_vf_vsi(vf);
3028 	if (!vsi) {
3029 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3030 		goto out;
3031 	}
3032 
3033 	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
3034 	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
3035 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3036 		goto out;
3037 	}
3038 
3039 	ethertype_setting = insertion_msg->outer_ethertype_setting;
3040 	if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
3041 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3042 		goto out;
3043 	}
3044 
3045 	ethertype_setting = insertion_msg->inner_ethertype_setting;
3046 	if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
3047 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3048 		goto out;
3049 	}
3050 
3051 out:
3052 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
3053 				     v_ret, NULL, 0);
3054 }
3055 
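/**
 * ice_vc_get_ptp_cap - handle VIRTCHNL_OP_1588_PTP_GET_CAPS from a VF
 * @vf: VF the message was received from
 * @msg: PTP capabilities requested by the VF
 *
 * Grant the VF the PF-supported PTP capabilities (Rx timestamping and
 * direct PHC reads) if it requested any of them, and send the granted
 * set back in the response.
 */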
3056 static int ice_vc_get_ptp_cap(struct ice_vf *vf,
3057 			      const struct virtchnl_ptp_caps *msg)
3058 {
3059 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3060 	u32 caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
3061 		   VIRTCHNL_1588_PTP_CAP_READ_PHC;
3062 
3063 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
3064 		goto err;
3065 
3066 	v_ret = VIRTCHNL_STATUS_SUCCESS;
3067 
3068 	if (msg->caps & caps)
3069 		vf->ptp_caps = caps;
3070 
3071 err:
3072 	/* send the response back to the VF */
3073 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_CAPS, v_ret,
3074 				     (u8 *)&vf->ptp_caps,
3075 				     sizeof(struct virtchnl_ptp_caps));
3076 }
3077 
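/**
 * ice_vc_get_phc_time - handle VIRTCHNL_OP_1588_PTP_GET_TIME from a VF
 * @vf: VF the message was received from
 *
 * Read the current time from the PF's source clock and send it back to
 * the VF.
 */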
3078 static int ice_vc_get_phc_time(struct ice_vf *vf)
3079 {
3080 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3081 	struct virtchnl_phc_time *phc_time = NULL;
3082 	struct ice_pf *pf = vf->pf;
3083 	u32 len = 0;
3084 	int ret;
3085 
3086 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
3087 		goto err;
3088 
3089 	v_ret = VIRTCHNL_STATUS_SUCCESS;
3090 
3091 	phc_time = kzalloc(sizeof(*phc_time), GFP_KERNEL);
3092 	if (!phc_time) {
3093 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
3094 		goto err;
3095 	}
3096 
3097 	len = sizeof(*phc_time);
3098 
3099 	phc_time->time = ice_ptp_read_src_clk_reg(pf, NULL);
3100 
3101 err:
3102 	/* send the response back to the VF */
3103 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_TIME, v_ret,
3104 				    (u8 *)phc_time, len);
3105 	kfree(phc_time);
3106 	return ret;
3107 }
3108 
3109 static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
3110 	.get_ver_msg = ice_vc_get_ver_msg,
3111 	.get_vf_res_msg = ice_vc_get_vf_res_msg,
3112 	.reset_vf = ice_vc_reset_vf_msg,
3113 	.add_mac_addr_msg = ice_vc_add_mac_addr_msg,
3114 	.del_mac_addr_msg = ice_vc_del_mac_addr_msg,
3115 	.cfg_qs_msg = ice_vc_cfg_qs_msg,
3116 	.ena_qs_msg = ice_vc_ena_qs_msg,
3117 	.dis_qs_msg = ice_vc_dis_qs_msg,
3118 	.request_qs_msg = ice_vc_request_qs_msg,
3119 	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
3120 	.config_rss_key = ice_vc_config_rss_key,
3121 	.config_rss_lut = ice_vc_config_rss_lut,
3122 	.config_rss_hfunc = ice_vc_config_rss_hfunc,
3123 	.get_stats_msg = ice_vc_get_stats_msg,
3124 	.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
3125 	.add_vlan_msg = ice_vc_add_vlan_msg,
3126 	.remove_vlan_msg = ice_vc_remove_vlan_msg,
3127 	.query_rxdid = ice_vc_query_rxdid,
3128 	.get_rss_hashcfg = ice_vc_get_rss_hashcfg,
3129 	.set_rss_hashcfg = ice_vc_set_rss_hashcfg,
3130 	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
3131 	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
3132 	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
3133 	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
3134 	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
3135 	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
3136 	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
3137 	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
3138 	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
3139 	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
3140 	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
3141 	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
3142 	.get_qos_caps = ice_vc_get_qos_caps,
3143 	.cfg_q_bw = ice_vc_cfg_q_bw,
3144 	.cfg_q_quanta = ice_vc_cfg_q_quanta,
3145 	.get_ptp_cap = ice_vc_get_ptp_cap,
3146 	.get_phc_time = ice_vc_get_phc_time,
3147 	/* If you add a new op here please make sure to add it to
3148 	 * ice_virtchnl_repr_ops as well.
3149 	 */
3150 };
3151 
3152 /**
3153  * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
3154  * @vf: the VF to switch ops
3155  */
3156 void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
3157 {
3158 	vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
3159 }
3160 
3161 /**
3162  * ice_vc_repr_add_mac
3163  * @vf: pointer to VF
3164  * @msg: virtchannel message
3165  *
3166  * When port representors are created, we do not add a MAC rule
3167  * to the firmware; instead, we store the MAC so that the PF can
3168  * report the same MAC as the VF.
3169  */
3170 static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
3171 {
3172 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3173 	struct virtchnl_ether_addr_list *al =
3174 	    (struct virtchnl_ether_addr_list *)msg;
3175 	struct ice_vsi *vsi;
3176 	struct ice_pf *pf;
3177 	int i;
3178 
3179 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3180 	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3181 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3182 		goto handle_mac_exit;
3183 	}
3184 
3185 	pf = vf->pf;
3186 
3187 	vsi = ice_get_vf_vsi(vf);
3188 	if (!vsi) {
3189 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3190 		goto handle_mac_exit;
3191 	}
3192 
3193 	for (i = 0; i < al->num_elements; i++) {
3194 		u8 *mac_addr = al->list[i].addr;
3195 
3196 		if (!is_unicast_ether_addr(mac_addr) ||
3197 		    ether_addr_equal(mac_addr, vf->hw_lan_addr))
3198 			continue;
3199 
3200 		if (vf->pf_set_mac) {
3201 			dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
3202 			v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
3203 			goto handle_mac_exit;
3204 		}
3205 
3206 		ice_vfhw_mac_add(vf, &al->list[i]);
3207 		break;
3208 	}
3209 
3210 handle_mac_exit:
3211 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
3212 				     v_ret, NULL, 0);
3213 }
3214 
3215 /**
3216  * ice_vc_repr_del_mac - response with success for deleting MAC
3217  * @vf: pointer to VF
3218  * @msg: virtchannel message
3219  *
3220  * Respond with success to not break normal VF flow.
3221  * For legacy VF driver try to update cached MAC address.
3222  */
3223 static int
3224 ice_vc_repr_del_mac(struct ice_vf *vf, u8 *msg)
3225 {
3226 	struct virtchnl_ether_addr_list *al =
3227 		(struct virtchnl_ether_addr_list *)msg;
3228 
3229 	ice_update_legacy_cached_mac(vf, &al->list[0]);
3230 
3231 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
3232 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
3233 }
3234 
3235 static int
3236 ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
3237 {
3238 	dev_dbg(ice_pf_to_dev(vf->pf),
3239 		"Can't config promiscuous mode in switchdev mode for VF %d\n",
3240 		vf->vf_id);
3241 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
3242 				     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3243 				     NULL, 0);
3244 }
3245 
3246 static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
3247 	.get_ver_msg = ice_vc_get_ver_msg,
3248 	.get_vf_res_msg = ice_vc_get_vf_res_msg,
3249 	.reset_vf = ice_vc_reset_vf_msg,
3250 	.add_mac_addr_msg = ice_vc_repr_add_mac,
3251 	.del_mac_addr_msg = ice_vc_repr_del_mac,
3252 	.cfg_qs_msg = ice_vc_cfg_qs_msg,
3253 	.ena_qs_msg = ice_vc_ena_qs_msg,
3254 	.dis_qs_msg = ice_vc_dis_qs_msg,
3255 	.request_qs_msg = ice_vc_request_qs_msg,
3256 	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
3257 	.config_rss_key = ice_vc_config_rss_key,
3258 	.config_rss_lut = ice_vc_config_rss_lut,
3259 	.config_rss_hfunc = ice_vc_config_rss_hfunc,
3260 	.get_stats_msg = ice_vc_get_stats_msg,
3261 	.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
3262 	.add_vlan_msg = ice_vc_add_vlan_msg,
3263 	.remove_vlan_msg = ice_vc_remove_vlan_msg,
3264 	.query_rxdid = ice_vc_query_rxdid,
3265 	.get_rss_hashcfg = ice_vc_get_rss_hashcfg,
3266 	.set_rss_hashcfg = ice_vc_set_rss_hashcfg,
3267 	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
3268 	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
3269 	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
3270 	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
3271 	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
3272 	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
3273 	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
3274 	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
3275 	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
3276 	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
3277 	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
3278 	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
3279 	.get_qos_caps = ice_vc_get_qos_caps,
3280 	.cfg_q_bw = ice_vc_cfg_q_bw,
3281 	.cfg_q_quanta = ice_vc_cfg_q_quanta,
3282 	.get_ptp_cap = ice_vc_get_ptp_cap,
3283 	.get_phc_time = ice_vc_get_phc_time,
3284 };
3285 
3286 /**
3287  * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops
3288  * @vf: the VF to switch ops
3289  */
3290 void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
3291 {
3292 	vf->virtchnl_ops = &ice_virtchnl_repr_ops;
3293 }
3294 
3295 /**
3296  * ice_is_malicious_vf - check if this VF might be overflowing the mailbox
3297  * @vf: the VF to check
3298  * @mbxdata: data about the state of the mailbox
3299  *
3300  * Detect if a given VF might be malicious and attempting to overflow the PF
3301  * mailbox. If so, log a warning message and ignore this event.
3302  */
3303 static bool
3304 ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata)
3305 {
3306 	bool report_malvf = false;
3307 	struct device *dev;
3308 	struct ice_pf *pf;
3309 	int status;
3310 
3311 	pf = vf->pf;
3312 	dev = ice_pf_to_dev(pf);
3313 
3314 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
3315 		return vf->mbx_info.malicious;
3316 
3317 	/* check to see if we have a newly malicious VF */
3318 	status = ice_mbx_vf_state_handler(&pf->hw, mbxdata, &vf->mbx_info,
3319 					  &report_malvf);
3320 	if (status)
3321 		dev_warn_ratelimited(dev, "Unable to check status of mailbox overflow for VF %u MAC %pM, status %d\n",
3322 				     vf->vf_id, vf->dev_lan_addr, status);
3323 
3324 	if (report_malvf) {
3325 		struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
3326 		u8 zero_addr[ETH_ALEN] = {};
3327 
3328 		dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
3329 			 vf->dev_lan_addr,
3330 			 pf_vsi ? pf_vsi->netdev->dev_addr : zero_addr);
3331 	}
3332 
3333 	return vf->mbx_info.malicious;
3334 }
3335 
3336 /**
3337  * ice_vc_process_vf_msg - Process request from VF
3338  * @pf: pointer to the PF structure
3339  * @event: pointer to the AQ event
3340  * @mbxdata: information used to detect VF attempting mailbox overflow
3341  *
3342  * Called from the common asq/arq handler to process a request from a VF. When
3343  * this flow is used for devices with hardware VF to PF message queue overflow
3344  * support (ICE_F_MBX_LIMIT), mbxdata is set to NULL and the
3345  * ice_is_malicious_vf() check is skipped.
3346  */
3347 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
3348 			   struct ice_mbx_data *mbxdata)
3349 {
3350 	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3351 	s16 vf_id = le16_to_cpu(event->desc.retval);
3352 	const struct ice_virtchnl_ops *ops;
3353 	u16 msglen = event->msg_len;
3354 	u8 *msg = event->msg_buf;
3355 	struct ice_vf *vf = NULL;
3356 	struct device *dev;
3357 	int err = 0;
3358 
3359 	dev = ice_pf_to_dev(pf);
3360 
3361 	vf = ice_get_vf_by_id(pf, vf_id);
3362 	if (!vf) {
3363 		dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
3364 			vf_id, v_opcode, msglen);
3365 		return;
3366 	}
3367 
3368 	mutex_lock(&vf->cfg_lock);
3369 
3370 	/* Check if the VF is trying to overflow the mailbox */
3371 	if (mbxdata && ice_is_malicious_vf(vf, mbxdata))
3372 		goto finish;
3373 
3374 	/* Check if VF is disabled. */
3375 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3376 		err = -EPERM;
3377 		goto error_handler;
3378 	}
3379 
3380 	ops = vf->virtchnl_ops;
3381 
3382 	/* Perform basic checks on the msg */
3383 	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3384 	if (err) {
3385 		if (err == VIRTCHNL_STATUS_ERR_PARAM)
3386 			err = -EPERM;
3387 		else
3388 			err = -EINVAL;
3389 	}
3390 
3391 error_handler:
3392 	if (err) {
3393 		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
3394 				      NULL, 0);
3395 		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3396 			vf_id, v_opcode, msglen, err);
3397 		goto finish;
3398 	}
3399 
3400 	if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
3401 		ice_vc_send_msg_to_vf(vf, v_opcode,
3402 				      VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
3403 				      0);
3404 		goto finish;
3405 	}
3406 
3407 	switch (v_opcode) {
3408 	case VIRTCHNL_OP_VERSION:
3409 		err = ops->get_ver_msg(vf, msg);
3410 		break;
3411 	case VIRTCHNL_OP_GET_VF_RESOURCES:
3412 		err = ops->get_vf_res_msg(vf, msg);
3413 		if (ice_vf_init_vlan_stripping(vf))
3414 			dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
3415 				vf->vf_id);
3416 		ice_vc_notify_vf_link_state(vf);
3417 		break;
3418 	case VIRTCHNL_OP_RESET_VF:
3419 		ops->reset_vf(vf);
3420 		break;
3421 	case VIRTCHNL_OP_ADD_ETH_ADDR:
3422 		err = ops->add_mac_addr_msg(vf, msg);
3423 		break;
3424 	case VIRTCHNL_OP_DEL_ETH_ADDR:
3425 		err = ops->del_mac_addr_msg(vf, msg);
3426 		break;
3427 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3428 		err = ops->cfg_qs_msg(vf, msg);
3429 		break;
3430 	case VIRTCHNL_OP_ENABLE_QUEUES:
3431 		err = ops->ena_qs_msg(vf, msg);
3432 		ice_vc_notify_vf_link_state(vf);
3433 		break;
3434 	case VIRTCHNL_OP_DISABLE_QUEUES:
3435 		err = ops->dis_qs_msg(vf, msg);
3436 		break;
3437 	case VIRTCHNL_OP_REQUEST_QUEUES:
3438 		err = ops->request_qs_msg(vf, msg);
3439 		break;
3440 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3441 		err = ops->cfg_irq_map_msg(vf, msg);
3442 		break;
3443 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
3444 		err = ops->config_rss_key(vf, msg);
3445 		break;
3446 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
3447 		err = ops->config_rss_lut(vf, msg);
3448 		break;
3449 	case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
3450 		err = ops->config_rss_hfunc(vf, msg);
3451 		break;
3452 	case VIRTCHNL_OP_GET_STATS:
3453 		err = ops->get_stats_msg(vf, msg);
3454 		break;
3455 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3456 		err = ops->cfg_promiscuous_mode_msg(vf, msg);
3457 		break;
3458 	case VIRTCHNL_OP_ADD_VLAN:
3459 		err = ops->add_vlan_msg(vf, msg);
3460 		break;
3461 	case VIRTCHNL_OP_DEL_VLAN:
3462 		err = ops->remove_vlan_msg(vf, msg);
3463 		break;
3464 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
3465 		err = ops->query_rxdid(vf);
3466 		break;
3467 	case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
3468 		err = ops->get_rss_hashcfg(vf);
3469 		break;
3470 	case VIRTCHNL_OP_SET_RSS_HASHCFG:
3471 		err = ops->set_rss_hashcfg(vf, msg);
3472 		break;
3473 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3474 		err = ops->ena_vlan_stripping(vf);
3475 		break;
3476 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3477 		err = ops->dis_vlan_stripping(vf);
3478 		break;
3479 	case VIRTCHNL_OP_ADD_FDIR_FILTER:
3480 		err = ops->add_fdir_fltr_msg(vf, msg);
3481 		break;
3482 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
3483 		err = ops->del_fdir_fltr_msg(vf, msg);
3484 		break;
3485 	case VIRTCHNL_OP_ADD_RSS_CFG:
3486 		err = ops->handle_rss_cfg_msg(vf, msg, true);
3487 		break;
3488 	case VIRTCHNL_OP_DEL_RSS_CFG:
3489 		err = ops->handle_rss_cfg_msg(vf, msg, false);
3490 		break;
3491 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
3492 		err = ops->get_offload_vlan_v2_caps(vf);
3493 		break;
3494 	case VIRTCHNL_OP_ADD_VLAN_V2:
3495 		err = ops->add_vlan_v2_msg(vf, msg);
3496 		break;
3497 	case VIRTCHNL_OP_DEL_VLAN_V2:
3498 		err = ops->remove_vlan_v2_msg(vf, msg);
3499 		break;
3500 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
3501 		err = ops->ena_vlan_stripping_v2_msg(vf, msg);
3502 		break;
3503 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
3504 		err = ops->dis_vlan_stripping_v2_msg(vf, msg);
3505 		break;
3506 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
3507 		err = ops->ena_vlan_insertion_v2_msg(vf, msg);
3508 		break;
3509 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
3510 		err = ops->dis_vlan_insertion_v2_msg(vf, msg);
3511 		break;
3512 	case VIRTCHNL_OP_GET_QOS_CAPS:
3513 		err = ops->get_qos_caps(vf);
3514 		break;
3515 	case VIRTCHNL_OP_CONFIG_QUEUE_BW:
3516 		err = ops->cfg_q_bw(vf, msg);
3517 		break;
3518 	case VIRTCHNL_OP_CONFIG_QUANTA:
3519 		err = ops->cfg_q_quanta(vf, msg);
3520 		break;
3521 	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
3522 		err = ops->get_ptp_cap(vf, (const void *)msg);
3523 		break;
3524 	case VIRTCHNL_OP_1588_PTP_GET_TIME:
3525 		err = ops->get_phc_time(vf);
3526 		break;
3527 	case VIRTCHNL_OP_UNKNOWN:
3528 	default:
3529 		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3530 			vf_id);
3531 		err = ice_vc_send_msg_to_vf(vf, v_opcode,
3532 					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3533 					    NULL, 0);
3534 		break;
3535 	}
3536 	if (err) {
3537 		/* There is no way to propagate the error back to the VF at
3538 		 * this point; just log it and continue with pending work.
3539 		 */
3540 		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3541 			 vf_id, v_opcode, err);
3542 	}
3543 
3544 finish:
3545 	mutex_unlock(&vf->cfg_lock);
3546 	ice_put_vf(vf);
3547 }
3548