xref: /linux/drivers/net/ethernet/intel/ice/virt/rss.c (revision 4c2ce64efd0df8bf209c7c3bcb85ae4a62c14be6)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3 
4 #include "ice_vf_lib_private.h"
5 #include "ice.h"
6 
/* Turn a VIRTCHNL_PROTO_HDR_*_* field enum into a single-bit selector mask
 * by masking off everything but the per-header field index.
 */
#define FIELD_SELECTOR(proto_hdr_field) \
		BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
9 
/* One translation entry: a virtchnl protocol header type and the ice flow
 * segment header bit(s) it maps to.
 */
struct ice_vc_hdr_match_type {
	u32 vc_hdr;	/* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
	u32 ice_hdr;	/* ice headers (ICE_FLOW_SEG_HDR_XXX) */
};
14 
/* Translation table from virtchnl header types to ice flow segment headers.
 * Multiple entries may match one virtchnl type (e.g. S_VLAN and C_VLAN both
 * map to ICE_FLOW_SEG_HDR_VLAN); matching ORs all hits together.
 */
static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
	{VIRTCHNL_PROTO_HDR_ETH,	ICE_FLOW_SEG_HDR_ETH},
	{VIRTCHNL_PROTO_HDR_S_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_C_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
	{VIRTCHNL_PROTO_HDR_PPPOE,	ICE_FLOW_SEG_HDR_PPPOE},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,	ICE_FLOW_SEG_HDR_GTPU_IP},
	{VIRTCHNL_PROTO_HDR_GTPU_EH,	ICE_FLOW_SEG_HDR_GTPU_EH},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
					ICE_FLOW_SEG_HDR_GTPU_DWN},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
					ICE_FLOW_SEG_HDR_GTPU_UP},
	{VIRTCHNL_PROTO_HDR_L2TPV3,	ICE_FLOW_SEG_HDR_L2TPV3},
	{VIRTCHNL_PROTO_HDR_ESP,	ICE_FLOW_SEG_HDR_ESP},
	{VIRTCHNL_PROTO_HDR_AH,		ICE_FLOW_SEG_HDR_AH},
	{VIRTCHNL_PROTO_HDR_PFCP,	ICE_FLOW_SEG_HDR_PFCP_SESSION},
};
39 
/* One translation entry: a virtchnl header type plus the exact field-selector
 * bitmap a VF requested, and the ice hash field bits that combination maps to.
 */
struct ice_vc_hash_field_match_type {
	u32 vc_hdr;		/* virtchnl headers
				 * (VIRTCHNL_PROTO_HDR_XXX)
				 */
	u32 vc_hash_field;	/* virtchnl hash fields selector
				 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
				 */
	u64 ice_hash_field;	/* ice hash fields
				 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
				 */
};
51 
/* Translation table from (virtchnl header type, exact field-selector bitmap)
 * pairs to ice hash field bits. Lookup requires an exact match of the whole
 * selector bitmap, so combined selectors (e.g. SRC | DST) have their own rows.
 */
static const struct
ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		ICE_FLOW_HASH_ETH},
	{VIRTCHNL_PROTO_HDR_ETH,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
	{VIRTCHNL_PROTO_HDR_S_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
	{VIRTCHNL_PROTO_HDR_C_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		ICE_FLOW_HASH_IPV4},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		ICE_FLOW_HASH_IPV6},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		ICE_FLOW_HASH_TCP_PORT},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		ICE_FLOW_HASH_UDP_PORT},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		ICE_FLOW_HASH_SCTP_PORT},
	{VIRTCHNL_PROTO_HDR_PPPOE,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
	{VIRTCHNL_PROTO_HDR_L2TPV3,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
	{VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
	{VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
		BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
	{VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
};
158 
159 /**
160  * ice_vc_validate_pattern
161  * @vf: pointer to the VF info
162  * @proto: virtchnl protocol headers
163  *
164  * validate the pattern is supported or not.
165  *
166  * Return: true on success, false on error.
167  */
168 bool
169 ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
170 {
171 	bool is_ipv4 = false;
172 	bool is_ipv6 = false;
173 	bool is_udp = false;
174 	u16 ptype = -1;
175 	int i = 0;
176 
177 	while (i < proto->count &&
178 	       proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
179 		switch (proto->proto_hdr[i].type) {
180 		case VIRTCHNL_PROTO_HDR_ETH:
181 			ptype = ICE_PTYPE_MAC_PAY;
182 			break;
183 		case VIRTCHNL_PROTO_HDR_IPV4:
184 			ptype = ICE_PTYPE_IPV4_PAY;
185 			is_ipv4 = true;
186 			break;
187 		case VIRTCHNL_PROTO_HDR_IPV6:
188 			ptype = ICE_PTYPE_IPV6_PAY;
189 			is_ipv6 = true;
190 			break;
191 		case VIRTCHNL_PROTO_HDR_UDP:
192 			if (is_ipv4)
193 				ptype = ICE_PTYPE_IPV4_UDP_PAY;
194 			else if (is_ipv6)
195 				ptype = ICE_PTYPE_IPV6_UDP_PAY;
196 			is_udp = true;
197 			break;
198 		case VIRTCHNL_PROTO_HDR_TCP:
199 			if (is_ipv4)
200 				ptype = ICE_PTYPE_IPV4_TCP_PAY;
201 			else if (is_ipv6)
202 				ptype = ICE_PTYPE_IPV6_TCP_PAY;
203 			break;
204 		case VIRTCHNL_PROTO_HDR_SCTP:
205 			if (is_ipv4)
206 				ptype = ICE_PTYPE_IPV4_SCTP_PAY;
207 			else if (is_ipv6)
208 				ptype = ICE_PTYPE_IPV6_SCTP_PAY;
209 			break;
210 		case VIRTCHNL_PROTO_HDR_GTPU_IP:
211 		case VIRTCHNL_PROTO_HDR_GTPU_EH:
212 			if (is_ipv4)
213 				ptype = ICE_MAC_IPV4_GTPU;
214 			else if (is_ipv6)
215 				ptype = ICE_MAC_IPV6_GTPU;
216 			goto out;
217 		case VIRTCHNL_PROTO_HDR_L2TPV3:
218 			if (is_ipv4)
219 				ptype = ICE_MAC_IPV4_L2TPV3;
220 			else if (is_ipv6)
221 				ptype = ICE_MAC_IPV6_L2TPV3;
222 			goto out;
223 		case VIRTCHNL_PROTO_HDR_ESP:
224 			if (is_ipv4)
225 				ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
226 						ICE_MAC_IPV4_ESP;
227 			else if (is_ipv6)
228 				ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
229 						ICE_MAC_IPV6_ESP;
230 			goto out;
231 		case VIRTCHNL_PROTO_HDR_AH:
232 			if (is_ipv4)
233 				ptype = ICE_MAC_IPV4_AH;
234 			else if (is_ipv6)
235 				ptype = ICE_MAC_IPV6_AH;
236 			goto out;
237 		case VIRTCHNL_PROTO_HDR_PFCP:
238 			if (is_ipv4)
239 				ptype = ICE_MAC_IPV4_PFCP_SESSION;
240 			else if (is_ipv6)
241 				ptype = ICE_MAC_IPV6_PFCP_SESSION;
242 			goto out;
243 		default:
244 			break;
245 		}
246 		i++;
247 	}
248 
249 out:
250 	return ice_hw_ptype_ena(&vf->pf->hw, ptype);
251 }
252 
253 /**
254  * ice_vc_parse_rss_cfg - parses hash fields and headers from
255  * a specific virtchnl RSS cfg
256  * @hw: pointer to the hardware
257  * @rss_cfg: pointer to the virtchnl RSS cfg
258  * @hash_cfg: pointer to the HW hash configuration
259  *
260  * Return true if all the protocol header and hash fields in the RSS cfg could
261  * be parsed, else return false
262  *
263  * This function parses the virtchnl RSS cfg to be the intended
264  * hash fields and the intended header for RSS configuration
265  */
266 static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
267 				 struct virtchnl_rss_cfg *rss_cfg,
268 				 struct ice_rss_hash_cfg *hash_cfg)
269 {
270 	const struct ice_vc_hash_field_match_type *hf_list;
271 	const struct ice_vc_hdr_match_type *hdr_list;
272 	int i, hf_list_len, hdr_list_len;
273 	u32 *addl_hdrs = &hash_cfg->addl_hdrs;
274 	u64 *hash_flds = &hash_cfg->hash_flds;
275 
276 	/* set outer layer RSS as default */
277 	hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
278 
279 	if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
280 		hash_cfg->symm = true;
281 	else
282 		hash_cfg->symm = false;
283 
284 	hf_list = ice_vc_hash_field_list;
285 	hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
286 	hdr_list = ice_vc_hdr_list;
287 	hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
288 
289 	for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
290 		struct virtchnl_proto_hdr *proto_hdr =
291 					&rss_cfg->proto_hdrs.proto_hdr[i];
292 		bool hdr_found = false;
293 		int j;
294 
295 		/* Find matched ice headers according to virtchnl headers. */
296 		for (j = 0; j < hdr_list_len; j++) {
297 			struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
298 
299 			if (proto_hdr->type == hdr_map.vc_hdr) {
300 				*addl_hdrs |= hdr_map.ice_hdr;
301 				hdr_found = true;
302 			}
303 		}
304 
305 		if (!hdr_found)
306 			return false;
307 
308 		/* Find matched ice hash fields according to
309 		 * virtchnl hash fields.
310 		 */
311 		for (j = 0; j < hf_list_len; j++) {
312 			struct ice_vc_hash_field_match_type hf_map = hf_list[j];
313 
314 			if (proto_hdr->type == hf_map.vc_hdr &&
315 			    proto_hdr->field_selector == hf_map.vc_hash_field) {
316 				*hash_flds |= hf_map.ice_hash_field;
317 				break;
318 			}
319 		}
320 	}
321 
322 	return true;
323 }
324 
325 /**
326  * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
327  * RSS offloads
328  * @caps: VF driver negotiated capabilities
329  *
330  * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
331  * else return false
332  */
333 static bool ice_vf_adv_rss_offload_ena(u32 caps)
334 {
335 	return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
336 }
337 
338 /**
339  * ice_vc_handle_rss_cfg
340  * @vf: pointer to the VF info
341  * @msg: pointer to the message buffer
342  * @add: add a RSS config if true, otherwise delete a RSS config
343  *
344  * This function adds/deletes a RSS config
345  */
346 static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
347 {
348 	u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
349 	struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
350 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
351 	struct device *dev = ice_pf_to_dev(vf->pf);
352 	struct ice_hw *hw = &vf->pf->hw;
353 	struct ice_vsi *vsi;
354 
355 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
356 		dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
357 			vf->vf_id);
358 		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
359 		goto error_param;
360 	}
361 
362 	if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
363 		dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
364 			vf->vf_id);
365 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
366 		goto error_param;
367 	}
368 
369 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
370 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
371 		goto error_param;
372 	}
373 
374 	if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
375 	    rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
376 	    rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
377 		dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
378 			vf->vf_id);
379 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
380 		goto error_param;
381 	}
382 
383 	vsi = ice_get_vf_vsi(vf);
384 	if (!vsi) {
385 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
386 		goto error_param;
387 	}
388 
389 	if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
390 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
391 		goto error_param;
392 	}
393 
394 	if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
395 		struct ice_vsi_ctx *ctx;
396 		u8 lut_type, hash_type;
397 		int status;
398 
399 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
400 		hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
401 				ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
402 
403 		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
404 		if (!ctx) {
405 			v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
406 			goto error_param;
407 		}
408 
409 		ctx->info.q_opt_rss =
410 			FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
411 			FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
412 
413 		/* Preserve existing queueing option setting */
414 		ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
415 					  ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
416 		ctx->info.q_opt_tc = vsi->info.q_opt_tc;
417 		ctx->info.q_opt_flags = vsi->info.q_opt_rss;
418 
419 		ctx->info.valid_sections =
420 				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
421 
422 		status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
423 		if (status) {
424 			dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
425 				status, libie_aq_str(hw->adminq.sq_last_status));
426 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
427 		} else {
428 			vsi->info.q_opt_rss = ctx->info.q_opt_rss;
429 		}
430 
431 		kfree(ctx);
432 	} else {
433 		struct ice_rss_hash_cfg cfg;
434 
435 		/* Only check for none raw pattern case */
436 		if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
437 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
438 			goto error_param;
439 		}
440 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
441 		cfg.hash_flds = ICE_HASH_INVALID;
442 		cfg.hdr_type = ICE_RSS_ANY_HEADERS;
443 
444 		if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
445 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
446 			goto error_param;
447 		}
448 
449 		if (add) {
450 			if (ice_add_rss_cfg(hw, vsi, &cfg)) {
451 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
452 				dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
453 					vsi->vsi_num, v_ret);
454 			}
455 		} else {
456 			int status;
457 
458 			status = ice_rem_rss_cfg(hw, vsi->idx, &cfg);
459 			/* We just ignore -ENOENT, because if two configurations
460 			 * share the same profile remove one of them actually
461 			 * removes both, since the profile is deleted.
462 			 */
463 			if (status && status != -ENOENT) {
464 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
465 				dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
466 					vf->vf_id, status);
467 			}
468 		}
469 	}
470 
471 error_param:
472 	return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
473 }
474 
475 /**
476  * ice_vc_config_rss_key
477  * @vf: pointer to the VF info
478  * @msg: pointer to the msg buffer
479  *
480  * Configure the VF's RSS key
481  */
482 static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
483 {
484 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
485 	struct virtchnl_rss_key *vrk =
486 		(struct virtchnl_rss_key *)msg;
487 	struct ice_vsi *vsi;
488 
489 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
490 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
491 		goto error_param;
492 	}
493 
494 	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
495 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
496 		goto error_param;
497 	}
498 
499 	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
500 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
501 		goto error_param;
502 	}
503 
504 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
505 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
506 		goto error_param;
507 	}
508 
509 	vsi = ice_get_vf_vsi(vf);
510 	if (!vsi) {
511 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
512 		goto error_param;
513 	}
514 
515 	if (ice_set_rss_key(vsi, vrk->key))
516 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
517 error_param:
518 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
519 				     NULL, 0);
520 }
521 
522 /**
523  * ice_vc_config_rss_lut
524  * @vf: pointer to the VF info
525  * @msg: pointer to the msg buffer
526  *
527  * Configure the VF's RSS LUT
528  */
529 static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
530 {
531 	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
532 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
533 	struct ice_vsi *vsi;
534 
535 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
536 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
537 		goto error_param;
538 	}
539 
540 	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
541 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
542 		goto error_param;
543 	}
544 
545 	if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
546 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
547 		goto error_param;
548 	}
549 
550 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
551 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
552 		goto error_param;
553 	}
554 
555 	vsi = ice_get_vf_vsi(vf);
556 	if (!vsi) {
557 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
558 		goto error_param;
559 	}
560 
561 	if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
562 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
563 error_param:
564 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
565 				     NULL, 0);
566 }
567 
568 /**
569  * ice_vc_config_rss_hfunc
570  * @vf: pointer to the VF info
571  * @msg: pointer to the msg buffer
572  *
573  * Configure the VF's RSS Hash function
574  */
575 static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
576 {
577 	struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
578 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
579 	u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
580 	struct ice_vsi *vsi;
581 
582 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
583 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
584 		goto error_param;
585 	}
586 
587 	if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
588 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
589 		goto error_param;
590 	}
591 
592 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
593 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
594 		goto error_param;
595 	}
596 
597 	vsi = ice_get_vf_vsi(vf);
598 	if (!vsi) {
599 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
600 		goto error_param;
601 	}
602 
603 	if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
604 		hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
605 
606 	if (ice_set_rss_hfunc(vsi, hfunc))
607 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
608 error_param:
609 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
610 				     NULL, 0);
611 }
612 
613 /**
614  * ice_vc_get_rss_hashcfg - return the RSS Hash configuration
615  * @vf: pointer to the VF info
616  */
617 static int ice_vc_get_rss_hashcfg(struct ice_vf *vf)
618 {
619 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
620 	struct virtchnl_rss_hashcfg *vrh = NULL;
621 	int len = 0, ret;
622 
623 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
624 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
625 		goto err;
626 	}
627 
628 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
629 		dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
630 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
631 		goto err;
632 	}
633 
634 	len = sizeof(struct virtchnl_rss_hashcfg);
635 	vrh = kzalloc(len, GFP_KERNEL);
636 	if (!vrh) {
637 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
638 		len = 0;
639 		goto err;
640 	}
641 
642 	vrh->hashcfg = ICE_DEFAULT_RSS_HASHCFG;
643 err:
644 	/* send the response back to the VF */
645 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, v_ret,
646 				    (u8 *)vrh, len);
647 	kfree(vrh);
648 	return ret;
649 }
650 
/**
 * ice_vc_set_rss_hashcfg - set RSS Hash configuration bits for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Clear the VSI's existing RSS configuration, then program the hash bits the
 * VF requested (if any) and cache them in @vf on success. A failed clear is
 * only reported as an error when the VF asked for hashcfg == 0, i.e. when
 * clearing was the whole request.
 *
 * Return: value from ice_vc_send_msg_to_vf().
 */
static int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_rss_hashcfg *vrh = (struct virtchnl_rss_hashcfg *)msg;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int status;

	dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		dev_err(dev, "RSS not supported by PF\n");
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	/* clear all previously programmed RSS configuration to allow VF drivers
	 * the ability to customize the RSS configuration and/or completely
	 * disable RSS
	 */
	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
	if (status && !vrh->hashcfg) {
		/* only report failure to clear the current RSS configuration if
		 * that was clearly the VF's intention (i.e. vrh->hashcfg = 0)
		 */
		v_ret = ice_err_to_virt_err(status);
		goto err;
	} else if (status) {
		/* allow the VF to update the RSS configuration even on failure
		 * to clear the current RSS configuration in an attempt to keep
		 * RSS in a working state
		 */
		dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
			 vf->vf_id);
	}

	if (vrh->hashcfg) {
		status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg);
		v_ret = ice_err_to_virt_err(status);
	}

	/* save the requested VF configuration */
	if (!v_ret)
		vf->rss_hashcfg = vrh->hashcfg;

	/* send the response to the VF */
err:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret,
				     NULL, 0);
}
718 
719 /**
720  * ice_vc_query_rxdid - query RXDID supported by DDP package
721  * @vf: pointer to VF info
722  *
723  * Called from VF to query a bitmap of supported flexible
724  * descriptor RXDIDs of a DDP package.
725  */
726 static int ice_vc_query_rxdid(struct ice_vf *vf)
727 {
728 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
729 	struct ice_pf *pf = vf->pf;
730 	u64 rxdid;
731 
732 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
733 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
734 		goto err;
735 	}
736 
737 	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)) {
738 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
739 		goto err;
740 	}
741 
742 	rxdid = pf->supported_rxdids;
743 
744 err:
745 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
746 				     v_ret, (u8 *)&rxdid, sizeof(rxdid));
747 }
748 
749 /**
750  * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
751  * @vf: VF to enable/disable VLAN stripping for on initialization
752  *
753  * Set the default for VLAN stripping based on whether a port VLAN is configured
754  * and the current VLAN mode of the device.
755  */
756 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
757 {
758 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
759 
760 	vf->vlan_strip_ena = 0;
761 
762 	if (!vsi)
763 		return -EINVAL;
764 
765 	/* don't modify stripping if port VLAN is configured in SVM since the
766 	 * port VLAN is based on the inner/single VLAN in SVM
767 	 */
768 	if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
769 		return 0;
770 
771 	if (ice_vf_vlan_offload_ena(vf->driver_caps)) {
772 		int err;
773 
774 		err = vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
775 		if (!err)
776 			vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
777 		return err;
778 	}
779 
780 	return vsi->inner_vlan_ops.dis_stripping(vsi);
781 }
782 
783 static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
784 {
785 	if (vf->trusted)
786 		return VLAN_N_VID;
787 	else
788 		return ICE_MAX_VLAN_PER_VF;
789 }
790 
791 /**
792  * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used
793  * @vf: VF that being checked for
794  *
795  * When the device is in double VLAN mode, check whether or not the outer VLAN
796  * is allowed.
797  */
798 static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
799 {
800 	if (ice_vf_is_port_vlan_ena(vf))
801 		return true;
802 
803 	return false;
804 }
805 
/**
 * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
 * @vf: VF that capabilities are being set for
 * @caps: VLAN capabilities to populate
 *
 * Determine VLAN capabilities support based on whether a port VLAN is
 * configured. If a port VLAN is configured then the VF should use the inner
 * filtering/offload capabilities since the port VLAN is using the outer VLAN
 * capabilities.
 */
static void
ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
{
	struct virtchnl_vlan_supported_caps *supported_caps;

	if (ice_vf_outer_vlan_not_allowed(vf)) {
		/* until support for inner VLAN filtering is added when a port
		 * VLAN is configured, only support software offloaded inner
		 * VLANs when a port VLAN is configured in DVM
		 */
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;

		/* inner 802.1Q stripping/insertion only; the outer tag is
		 * owned by the port VLAN
		 */
		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
		caps->offloads.ethertype_match =
			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
	} else {
		/* no port VLAN: full outer filtering plus inner and outer
		 * stripping/insertion offloads
		 */
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
					VIRTCHNL_VLAN_ETHERTYPE_9100 |
					VIRTCHNL_VLAN_ETHERTYPE_AND;
		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
						 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
						 VIRTCHNL_VLAN_ETHERTYPE_9100;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
					VIRTCHNL_VLAN_ETHERTYPE_9100 |
					VIRTCHNL_VLAN_ETHERTYPE_XOR |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
					VIRTCHNL_VLAN_ETHERTYPE_9100 |
					VIRTCHNL_VLAN_ETHERTYPE_XOR |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;

		caps->offloads.ethertype_match =
			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
	}

	caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
}
885 
/**
 * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
 * @vf: VF that capabilities are being set for
 * @caps: VLAN capabilities to populate
 *
 * Determine VLAN capabilities support based on whether a port VLAN is
 * configured. If a port VLAN is configured then the VF does not have any VLAN
 * filtering or offload capabilities since the port VLAN is using the inner VLAN
 * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
 * VLAN filtering and offload capabilities.
 */
static void
ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
{
	struct virtchnl_vlan_supported_caps *supported_caps;

	if (ice_vf_is_port_vlan_ena(vf)) {
		/* port VLAN owns the single VLAN in SVM: advertise nothing */
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
		caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
		caps->filtering.max_filters = 0;
	} else {
		/* no port VLAN: inner 802.1Q filtering and offloads only */
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
		caps->offloads.ethertype_match =
			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
		caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
	}
}
942 
943 /**
944  * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
945  * @vf: VF to determine VLAN capabilities for
946  *
947  * This will only be called if the VF and PF successfully negotiated
948  * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
949  *
950  * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
951  * is configured or not.
952  */
953 static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
954 {
955 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
956 	struct virtchnl_vlan_caps *caps = NULL;
957 	int err, len = 0;
958 
959 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
960 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
961 		goto out;
962 	}
963 
964 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
965 	if (!caps) {
966 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
967 		goto out;
968 	}
969 	len = sizeof(*caps);
970 
971 	if (ice_is_dvm_ena(&vf->pf->hw))
972 		ice_vc_set_dvm_caps(vf, caps);
973 	else
974 		ice_vc_set_svm_caps(vf, caps);
975 
976 	/* store negotiated caps to prevent invalid VF messages */
977 	memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));
978 
979 out:
980 	err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
981 				    v_ret, (u8 *)caps, len);
982 	kfree(caps);
983 	return err;
984 }
985 
986 /**
987  * ice_vc_validate_vlan_tpid - validate VLAN TPID
988  * @filtering_caps: negotiated/supported VLAN filtering capabilities
989  * @tpid: VLAN TPID used for validation
990  *
991  * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
992  * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
993  */
994 static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
995 {
996 	enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
997 
998 	switch (tpid) {
999 	case ETH_P_8021Q:
1000 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
1001 		break;
1002 	case ETH_P_8021AD:
1003 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
1004 		break;
1005 	case ETH_P_QINQ1:
1006 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
1007 		break;
1008 	}
1009 
1010 	if (!(filtering_caps & vlan_ethertype))
1011 		return false;
1012 
1013 	return true;
1014 }
1015 
1016 /**
1017  * ice_vc_is_valid_vlan - validate the virtchnl_vlan
1018  * @vc_vlan: virtchnl_vlan to validate
1019  *
1020  * If the VLAN TCI and VLAN TPID are 0, then this filter is invalid, so return
1021  * false. Otherwise return true.
1022  */
1023 static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
1024 {
1025 	if (!vc_vlan->tci || !vc_vlan->tpid)
1026 		return false;
1027 
1028 	return true;
1029 }
1030 
1031 /**
1032  * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
1033  * @vfc: negotiated/supported VLAN filtering capabilities
1034  * @vfl: VLAN filter list from VF to validate
1035  *
1036  * Validate all of the filters in the VLAN filter list from the VF. If any of
1037  * the checks fail then return false. Otherwise return true.
1038  */
1039 static bool
1040 ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
1041 				 struct virtchnl_vlan_filter_list_v2 *vfl)
1042 {
1043 	u16 i;
1044 
1045 	if (!vfl->num_elements)
1046 		return false;
1047 
1048 	for (i = 0; i < vfl->num_elements; i++) {
1049 		struct virtchnl_vlan_supported_caps *filtering_support =
1050 			&vfc->filtering_support;
1051 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
1052 		struct virtchnl_vlan *outer = &vlan_fltr->outer;
1053 		struct virtchnl_vlan *inner = &vlan_fltr->inner;
1054 
1055 		if ((ice_vc_is_valid_vlan(outer) &&
1056 		     filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
1057 		    (ice_vc_is_valid_vlan(inner) &&
1058 		     filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
1059 			return false;
1060 
1061 		if ((outer->tci_mask &&
1062 		     !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
1063 		    (inner->tci_mask &&
1064 		     !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
1065 			return false;
1066 
1067 		if (((outer->tci & VLAN_PRIO_MASK) &&
1068 		     !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
1069 		    ((inner->tci & VLAN_PRIO_MASK) &&
1070 		     !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
1071 			return false;
1072 
1073 		if ((ice_vc_is_valid_vlan(outer) &&
1074 		     !ice_vc_validate_vlan_tpid(filtering_support->outer,
1075 						outer->tpid)) ||
1076 		    (ice_vc_is_valid_vlan(inner) &&
1077 		     !ice_vc_validate_vlan_tpid(filtering_support->inner,
1078 						inner->tpid)))
1079 			return false;
1080 	}
1081 
1082 	return true;
1083 }
1084 
1085 /**
1086  * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
1087  * @vc_vlan: struct virtchnl_vlan to transform
1088  */
1089 static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
1090 {
1091 	struct ice_vlan vlan = { 0 };
1092 
1093 	vlan.prio = FIELD_GET(VLAN_PRIO_MASK, vc_vlan->tci);
1094 	vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
1095 	vlan.tpid = vc_vlan->tpid;
1096 
1097 	return vlan;
1098 }
1099 
1100 /**
1101  * ice_vc_vlan_action - action to perform on the virthcnl_vlan
1102  * @vsi: VF's VSI used to perform the action
1103  * @vlan_action: function to perform the action with (i.e. add/del)
1104  * @vlan: VLAN filter to perform the action with
1105  */
1106 static int
1107 ice_vc_vlan_action(struct ice_vsi *vsi,
1108 		   int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
1109 		   struct ice_vlan *vlan)
1110 {
1111 	int err;
1112 
1113 	err = vlan_action(vsi, vlan);
1114 	if (err)
1115 		return err;
1116 
1117 	return 0;
1118 }
1119 
1120 /**
1121  * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
1122  * @vf: VF used to delete the VLAN(s)
1123  * @vsi: VF's VSI used to delete the VLAN(s)
1124  * @vfl: virthchnl filter list used to delete the filters
1125  */
1126 static int
1127 ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
1128 		 struct virtchnl_vlan_filter_list_v2 *vfl)
1129 {
1130 	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
1131 	int err;
1132 	u16 i;
1133 
1134 	for (i = 0; i < vfl->num_elements; i++) {
1135 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
1136 		struct virtchnl_vlan *vc_vlan;
1137 
1138 		vc_vlan = &vlan_fltr->outer;
1139 		if (ice_vc_is_valid_vlan(vc_vlan)) {
1140 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
1141 
1142 			err = ice_vc_vlan_action(vsi,
1143 						 vsi->outer_vlan_ops.del_vlan,
1144 						 &vlan);
1145 			if (err)
1146 				return err;
1147 
1148 			if (vlan_promisc)
1149 				ice_vf_dis_vlan_promisc(vsi, &vlan);
1150 
1151 			/* Disable VLAN filtering when only VLAN 0 is left */
1152 			if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
1153 				err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
1154 				if (err)
1155 					return err;
1156 			}
1157 		}
1158 
1159 		vc_vlan = &vlan_fltr->inner;
1160 		if (ice_vc_is_valid_vlan(vc_vlan)) {
1161 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
1162 
1163 			err = ice_vc_vlan_action(vsi,
1164 						 vsi->inner_vlan_ops.del_vlan,
1165 						 &vlan);
1166 			if (err)
1167 				return err;
1168 
1169 			/* no support for VLAN promiscuous on inner VLAN unless
1170 			 * we are in Single VLAN Mode (SVM)
1171 			 */
1172 			if (!ice_is_dvm_ena(&vsi->back->hw)) {
1173 				if (vlan_promisc)
1174 					ice_vf_dis_vlan_promisc(vsi, &vlan);
1175 
1176 				/* Disable VLAN filtering when only VLAN 0 is left */
1177 				if (!ice_vsi_has_non_zero_vlans(vsi)) {
1178 					err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
1179 					if (err)
1180 						return err;
1181 				}
1182 			}
1183 		}
1184 	}
1185 
1186 	return 0;
1187 }
1188 
1189 /**
1190  * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
1191  * @vf: VF the message was received from
1192  * @msg: message received from the VF
1193  */
1194 static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
1195 {
1196 	struct virtchnl_vlan_filter_list_v2 *vfl =
1197 		(struct virtchnl_vlan_filter_list_v2 *)msg;
1198 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1199 	struct ice_vsi *vsi;
1200 
1201 	if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
1202 					      vfl)) {
1203 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1204 		goto out;
1205 	}
1206 
1207 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
1208 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1209 		goto out;
1210 	}
1211 
1212 	vsi = ice_get_vf_vsi(vf);
1213 	if (!vsi) {
1214 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1215 		goto out;
1216 	}
1217 
1218 	if (ice_vc_del_vlans(vf, vsi, vfl))
1219 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1220 
1221 out:
1222 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
1223 				     0);
1224 }
1225 
1226 /**
1227  * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
1228  * @vf: VF used to add the VLAN(s)
1229  * @vsi: VF's VSI used to add the VLAN(s)
1230  * @vfl: virthchnl filter list used to add the filters
1231  */
1232 static int
1233 ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
1234 		 struct virtchnl_vlan_filter_list_v2 *vfl)
1235 {
1236 	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
1237 	int err;
1238 	u16 i;
1239 
1240 	for (i = 0; i < vfl->num_elements; i++) {
1241 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
1242 		struct virtchnl_vlan *vc_vlan;
1243 
1244 		vc_vlan = &vlan_fltr->outer;
1245 		if (ice_vc_is_valid_vlan(vc_vlan)) {
1246 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
1247 
1248 			err = ice_vc_vlan_action(vsi,
1249 						 vsi->outer_vlan_ops.add_vlan,
1250 						 &vlan);
1251 			if (err)
1252 				return err;
1253 
1254 			if (vlan_promisc) {
1255 				err = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
1256 				if (err)
1257 					return err;
1258 			}
1259 
1260 			/* Enable VLAN filtering on first non-zero VLAN */
1261 			if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
1262 				err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
1263 				if (err)
1264 					return err;
1265 			}
1266 		}
1267 
1268 		vc_vlan = &vlan_fltr->inner;
1269 		if (ice_vc_is_valid_vlan(vc_vlan)) {
1270 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
1271 
1272 			err = ice_vc_vlan_action(vsi,
1273 						 vsi->inner_vlan_ops.add_vlan,
1274 						 &vlan);
1275 			if (err)
1276 				return err;
1277 
1278 			/* no support for VLAN promiscuous on inner VLAN unless
1279 			 * we are in Single VLAN Mode (SVM)
1280 			 */
1281 			if (!ice_is_dvm_ena(&vsi->back->hw)) {
1282 				if (vlan_promisc) {
1283 					err = ice_vf_ena_vlan_promisc(vf, vsi,
1284 								      &vlan);
1285 					if (err)
1286 						return err;
1287 				}
1288 
1289 				/* Enable VLAN filtering on first non-zero VLAN */
1290 				if (vf->spoofchk && vlan.vid) {
1291 					err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
1292 					if (err)
1293 						return err;
1294 				}
1295 			}
1296 		}
1297 	}
1298 
1299 	return 0;
1300 }
1301 
1302 /**
1303  * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
1304  * @vsi: VF VSI used to get number of existing VLAN filters
1305  * @vfc: negotiated/supported VLAN filtering capabilities
1306  * @vfl: VLAN filter list from VF to validate
1307  *
1308  * Validate all of the filters in the VLAN filter list from the VF during the
1309  * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
1310  * Otherwise return true.
1311  */
1312 static bool
1313 ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
1314 				     struct virtchnl_vlan_filtering_caps *vfc,
1315 				     struct virtchnl_vlan_filter_list_v2 *vfl)
1316 {
1317 	u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
1318 		vfl->num_elements;
1319 
1320 	if (num_requested_filters > vfc->max_filters)
1321 		return false;
1322 
1323 	return ice_vc_validate_vlan_filter_list(vfc, vfl);
1324 }
1325 
1326 /**
1327  * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
1328  * @vf: VF the message was received from
1329  * @msg: message received from the VF
1330  */
1331 static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
1332 {
1333 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1334 	struct virtchnl_vlan_filter_list_v2 *vfl =
1335 		(struct virtchnl_vlan_filter_list_v2 *)msg;
1336 	struct ice_vsi *vsi;
1337 
1338 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1339 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1340 		goto out;
1341 	}
1342 
1343 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
1344 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1345 		goto out;
1346 	}
1347 
1348 	vsi = ice_get_vf_vsi(vf);
1349 	if (!vsi) {
1350 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1351 		goto out;
1352 	}
1353 
1354 	if (!ice_vc_validate_add_vlan_filter_list(vsi,
1355 						  &vf->vlan_v2_caps.filtering,
1356 						  vfl)) {
1357 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1358 		goto out;
1359 	}
1360 
1361 	if (ice_vc_add_vlans(vf, vsi, vfl))
1362 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1363 
1364 out:
1365 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
1366 				     0);
1367 }
1368 
1369 /**
1370  * ice_vc_valid_vlan_setting - validate VLAN setting
1371  * @negotiated_settings: negotiated VLAN settings during VF init
1372  * @ethertype_setting: ethertype(s) requested for the VLAN setting
1373  */
1374 static bool
1375 ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
1376 {
1377 	if (ethertype_setting && !(negotiated_settings & ethertype_setting))
1378 		return false;
1379 
1380 	/* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
1381 	 * VIRTHCNL_VLAN_ETHERTYPE_AND is not negotiated/supported
1382 	 */
1383 	if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
1384 	    hweight32(ethertype_setting) > 1)
1385 		return false;
1386 
1387 	/* ability to modify the VLAN setting was not negotiated */
1388 	if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
1389 		return false;
1390 
1391 	return true;
1392 }
1393 
1394 /**
1395  * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
1396  * @caps: negotiated VLAN settings during VF init
1397  * @msg: message to validate
1398  *
1399  * Used to validate any VLAN virtchnl message sent as a
1400  * virtchnl_vlan_setting structure. Validates the message against the
1401  * negotiated/supported caps during VF driver init.
1402  */
1403 static bool
1404 ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
1405 			      struct virtchnl_vlan_setting *msg)
1406 {
1407 	if ((!msg->outer_ethertype_setting &&
1408 	     !msg->inner_ethertype_setting) ||
1409 	    (!caps->outer && !caps->inner))
1410 		return false;
1411 
1412 	if (msg->outer_ethertype_setting &&
1413 	    !ice_vc_valid_vlan_setting(caps->outer,
1414 				       msg->outer_ethertype_setting))
1415 		return false;
1416 
1417 	if (msg->inner_ethertype_setting &&
1418 	    !ice_vc_valid_vlan_setting(caps->inner,
1419 				       msg->inner_ethertype_setting))
1420 		return false;
1421 
1422 	return true;
1423 }
1424 
1425 /**
1426  * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
1427  * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
1428  * @tpid: VLAN TPID to populate
1429  */
1430 static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
1431 {
1432 	switch (ethertype_setting) {
1433 	case VIRTCHNL_VLAN_ETHERTYPE_8100:
1434 		*tpid = ETH_P_8021Q;
1435 		break;
1436 	case VIRTCHNL_VLAN_ETHERTYPE_88A8:
1437 		*tpid = ETH_P_8021AD;
1438 		break;
1439 	case VIRTCHNL_VLAN_ETHERTYPE_9100:
1440 		*tpid = ETH_P_QINQ1;
1441 		break;
1442 	default:
1443 		*tpid = 0;
1444 		return -EINVAL;
1445 	}
1446 
1447 	return 0;
1448 }
1449 
1450 /**
1451  * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
1452  * @vsi: VF's VSI used to enable the VLAN offload
1453  * @ena_offload: function used to enable the VLAN offload
1454  * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
1455  */
1456 static int
1457 ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
1458 			int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
1459 			u32 ethertype_setting)
1460 {
1461 	u16 tpid;
1462 	int err;
1463 
1464 	err = ice_vc_get_tpid(ethertype_setting, &tpid);
1465 	if (err)
1466 		return err;
1467 
1468 	err = ena_offload(vsi, tpid);
1469 	if (err)
1470 		return err;
1471 
1472 	return 0;
1473 }
1474 
1475 /**
1476  * ice_vc_ena_vlan_stripping_v2_msg
1477  * @vf: VF the message was received from
1478  * @msg: message received from the VF
1479  *
1480  * virthcnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
1481  */
1482 static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
1483 {
1484 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1485 	struct virtchnl_vlan_supported_caps *stripping_support;
1486 	struct virtchnl_vlan_setting *strip_msg =
1487 		(struct virtchnl_vlan_setting *)msg;
1488 	u32 ethertype_setting;
1489 	struct ice_vsi *vsi;
1490 
1491 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1492 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1493 		goto out;
1494 	}
1495 
1496 	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
1497 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1498 		goto out;
1499 	}
1500 
1501 	vsi = ice_get_vf_vsi(vf);
1502 	if (!vsi) {
1503 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1504 		goto out;
1505 	}
1506 
1507 	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
1508 	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
1509 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1510 		goto out;
1511 	}
1512 
1513 	if (ice_vsi_is_rxq_crc_strip_dis(vsi)) {
1514 		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
1515 		goto out;
1516 	}
1517 
1518 	ethertype_setting = strip_msg->outer_ethertype_setting;
1519 	if (ethertype_setting) {
1520 		if (ice_vc_ena_vlan_offload(vsi,
1521 					    vsi->outer_vlan_ops.ena_stripping,
1522 					    ethertype_setting)) {
1523 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1524 			goto out;
1525 		} else {
1526 			enum ice_l2tsel l2tsel =
1527 				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
1528 
1529 			/* PF tells the VF that the outer VLAN tag is always
1530 			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
1531 			 * inner is always extracted to
1532 			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
1533 			 * support outer stripping so the first tag always ends
1534 			 * up in L2TAG2_2ND and the second/inner tag, if
1535 			 * enabled, is extracted in L2TAG1.
1536 			 */
1537 			ice_vsi_update_l2tsel(vsi, l2tsel);
1538 
1539 			vf->vlan_strip_ena |= ICE_OUTER_VLAN_STRIP_ENA;
1540 		}
1541 	}
1542 
1543 	ethertype_setting = strip_msg->inner_ethertype_setting;
1544 	if (ethertype_setting &&
1545 	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
1546 				    ethertype_setting)) {
1547 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1548 		goto out;
1549 	}
1550 
1551 	if (ethertype_setting)
1552 		vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
1553 
1554 out:
1555 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
1556 				     v_ret, NULL, 0);
1557 }
1558 
1559 /**
1560  * ice_vc_dis_vlan_stripping_v2_msg
1561  * @vf: VF the message was received from
1562  * @msg: message received from the VF
1563  *
1564  * virthcnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
1565  */
1566 static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
1567 {
1568 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1569 	struct virtchnl_vlan_supported_caps *stripping_support;
1570 	struct virtchnl_vlan_setting *strip_msg =
1571 		(struct virtchnl_vlan_setting *)msg;
1572 	u32 ethertype_setting;
1573 	struct ice_vsi *vsi;
1574 
1575 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1576 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1577 		goto out;
1578 	}
1579 
1580 	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
1581 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1582 		goto out;
1583 	}
1584 
1585 	vsi = ice_get_vf_vsi(vf);
1586 	if (!vsi) {
1587 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1588 		goto out;
1589 	}
1590 
1591 	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
1592 	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
1593 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1594 		goto out;
1595 	}
1596 
1597 	ethertype_setting = strip_msg->outer_ethertype_setting;
1598 	if (ethertype_setting) {
1599 		if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
1600 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1601 			goto out;
1602 		} else {
1603 			enum ice_l2tsel l2tsel =
1604 				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
1605 
1606 			/* PF tells the VF that the outer VLAN tag is always
1607 			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
1608 			 * inner is always extracted to
1609 			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
1610 			 * support inner stripping while outer stripping is
1611 			 * disabled so that the first and only tag is extracted
1612 			 * in L2TAG1.
1613 			 */
1614 			ice_vsi_update_l2tsel(vsi, l2tsel);
1615 
1616 			vf->vlan_strip_ena &= ~ICE_OUTER_VLAN_STRIP_ENA;
1617 		}
1618 	}
1619 
1620 	ethertype_setting = strip_msg->inner_ethertype_setting;
1621 	if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
1622 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1623 		goto out;
1624 	}
1625 
1626 	if (ethertype_setting)
1627 		vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;
1628 
1629 out:
1630 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
1631 				     v_ret, NULL, 0);
1632 }
1633 
1634 /**
1635  * ice_vc_ena_vlan_insertion_v2_msg
1636  * @vf: VF the message was received from
1637  * @msg: message received from the VF
1638  *
1639  * virthcnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
1640  */
1641 static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
1642 {
1643 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1644 	struct virtchnl_vlan_supported_caps *insertion_support;
1645 	struct virtchnl_vlan_setting *insertion_msg =
1646 		(struct virtchnl_vlan_setting *)msg;
1647 	u32 ethertype_setting;
1648 	struct ice_vsi *vsi;
1649 
1650 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1651 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1652 		goto out;
1653 	}
1654 
1655 	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
1656 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1657 		goto out;
1658 	}
1659 
1660 	vsi = ice_get_vf_vsi(vf);
1661 	if (!vsi) {
1662 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1663 		goto out;
1664 	}
1665 
1666 	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
1667 	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
1668 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1669 		goto out;
1670 	}
1671 
1672 	ethertype_setting = insertion_msg->outer_ethertype_setting;
1673 	if (ethertype_setting &&
1674 	    ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
1675 				    ethertype_setting)) {
1676 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1677 		goto out;
1678 	}
1679 
1680 	ethertype_setting = insertion_msg->inner_ethertype_setting;
1681 	if (ethertype_setting &&
1682 	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
1683 				    ethertype_setting)) {
1684 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1685 		goto out;
1686 	}
1687 
1688 out:
1689 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
1690 				     v_ret, NULL, 0);
1691 }
1692 
1693 /**
1694  * ice_vc_dis_vlan_insertion_v2_msg
1695  * @vf: VF the message was received from
1696  * @msg: message received from the VF
1697  *
1698  * virthcnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
1699  */
1700 static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
1701 {
1702 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1703 	struct virtchnl_vlan_supported_caps *insertion_support;
1704 	struct virtchnl_vlan_setting *insertion_msg =
1705 		(struct virtchnl_vlan_setting *)msg;
1706 	u32 ethertype_setting;
1707 	struct ice_vsi *vsi;
1708 
1709 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1710 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1711 		goto out;
1712 	}
1713 
1714 	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
1715 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1716 		goto out;
1717 	}
1718 
1719 	vsi = ice_get_vf_vsi(vf);
1720 	if (!vsi) {
1721 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1722 		goto out;
1723 	}
1724 
1725 	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
1726 	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
1727 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1728 		goto out;
1729 	}
1730 
1731 	ethertype_setting = insertion_msg->outer_ethertype_setting;
1732 	if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
1733 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1734 		goto out;
1735 	}
1736 
1737 	ethertype_setting = insertion_msg->inner_ethertype_setting;
1738 	if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
1739 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1740 		goto out;
1741 	}
1742 
1743 out:
1744 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
1745 				     v_ret, NULL, 0);
1746 }
1747 
/**
 * ice_vc_get_ptp_cap - handle VIRTCHNL_OP_1588_PTP_GET_CAPS from the VF
 * @vf: VF the message was received from
 * @msg: PTP capabilities requested by the VF
 *
 * If the VF requested any of the PF-supported PTP capabilities (Rx
 * timestamping, PHC reads), grant all of them at once by storing the full
 * supported set in vf->ptp_caps.
 *
 * Return: result of ice_vc_send_msg_to_vf() for the reply sent to the VF.
 */
static int ice_vc_get_ptp_cap(struct ice_vf *vf,
			      const struct virtchnl_ptp_caps *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	u32 caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
		   VIRTCHNL_1588_PTP_CAP_READ_PHC;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		goto err;

	v_ret = VIRTCHNL_STATUS_SUCCESS;

	/* grant the whole supported set if any supported bit was requested */
	if (msg->caps & caps)
		vf->ptp_caps = caps;

err:
	/* send the response back to the VF */
	/* NOTE(review): the payload pointer is &vf->ptp_caps but the length
	 * is sizeof(struct virtchnl_ptp_caps) — confirm the two layouts
	 * match (vf->ptp_caps declaration is outside this file's view)
	 */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_CAPS, v_ret,
				     (u8 *)&vf->ptp_caps,
				     sizeof(struct virtchnl_ptp_caps));
}
1769 
1770 static int ice_vc_get_phc_time(struct ice_vf *vf)
1771 {
1772 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1773 	struct virtchnl_phc_time *phc_time = NULL;
1774 	struct ice_pf *pf = vf->pf;
1775 	u32 len = 0;
1776 	int ret;
1777 
1778 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1779 		goto err;
1780 
1781 	v_ret = VIRTCHNL_STATUS_SUCCESS;
1782 
1783 	phc_time = kzalloc(sizeof(*phc_time), GFP_KERNEL);
1784 	if (!phc_time) {
1785 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1786 		goto err;
1787 	}
1788 
1789 	len = sizeof(*phc_time);
1790 
1791 	phc_time->time = ice_ptp_read_src_clk_reg(pf, NULL);
1792 
1793 err:
1794 	/* send the response back to the VF */
1795 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_TIME, v_ret,
1796 				    (u8 *)phc_time, len);
1797 	kfree(phc_time);
1798 	return ret;
1799 }
1800 
/* Default virtchnl op table used for VFs; see ice_virtchnl_set_dflt_ops() */
static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
	.get_ver_msg = ice_vc_get_ver_msg,
	.get_vf_res_msg = ice_vc_get_vf_res_msg,
	.reset_vf = ice_vc_reset_vf_msg,
	.add_mac_addr_msg = ice_vc_add_mac_addr_msg,
	.del_mac_addr_msg = ice_vc_del_mac_addr_msg,
	.cfg_qs_msg = ice_vc_cfg_qs_msg,
	.ena_qs_msg = ice_vc_ena_qs_msg,
	.dis_qs_msg = ice_vc_dis_qs_msg,
	.request_qs_msg = ice_vc_request_qs_msg,
	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
	.config_rss_key = ice_vc_config_rss_key,
	.config_rss_lut = ice_vc_config_rss_lut,
	.config_rss_hfunc = ice_vc_config_rss_hfunc,
	.get_stats_msg = ice_vc_get_stats_msg,
	.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
	.add_vlan_msg = ice_vc_add_vlan_msg,
	.remove_vlan_msg = ice_vc_remove_vlan_msg,
	.query_rxdid = ice_vc_query_rxdid,
	.get_rss_hashcfg = ice_vc_get_rss_hashcfg,
	.set_rss_hashcfg = ice_vc_set_rss_hashcfg,
	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
	/* VLAN V2 handlers, active once VIRTCHNL_VF_OFFLOAD_VLAN_V2 is
	 * negotiated
	 */
	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
	.get_qos_caps = ice_vc_get_qos_caps,
	.cfg_q_bw = ice_vc_cfg_q_bw,
	.cfg_q_quanta = ice_vc_cfg_q_quanta,
	/* 1588 PTP handlers */
	.get_ptp_cap = ice_vc_get_ptp_cap,
	.get_phc_time = ice_vc_get_phc_time,
	/* If you add a new op here please make sure to add it to
	 * ice_virtchnl_repr_ops as well.
	 */
};
1843 
1844 /**
1845  * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
1846  * @vf: the VF to switch ops
1847  */
1848 void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
1849 {
1850 	vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
1851 }
1852 
1853 /**
1854  * ice_vc_repr_add_mac
1855  * @vf: pointer to VF
1856  * @msg: virtchannel message
1857  *
1858  * When port representors are created, we do not add MAC rule
1859  * to firmware, we store it so that PF could report same
1860  * MAC as VF.
1861  */
1862 static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
1863 {
1864 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1865 	struct virtchnl_ether_addr_list *al =
1866 	    (struct virtchnl_ether_addr_list *)msg;
1867 	struct ice_vsi *vsi;
1868 	struct ice_pf *pf;
1869 	int i;
1870 
1871 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
1872 	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
1873 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1874 		goto handle_mac_exit;
1875 	}
1876 
1877 	pf = vf->pf;
1878 
1879 	vsi = ice_get_vf_vsi(vf);
1880 	if (!vsi) {
1881 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1882 		goto handle_mac_exit;
1883 	}
1884 
1885 	for (i = 0; i < al->num_elements; i++) {
1886 		u8 *mac_addr = al->list[i].addr;
1887 
1888 		if (!is_unicast_ether_addr(mac_addr) ||
1889 		    ether_addr_equal(mac_addr, vf->hw_lan_addr))
1890 			continue;
1891 
1892 		if (vf->pf_set_mac) {
1893 			dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
1894 			v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
1895 			goto handle_mac_exit;
1896 		}
1897 
1898 		ice_vfhw_mac_add(vf, &al->list[i]);
1899 		break;
1900 	}
1901 
1902 handle_mac_exit:
1903 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1904 				     v_ret, NULL, 0);
1905 }
1906 
1907 /**
1908  * ice_vc_repr_del_mac - response with success for deleting MAC
1909  * @vf: pointer to VF
1910  * @msg: virtchannel message
1911  *
1912  * Respond with success to not break normal VF flow.
1913  * For legacy VF driver try to update cached MAC address.
1914  */
1915 static int
1916 ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg)
1917 {
1918 	struct virtchnl_ether_addr_list *al =
1919 		(struct virtchnl_ether_addr_list *)msg;
1920 
1921 	ice_update_legacy_cached_mac(vf, &al->list[0]);
1922 
1923 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
1924 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
1925 }
1926 
1927 static int
1928 ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
1929 {
1930 	dev_dbg(ice_pf_to_dev(vf->pf),
1931 		"Can't config promiscuous mode in switchdev mode for VF %d\n",
1932 		vf->vf_id);
1933 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1934 				     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
1935 				     NULL, 0);
1936 }
1937 
/* virtchnl dispatch table used while the VF is in switchdev (port
 * representor) mode. Identical to the default table except that the MAC
 * add/del and promiscuous-mode handlers are replaced with
 * representor-specific variants defined above.
 */
static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
	.get_ver_msg = ice_vc_get_ver_msg,
	.get_vf_res_msg = ice_vc_get_vf_res_msg,
	.reset_vf = ice_vc_reset_vf_msg,
	.add_mac_addr_msg = ice_vc_repr_add_mac,
	.del_mac_addr_msg = ice_vc_repr_del_mac,
	.cfg_qs_msg = ice_vc_cfg_qs_msg,
	.ena_qs_msg = ice_vc_ena_qs_msg,
	.dis_qs_msg = ice_vc_dis_qs_msg,
	.request_qs_msg = ice_vc_request_qs_msg,
	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
	.config_rss_key = ice_vc_config_rss_key,
	.config_rss_lut = ice_vc_config_rss_lut,
	.config_rss_hfunc = ice_vc_config_rss_hfunc,
	.get_stats_msg = ice_vc_get_stats_msg,
	.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
	.add_vlan_msg = ice_vc_add_vlan_msg,
	.remove_vlan_msg = ice_vc_remove_vlan_msg,
	.query_rxdid = ice_vc_query_rxdid,
	.get_rss_hashcfg = ice_vc_get_rss_hashcfg,
	.set_rss_hashcfg = ice_vc_set_rss_hashcfg,
	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
	.get_qos_caps = ice_vc_get_qos_caps,
	.cfg_q_bw = ice_vc_cfg_q_bw,
	.cfg_q_quanta = ice_vc_cfg_q_quanta,
	.get_ptp_cap = ice_vc_get_ptp_cap,
	.get_phc_time = ice_vc_get_phc_time,
};
1977 
1978 /**
1979  * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops
1980  * @vf: the VF to switch ops
1981  */
1982 void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
1983 {
1984 	vf->virtchnl_ops = &ice_virtchnl_repr_ops;
1985 }
1986 
1987 /**
1988  * ice_is_malicious_vf - check if this vf might be overflowing mailbox
1989  * @vf: the VF to check
1990  * @mbxdata: data about the state of the mailbox
1991  *
1992  * Detect if a given VF might be malicious and attempting to overflow the PF
1993  * mailbox. If so, log a warning message and ignore this event.
1994  */
1995 static bool
1996 ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata)
1997 {
1998 	bool report_malvf = false;
1999 	struct device *dev;
2000 	struct ice_pf *pf;
2001 	int status;
2002 
2003 	pf = vf->pf;
2004 	dev = ice_pf_to_dev(pf);
2005 
2006 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
2007 		return vf->mbx_info.malicious;
2008 
2009 	/* check to see if we have a newly malicious VF */
2010 	status = ice_mbx_vf_state_handler(&pf->hw, mbxdata, &vf->mbx_info,
2011 					  &report_malvf);
2012 	if (status)
2013 		dev_warn_ratelimited(dev, "Unable to check status of mailbox overflow for VF %u MAC %pM, status %d\n",
2014 				     vf->vf_id, vf->dev_lan_addr, status);
2015 
2016 	if (report_malvf) {
2017 		struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2018 		u8 zero_addr[ETH_ALEN] = {};
2019 
2020 		dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
2021 			 vf->dev_lan_addr,
2022 			 pf_vsi ? pf_vsi->netdev->dev_addr : zero_addr);
2023 	}
2024 
2025 	return vf->mbx_info.malicious;
2026 }
2027 
2028 /**
2029  * ice_vc_process_vf_msg - Process request from VF
2030  * @pf: pointer to the PF structure
2031  * @event: pointer to the AQ event
2032  * @mbxdata: information used to detect VF attempting mailbox overflow
2033  *
2034  * Called from the common asq/arq handler to process request from VF. When this
2035  * flow is used for devices with hardware VF to PF message queue overflow
2036  * support (ICE_F_MBX_LIMIT) mbxdata is set to NULL and ice_is_malicious_vf
2037  * check is skipped.
2038  */
2039 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
2040 			   struct ice_mbx_data *mbxdata)
2041 {
2042 	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
2043 	s16 vf_id = le16_to_cpu(event->desc.retval);
2044 	const struct ice_virtchnl_ops *ops;
2045 	u16 msglen = event->msg_len;
2046 	u8 *msg = event->msg_buf;
2047 	struct ice_vf *vf = NULL;
2048 	struct device *dev;
2049 	int err = 0;
2050 
2051 	dev = ice_pf_to_dev(pf);
2052 
2053 	vf = ice_get_vf_by_id(pf, vf_id);
2054 	if (!vf) {
2055 		dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
2056 			vf_id, v_opcode, msglen);
2057 		return;
2058 	}
2059 
2060 	mutex_lock(&vf->cfg_lock);
2061 
2062 	/* Check if the VF is trying to overflow the mailbox */
2063 	if (mbxdata && ice_is_malicious_vf(vf, mbxdata))
2064 		goto finish;
2065 
2066 	/* Check if VF is disabled. */
2067 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
2068 		err = -EPERM;
2069 		goto error_handler;
2070 	}
2071 
2072 	ops = vf->virtchnl_ops;
2073 
2074 	/* Perform basic checks on the msg */
2075 	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
2076 	if (err) {
2077 		if (err == VIRTCHNL_STATUS_ERR_PARAM)
2078 			err = -EPERM;
2079 		else
2080 			err = -EINVAL;
2081 	}
2082 
2083 error_handler:
2084 	if (err) {
2085 		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
2086 				      NULL, 0);
2087 		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
2088 			vf_id, v_opcode, msglen, err);
2089 		goto finish;
2090 	}
2091 
2092 	if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
2093 		ice_vc_send_msg_to_vf(vf, v_opcode,
2094 				      VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
2095 				      0);
2096 		goto finish;
2097 	}
2098 
2099 	switch (v_opcode) {
2100 	case VIRTCHNL_OP_VERSION:
2101 		err = ops->get_ver_msg(vf, msg);
2102 		break;
2103 	case VIRTCHNL_OP_GET_VF_RESOURCES:
2104 		err = ops->get_vf_res_msg(vf, msg);
2105 		if (ice_vf_init_vlan_stripping(vf))
2106 			dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
2107 				vf->vf_id);
2108 		ice_vc_notify_vf_link_state(vf);
2109 		break;
2110 	case VIRTCHNL_OP_RESET_VF:
2111 		ops->reset_vf(vf);
2112 		break;
2113 	case VIRTCHNL_OP_ADD_ETH_ADDR:
2114 		err = ops->add_mac_addr_msg(vf, msg);
2115 		break;
2116 	case VIRTCHNL_OP_DEL_ETH_ADDR:
2117 		err = ops->del_mac_addr_msg(vf, msg);
2118 		break;
2119 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
2120 		err = ops->cfg_qs_msg(vf, msg);
2121 		break;
2122 	case VIRTCHNL_OP_ENABLE_QUEUES:
2123 		err = ops->ena_qs_msg(vf, msg);
2124 		ice_vc_notify_vf_link_state(vf);
2125 		break;
2126 	case VIRTCHNL_OP_DISABLE_QUEUES:
2127 		err = ops->dis_qs_msg(vf, msg);
2128 		break;
2129 	case VIRTCHNL_OP_REQUEST_QUEUES:
2130 		err = ops->request_qs_msg(vf, msg);
2131 		break;
2132 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2133 		err = ops->cfg_irq_map_msg(vf, msg);
2134 		break;
2135 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
2136 		err = ops->config_rss_key(vf, msg);
2137 		break;
2138 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
2139 		err = ops->config_rss_lut(vf, msg);
2140 		break;
2141 	case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
2142 		err = ops->config_rss_hfunc(vf, msg);
2143 		break;
2144 	case VIRTCHNL_OP_GET_STATS:
2145 		err = ops->get_stats_msg(vf, msg);
2146 		break;
2147 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
2148 		err = ops->cfg_promiscuous_mode_msg(vf, msg);
2149 		break;
2150 	case VIRTCHNL_OP_ADD_VLAN:
2151 		err = ops->add_vlan_msg(vf, msg);
2152 		break;
2153 	case VIRTCHNL_OP_DEL_VLAN:
2154 		err = ops->remove_vlan_msg(vf, msg);
2155 		break;
2156 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
2157 		err = ops->query_rxdid(vf);
2158 		break;
2159 	case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
2160 		err = ops->get_rss_hashcfg(vf);
2161 		break;
2162 	case VIRTCHNL_OP_SET_RSS_HASHCFG:
2163 		err = ops->set_rss_hashcfg(vf, msg);
2164 		break;
2165 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2166 		err = ops->ena_vlan_stripping(vf);
2167 		break;
2168 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2169 		err = ops->dis_vlan_stripping(vf);
2170 		break;
2171 	case VIRTCHNL_OP_ADD_FDIR_FILTER:
2172 		err = ops->add_fdir_fltr_msg(vf, msg);
2173 		break;
2174 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
2175 		err = ops->del_fdir_fltr_msg(vf, msg);
2176 		break;
2177 	case VIRTCHNL_OP_ADD_RSS_CFG:
2178 		err = ops->handle_rss_cfg_msg(vf, msg, true);
2179 		break;
2180 	case VIRTCHNL_OP_DEL_RSS_CFG:
2181 		err = ops->handle_rss_cfg_msg(vf, msg, false);
2182 		break;
2183 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
2184 		err = ops->get_offload_vlan_v2_caps(vf);
2185 		break;
2186 	case VIRTCHNL_OP_ADD_VLAN_V2:
2187 		err = ops->add_vlan_v2_msg(vf, msg);
2188 		break;
2189 	case VIRTCHNL_OP_DEL_VLAN_V2:
2190 		err = ops->remove_vlan_v2_msg(vf, msg);
2191 		break;
2192 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
2193 		err = ops->ena_vlan_stripping_v2_msg(vf, msg);
2194 		break;
2195 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
2196 		err = ops->dis_vlan_stripping_v2_msg(vf, msg);
2197 		break;
2198 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
2199 		err = ops->ena_vlan_insertion_v2_msg(vf, msg);
2200 		break;
2201 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
2202 		err = ops->dis_vlan_insertion_v2_msg(vf, msg);
2203 		break;
2204 	case VIRTCHNL_OP_GET_QOS_CAPS:
2205 		err = ops->get_qos_caps(vf);
2206 		break;
2207 	case VIRTCHNL_OP_CONFIG_QUEUE_BW:
2208 		err = ops->cfg_q_bw(vf, msg);
2209 		break;
2210 	case VIRTCHNL_OP_CONFIG_QUANTA:
2211 		err = ops->cfg_q_quanta(vf, msg);
2212 		break;
2213 	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
2214 		err = ops->get_ptp_cap(vf, (const void *)msg);
2215 		break;
2216 	case VIRTCHNL_OP_1588_PTP_GET_TIME:
2217 		err = ops->get_phc_time(vf);
2218 		break;
2219 	case VIRTCHNL_OP_UNKNOWN:
2220 	default:
2221 		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
2222 			vf_id);
2223 		err = ice_vc_send_msg_to_vf(vf, v_opcode,
2224 					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
2225 					    NULL, 0);
2226 		break;
2227 	}
2228 	if (err) {
2229 		/* Helper function cares less about error return values here
2230 		 * as it is busy with pending work.
2231 		 */
2232 		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
2233 			 vf_id, v_opcode, err);
2234 	}
2235 
2236 finish:
2237 	mutex_unlock(&vf->cfg_lock);
2238 	ice_put_vf(vf);
2239 }
2240