xref: /linux/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c (revision 6e7fd890f1d6ac83805409e9c346240de2705584)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021-2023, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_base.h"
6 #include "ice_lib.h"
7 #include "ice_flow.h"
8 #include "ice_vf_lib_private.h"
9 
10 #define to_fltr_conf_from_desc(p) \
11 	container_of(p, struct virtchnl_fdir_fltr_conf, input)
12 
13 #define GTPU_TEID_OFFSET 4
14 #define GTPU_EH_QFI_OFFSET 1
15 #define GTPU_EH_QFI_MASK 0x3F
16 #define PFCP_S_OFFSET 0
17 #define PFCP_S_MASK 0x1
18 #define PFCP_PORT_NR 8805
19 
20 #define FDIR_INSET_FLAG_ESP_S 0
21 #define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
22 #define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
23 #define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
24 
25 enum ice_fdir_tunnel_type {
26 	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
27 	ICE_FDIR_TUNNEL_TYPE_GTPU,
28 	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
29 };
30 
31 struct virtchnl_fdir_fltr_conf {
32 	struct ice_fdir_fltr input;
33 	enum ice_fdir_tunnel_type ttype;
34 	u64 inset_flag;
35 	u32 flow_id;
36 };
37 
38 struct virtchnl_fdir_inset_map {
39 	enum virtchnl_proto_hdr_field field;
40 	enum ice_flow_field fld;
41 	u64 flag;
42 	u64 mask;
43 };
44 
45 static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
46 	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
47 	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
48 	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
49 	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
50 	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
51 	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
52 	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
53 	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
54 	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
55 	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
56 	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
57 	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
58 	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
59 	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
60 	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
61 	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
62 	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
63 	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
64 	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
65 	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
66 		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
67 	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
68 		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
69 	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
70 	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
71 	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
72 };
73 
74 /**
75  * ice_vc_fdir_param_check
76  * @vf: pointer to the VF structure
77  * @vsi_id: VF relative VSI ID
78  *
79  * Check for the valid VSI ID, PF's state and VF's state
80  *
81  * Return: 0 on success, and -EINVAL on error.
82  */
83 static int
84 ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
85 {
86 	struct ice_pf *pf = vf->pf;
87 
88 	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
89 		return -EINVAL;
90 
91 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
92 		return -EINVAL;
93 
94 	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
95 		return -EINVAL;
96 
97 	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
98 		return -EINVAL;
99 
100 	if (!ice_get_vf_vsi(vf))
101 		return -EINVAL;
102 
103 	return 0;
104 }
105 
106 /**
107  * ice_vf_start_ctrl_vsi
108  * @vf: pointer to the VF structure
109  *
110  * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
111  *
112  * Return: 0 on success, and other on error.
113  */
114 static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
115 {
116 	struct ice_pf *pf = vf->pf;
117 	struct ice_vsi *ctrl_vsi;
118 	struct device *dev;
119 	int err;
120 
121 	dev = ice_pf_to_dev(pf);
122 	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
123 		return -EEXIST;
124 
125 	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
126 	if (!ctrl_vsi) {
127 		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
128 			vf->vf_id);
129 		return -ENOMEM;
130 	}
131 
132 	err = ice_vsi_open_ctrl(ctrl_vsi);
133 	if (err) {
134 		dev_dbg(dev, "Could not open control VSI for VF %d\n",
135 			vf->vf_id);
136 		goto err_vsi_open;
137 	}
138 
139 	return 0;
140 
141 err_vsi_open:
142 	ice_vsi_release(ctrl_vsi);
143 	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
144 		pf->vsi[vf->ctrl_vsi_idx] = NULL;
145 		vf->ctrl_vsi_idx = ICE_NO_VSI;
146 	}
147 	return err;
148 }
149 
150 /**
151  * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
152  * @vf: pointer to the VF structure
153  * @flow: filter flow type
154  *
155  * Return: 0 on success, and other on error.
156  */
157 static int
158 ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
159 {
160 	struct ice_vf_fdir *fdir = &vf->fdir;
161 
162 	if (!fdir->fdir_prof) {
163 		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
164 					       ICE_FLTR_PTYPE_MAX,
165 					       sizeof(*fdir->fdir_prof),
166 					       GFP_KERNEL);
167 		if (!fdir->fdir_prof)
168 			return -ENOMEM;
169 	}
170 
171 	if (!fdir->fdir_prof[flow]) {
172 		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
173 						     sizeof(**fdir->fdir_prof),
174 						     GFP_KERNEL);
175 		if (!fdir->fdir_prof[flow])
176 			return -ENOMEM;
177 	}
178 
179 	return 0;
180 }
181 
182 /**
183  * ice_vc_fdir_free_prof - free profile for this filter flow type
184  * @vf: pointer to the VF structure
185  * @flow: filter flow type
186  */
187 static void
188 ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
189 {
190 	struct ice_vf_fdir *fdir = &vf->fdir;
191 
192 	if (!fdir->fdir_prof)
193 		return;
194 
195 	if (!fdir->fdir_prof[flow])
196 		return;
197 
198 	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
199 	fdir->fdir_prof[flow] = NULL;
200 }
201 
202 /**
203  * ice_vc_fdir_free_prof_all - free all the profile for this VF
204  * @vf: pointer to the VF structure
205  */
206 static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
207 {
208 	struct ice_vf_fdir *fdir = &vf->fdir;
209 	enum ice_fltr_ptype flow;
210 
211 	if (!fdir->fdir_prof)
212 		return;
213 
214 	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
215 		ice_vc_fdir_free_prof(vf, flow);
216 
217 	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
218 	fdir->fdir_prof = NULL;
219 }
220 
221 /**
222  * ice_vc_fdir_parse_flow_fld
223  * @proto_hdr: virtual channel protocol filter header
224  * @conf: FDIR configuration for each filter
225  * @fld: field type array
226  * @fld_cnt: field counter
227  *
228  * Parse the virtual channel filter header and store them into field type array
229  *
230  * Return: 0 on success, and other on error.
231  */
232 static int
233 ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
234 			   struct virtchnl_fdir_fltr_conf *conf,
235 			   enum ice_flow_field *fld, int *fld_cnt)
236 {
237 	struct virtchnl_proto_hdr hdr;
238 	u32 i;
239 
240 	memcpy(&hdr, proto_hdr, sizeof(hdr));
241 
242 	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
243 	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
244 		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
245 			if (fdir_inset_map[i].mask &&
246 			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
247 			     fdir_inset_map[i].flag))
248 				continue;
249 
250 			fld[*fld_cnt] = fdir_inset_map[i].fld;
251 			*fld_cnt += 1;
252 			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
253 				return -EINVAL;
254 			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
255 						     fdir_inset_map[i].field);
256 		}
257 
258 	return 0;
259 }
260 
261 /**
262  * ice_vc_fdir_set_flow_fld
263  * @vf: pointer to the VF structure
264  * @fltr: virtual channel add cmd buffer
265  * @conf: FDIR configuration for each filter
266  * @seg: array of one or more packet segments that describe the flow
267  *
268  * Parse the virtual channel add msg buffer's field vector and store them into
269  * flow's packet segment field
270  *
271  * Return: 0 on success, and other on error.
272  */
273 static int
274 ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
275 			 struct virtchnl_fdir_fltr_conf *conf,
276 			 struct ice_flow_seg_info *seg)
277 {
278 	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
279 	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
280 	struct device *dev = ice_pf_to_dev(vf->pf);
281 	struct virtchnl_proto_hdrs *proto;
282 	int fld_cnt = 0;
283 	int i;
284 
285 	proto = &rule->proto_hdrs;
286 	for (i = 0; i < proto->count; i++) {
287 		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
288 		int ret;
289 
290 		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
291 		if (ret)
292 			return ret;
293 	}
294 
295 	if (fld_cnt == 0) {
296 		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
297 		return -EINVAL;
298 	}
299 
300 	for (i = 0; i < fld_cnt; i++)
301 		ice_flow_set_fld(seg, fld[i],
302 				 ICE_FLOW_FLD_OFF_INVAL,
303 				 ICE_FLOW_FLD_OFF_INVAL,
304 				 ICE_FLOW_FLD_OFF_INVAL, false);
305 
306 	return 0;
307 }
308 
309 /**
310  * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
311  * @vf: pointer to the VF structure
312  * @conf: FDIR configuration for each filter
313  * @seg: array of one or more packet segments that describe the flow
314  *
315  * Return: 0 on success, and other on error.
316  */
317 static int
318 ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
319 			 struct virtchnl_fdir_fltr_conf *conf,
320 			 struct ice_flow_seg_info *seg)
321 {
322 	enum ice_fltr_ptype flow = conf->input.flow_type;
323 	enum ice_fdir_tunnel_type ttype = conf->ttype;
324 	struct device *dev = ice_pf_to_dev(vf->pf);
325 
326 	switch (flow) {
327 	case ICE_FLTR_PTYPE_NON_IP_L2:
328 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
329 		break;
330 	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
331 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
332 				  ICE_FLOW_SEG_HDR_IPV4 |
333 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
334 		break;
335 	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
336 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
337 				  ICE_FLOW_SEG_HDR_IPV4 |
338 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
339 		break;
340 	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
341 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
342 				  ICE_FLOW_SEG_HDR_IPV4 |
343 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
344 		break;
345 	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
346 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
347 				  ICE_FLOW_SEG_HDR_IPV4 |
348 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
349 		break;
350 	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
351 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
352 				  ICE_FLOW_SEG_HDR_IPV4 |
353 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
354 		break;
355 	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
356 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
357 				  ICE_FLOW_SEG_HDR_IPV4 |
358 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
359 		break;
360 	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
361 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
362 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
363 		break;
364 	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
365 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
366 				  ICE_FLOW_SEG_HDR_IPV4 |
367 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
368 		break;
369 	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
370 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
371 				  ICE_FLOW_SEG_HDR_IPV4 |
372 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
373 		break;
374 	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
375 	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
376 	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
377 	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
378 		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
379 			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
380 					  ICE_FLOW_SEG_HDR_IPV4 |
381 					  ICE_FLOW_SEG_HDR_IPV_OTHER);
382 		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
383 			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
384 					  ICE_FLOW_SEG_HDR_GTPU_IP |
385 					  ICE_FLOW_SEG_HDR_IPV4 |
386 					  ICE_FLOW_SEG_HDR_IPV_OTHER);
387 		} else {
388 			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
389 				flow, vf->vf_id);
390 			return -EINVAL;
391 		}
392 		break;
393 	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
394 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
395 				  ICE_FLOW_SEG_HDR_IPV4 |
396 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
397 		break;
398 	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
399 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
400 				  ICE_FLOW_SEG_HDR_IPV6 |
401 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
402 		break;
403 	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
404 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
405 				  ICE_FLOW_SEG_HDR_IPV6 |
406 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
407 		break;
408 	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
409 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
410 				  ICE_FLOW_SEG_HDR_IPV6 |
411 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
412 		break;
413 	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
414 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
415 				  ICE_FLOW_SEG_HDR_IPV6 |
416 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
417 		break;
418 	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
419 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
420 				  ICE_FLOW_SEG_HDR_IPV6 |
421 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
422 		break;
423 	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
424 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
425 				  ICE_FLOW_SEG_HDR_IPV6 |
426 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
427 		break;
428 	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
429 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
430 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
431 		break;
432 	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
433 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
434 				  ICE_FLOW_SEG_HDR_IPV6 |
435 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
436 		break;
437 	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
438 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
439 				  ICE_FLOW_SEG_HDR_IPV6 |
440 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
441 		break;
442 	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
443 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
444 				  ICE_FLOW_SEG_HDR_IPV6 |
445 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
446 		break;
447 	default:
448 		dev_dbg(dev, "Invalid flow type 0x%x for VF %d failed\n",
449 			flow, vf->vf_id);
450 		return -EINVAL;
451 	}
452 
453 	return 0;
454 }
455 
456 /**
457  * ice_vc_fdir_rem_prof - remove profile for this filter flow type
458  * @vf: pointer to the VF structure
459  * @flow: filter flow type
460  * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
461  */
462 static void
463 ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
464 {
465 	struct ice_vf_fdir *fdir = &vf->fdir;
466 	struct ice_fd_hw_prof *vf_prof;
467 	struct ice_pf *pf = vf->pf;
468 	struct ice_vsi *vf_vsi;
469 	struct device *dev;
470 	struct ice_hw *hw;
471 	u64 prof_id;
472 	int i;
473 
474 	dev = ice_pf_to_dev(pf);
475 	hw = &pf->hw;
476 	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
477 		return;
478 
479 	vf_prof = fdir->fdir_prof[flow];
480 	prof_id = vf_prof->prof_id[tun];
481 
482 	vf_vsi = ice_get_vf_vsi(vf);
483 	if (!vf_vsi) {
484 		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
485 		return;
486 	}
487 
488 	if (!fdir->prof_entry_cnt[flow][tun])
489 		return;
490 
491 	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
492 		if (vf_prof->entry_h[i][tun]) {
493 			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
494 
495 			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
496 			ice_flow_rem_entry(hw, ICE_BLK_FD,
497 					   vf_prof->entry_h[i][tun]);
498 			vf_prof->entry_h[i][tun] = 0;
499 		}
500 
501 	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
502 	devm_kfree(dev, vf_prof->fdir_seg[tun]);
503 	vf_prof->fdir_seg[tun] = NULL;
504 
505 	for (i = 0; i < vf_prof->cnt; i++)
506 		vf_prof->vsi_h[i] = 0;
507 
508 	fdir->prof_entry_cnt[flow][tun] = 0;
509 }
510 
511 /**
512  * ice_vc_fdir_rem_prof_all - remove profile for this VF
513  * @vf: pointer to the VF structure
514  */
515 static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
516 {
517 	enum ice_fltr_ptype flow;
518 
519 	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
520 	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
521 		ice_vc_fdir_rem_prof(vf, flow, 0);
522 		ice_vc_fdir_rem_prof(vf, flow, 1);
523 	}
524 }
525 
526 /**
527  * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
528  * @fdir: pointer to the VF FDIR structure
529  */
530 static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
531 {
532 	enum ice_fltr_ptype flow;
533 
534 	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
535 	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
536 		fdir->fdir_fltr_cnt[flow][0] = 0;
537 		fdir->fdir_fltr_cnt[flow][1] = 0;
538 	}
539 
540 	fdir->fdir_fltr_cnt_total = 0;
541 }
542 
543 /**
544  * ice_vc_fdir_has_prof_conflict
545  * @vf: pointer to the VF structure
546  * @conf: FDIR configuration for each filter
547  *
548  * Check if @conf has conflicting profile with existing profiles
549  *
550  * Return: true on success, and false on error.
551  */
552 static bool
553 ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
554 			      struct virtchnl_fdir_fltr_conf *conf)
555 {
556 	struct ice_fdir_fltr *desc;
557 
558 	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
559 		struct virtchnl_fdir_fltr_conf *existing_conf;
560 		enum ice_fltr_ptype flow_type_a, flow_type_b;
561 		struct ice_fdir_fltr *a, *b;
562 
563 		existing_conf = to_fltr_conf_from_desc(desc);
564 		a = &existing_conf->input;
565 		b = &conf->input;
566 		flow_type_a = a->flow_type;
567 		flow_type_b = b->flow_type;
568 
569 		/* No need to compare two rules with different tunnel types or
570 		 * with the same protocol type.
571 		 */
572 		if (existing_conf->ttype != conf->ttype ||
573 		    flow_type_a == flow_type_b)
574 			continue;
575 
576 		switch (flow_type_a) {
577 		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
578 		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
579 		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
580 			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
581 				return true;
582 			break;
583 		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
584 			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
585 			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
586 			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
587 				return true;
588 			break;
589 		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
590 		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
591 		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
592 			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
593 				return true;
594 			break;
595 		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
596 			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
597 			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
598 			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
599 				return true;
600 			break;
601 		default:
602 			break;
603 		}
604 	}
605 
606 	return false;
607 }
608 
609 /**
610  * ice_vc_fdir_write_flow_prof
611  * @vf: pointer to the VF structure
612  * @flow: filter flow type
613  * @seg: array of one or more packet segments that describe the flow
614  * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
615  *
616  * Write the flow's profile config and packet segment into the hardware
617  *
618  * Return: 0 on success, and other on error.
619  */
620 static int
621 ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
622 			    struct ice_flow_seg_info *seg, int tun)
623 {
624 	struct ice_vf_fdir *fdir = &vf->fdir;
625 	struct ice_vsi *vf_vsi, *ctrl_vsi;
626 	struct ice_flow_seg_info *old_seg;
627 	struct ice_flow_prof *prof = NULL;
628 	struct ice_fd_hw_prof *vf_prof;
629 	struct device *dev;
630 	struct ice_pf *pf;
631 	struct ice_hw *hw;
632 	u64 entry1_h = 0;
633 	u64 entry2_h = 0;
634 	int ret;
635 
636 	pf = vf->pf;
637 	dev = ice_pf_to_dev(pf);
638 	hw = &pf->hw;
639 	vf_vsi = ice_get_vf_vsi(vf);
640 	if (!vf_vsi)
641 		return -EINVAL;
642 
643 	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
644 	if (!ctrl_vsi)
645 		return -EINVAL;
646 
647 	vf_prof = fdir->fdir_prof[flow];
648 	old_seg = vf_prof->fdir_seg[tun];
649 	if (old_seg) {
650 		if (!memcmp(old_seg, seg, sizeof(*seg))) {
651 			dev_dbg(dev, "Duplicated profile for VF %d!\n",
652 				vf->vf_id);
653 			return -EEXIST;
654 		}
655 
656 		if (fdir->fdir_fltr_cnt[flow][tun]) {
657 			ret = -EINVAL;
658 			dev_dbg(dev, "Input set conflicts for VF %d\n",
659 				vf->vf_id);
660 			goto err_exit;
661 		}
662 
663 		/* remove previously allocated profile */
664 		ice_vc_fdir_rem_prof(vf, flow, tun);
665 	}
666 
667 	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
668 				tun + 1, false, &prof);
669 	if (ret) {
670 		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
671 			flow, vf->vf_id);
672 		goto err_exit;
673 	}
674 
675 	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
676 				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
677 				 seg, &entry1_h);
678 	if (ret) {
679 		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
680 			flow, vf->vf_id);
681 		goto err_prof;
682 	}
683 
684 	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
685 				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
686 				 seg, &entry2_h);
687 	if (ret) {
688 		dev_dbg(dev,
689 			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
690 			flow, vf->vf_id);
691 		goto err_entry_1;
692 	}
693 
694 	vf_prof->fdir_seg[tun] = seg;
695 	vf_prof->cnt = 0;
696 	fdir->prof_entry_cnt[flow][tun] = 0;
697 
698 	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
699 	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
700 	vf_prof->cnt++;
701 	fdir->prof_entry_cnt[flow][tun]++;
702 
703 	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
704 	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
705 	vf_prof->cnt++;
706 	fdir->prof_entry_cnt[flow][tun]++;
707 
708 	vf_prof->prof_id[tun] = prof->id;
709 
710 	return 0;
711 
712 err_entry_1:
713 	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
714 			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
715 	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
716 err_prof:
717 	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
718 err_exit:
719 	return ret;
720 }
721 
722 /**
723  * ice_vc_fdir_config_input_set
724  * @vf: pointer to the VF structure
725  * @fltr: virtual channel add cmd buffer
726  * @conf: FDIR configuration for each filter
727  * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
728  *
729  * Config the input set type and value for virtual channel add msg buffer
730  *
731  * Return: 0 on success, and other on error.
732  */
733 static int
734 ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
735 			     struct virtchnl_fdir_fltr_conf *conf, int tun)
736 {
737 	struct ice_fdir_fltr *input = &conf->input;
738 	struct device *dev = ice_pf_to_dev(vf->pf);
739 	struct ice_flow_seg_info *seg;
740 	enum ice_fltr_ptype flow;
741 	int ret;
742 
743 	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
744 	if (ret) {
745 		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
746 			vf->vf_id);
747 		return ret;
748 	}
749 
750 	flow = input->flow_type;
751 	ret = ice_vc_fdir_alloc_prof(vf, flow);
752 	if (ret) {
753 		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
754 		return ret;
755 	}
756 
757 	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
758 	if (!seg)
759 		return -ENOMEM;
760 
761 	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
762 	if (ret) {
763 		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
764 		goto err_exit;
765 	}
766 
767 	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
768 	if (ret) {
769 		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
770 		goto err_exit;
771 	}
772 
773 	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
774 	if (ret == -EEXIST) {
775 		devm_kfree(dev, seg);
776 	} else if (ret) {
777 		dev_dbg(dev, "Write flow profile for VF %d failed\n",
778 			vf->vf_id);
779 		goto err_exit;
780 	}
781 
782 	return 0;
783 
784 err_exit:
785 	devm_kfree(dev, seg);
786 	return ret;
787 }
788 
789 /**
790  * ice_vc_fdir_parse_pattern
791  * @vf: pointer to the VF info
792  * @fltr: virtual channel add cmd buffer
793  * @conf: FDIR configuration for each filter
794  *
795  * Parse the virtual channel filter's pattern and store them into conf
796  *
797  * Return: 0 on success, and other on error.
798  */
799 static int
800 ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
801 			  struct virtchnl_fdir_fltr_conf *conf)
802 {
803 	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
804 	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
805 	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
806 	struct device *dev = ice_pf_to_dev(vf->pf);
807 	struct ice_fdir_fltr *input = &conf->input;
808 	int i;
809 
810 	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
811 		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
812 			proto->count, vf->vf_id);
813 		return -EINVAL;
814 	}
815 
816 	for (i = 0; i < proto->count; i++) {
817 		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
818 		struct ip_esp_hdr *esph;
819 		struct ip_auth_hdr *ah;
820 		struct sctphdr *sctph;
821 		struct ipv6hdr *ip6h;
822 		struct udphdr *udph;
823 		struct tcphdr *tcph;
824 		struct ethhdr *eth;
825 		struct iphdr *iph;
826 		u8 s_field;
827 		u8 *rawh;
828 
829 		switch (hdr->type) {
830 		case VIRTCHNL_PROTO_HDR_ETH:
831 			eth = (struct ethhdr *)hdr->buffer;
832 			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
833 
834 			if (hdr->field_selector)
835 				input->ext_data.ether_type = eth->h_proto;
836 			break;
837 		case VIRTCHNL_PROTO_HDR_IPV4:
838 			iph = (struct iphdr *)hdr->buffer;
839 			l3 = VIRTCHNL_PROTO_HDR_IPV4;
840 			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
841 
842 			if (hdr->field_selector) {
843 				input->ip.v4.src_ip = iph->saddr;
844 				input->ip.v4.dst_ip = iph->daddr;
845 				input->ip.v4.tos = iph->tos;
846 				input->ip.v4.proto = iph->protocol;
847 			}
848 			break;
849 		case VIRTCHNL_PROTO_HDR_IPV6:
850 			ip6h = (struct ipv6hdr *)hdr->buffer;
851 			l3 = VIRTCHNL_PROTO_HDR_IPV6;
852 			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
853 
854 			if (hdr->field_selector) {
855 				memcpy(input->ip.v6.src_ip,
856 				       ip6h->saddr.in6_u.u6_addr8,
857 				       sizeof(ip6h->saddr));
858 				memcpy(input->ip.v6.dst_ip,
859 				       ip6h->daddr.in6_u.u6_addr8,
860 				       sizeof(ip6h->daddr));
861 				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
862 						  (ip6h->flow_lbl[0] >> 4);
863 				input->ip.v6.proto = ip6h->nexthdr;
864 			}
865 			break;
866 		case VIRTCHNL_PROTO_HDR_TCP:
867 			tcph = (struct tcphdr *)hdr->buffer;
868 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
869 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
870 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
871 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
872 
873 			if (hdr->field_selector) {
874 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
875 					input->ip.v4.src_port = tcph->source;
876 					input->ip.v4.dst_port = tcph->dest;
877 				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
878 					input->ip.v6.src_port = tcph->source;
879 					input->ip.v6.dst_port = tcph->dest;
880 				}
881 			}
882 			break;
883 		case VIRTCHNL_PROTO_HDR_UDP:
884 			udph = (struct udphdr *)hdr->buffer;
885 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
886 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
887 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
888 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
889 
890 			if (hdr->field_selector) {
891 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
892 					input->ip.v4.src_port = udph->source;
893 					input->ip.v4.dst_port = udph->dest;
894 				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
895 					input->ip.v6.src_port = udph->source;
896 					input->ip.v6.dst_port = udph->dest;
897 				}
898 			}
899 			break;
900 		case VIRTCHNL_PROTO_HDR_SCTP:
901 			sctph = (struct sctphdr *)hdr->buffer;
902 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
903 				input->flow_type =
904 					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
905 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
906 				input->flow_type =
907 					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
908 
909 			if (hdr->field_selector) {
910 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
911 					input->ip.v4.src_port = sctph->source;
912 					input->ip.v4.dst_port = sctph->dest;
913 				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
914 					input->ip.v6.src_port = sctph->source;
915 					input->ip.v6.dst_port = sctph->dest;
916 				}
917 			}
918 			break;
919 		case VIRTCHNL_PROTO_HDR_L2TPV3:
920 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
921 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
922 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
923 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
924 
925 			if (hdr->field_selector)
926 				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
927 			break;
928 		case VIRTCHNL_PROTO_HDR_ESP:
929 			esph = (struct ip_esp_hdr *)hdr->buffer;
930 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
931 			    l4 == VIRTCHNL_PROTO_HDR_UDP)
932 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
933 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
934 				 l4 == VIRTCHNL_PROTO_HDR_UDP)
935 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
936 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
937 				 l4 == VIRTCHNL_PROTO_HDR_NONE)
938 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
939 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
940 				 l4 == VIRTCHNL_PROTO_HDR_NONE)
941 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
942 
943 			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
944 				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
945 			else
946 				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
947 
948 			if (hdr->field_selector) {
949 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
950 					input->ip.v4.sec_parm_idx = esph->spi;
951 				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
952 					input->ip.v6.sec_parm_idx = esph->spi;
953 			}
954 			break;
955 		case VIRTCHNL_PROTO_HDR_AH:
956 			ah = (struct ip_auth_hdr *)hdr->buffer;
957 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
958 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
959 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
960 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
961 
962 			if (hdr->field_selector) {
963 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
964 					input->ip.v4.sec_parm_idx = ah->spi;
965 				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
966 					input->ip.v6.sec_parm_idx = ah->spi;
967 			}
968 			break;
969 		case VIRTCHNL_PROTO_HDR_PFCP:
970 			rawh = (u8 *)hdr->buffer;
971 			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
972 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
973 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
974 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
975 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
976 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
977 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
978 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
979 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
980 
981 			if (hdr->field_selector) {
982 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
983 					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
984 				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
985 					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
986 			}
987 			break;
988 		case VIRTCHNL_PROTO_HDR_GTPU_IP:
989 			rawh = (u8 *)hdr->buffer;
990 			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
991 
992 			if (hdr->field_selector)
993 				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
994 			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
995 			break;
996 		case VIRTCHNL_PROTO_HDR_GTPU_EH:
997 			rawh = (u8 *)hdr->buffer;
998 
999 			if (hdr->field_selector)
1000 				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
1001 			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1002 			break;
1003 		default:
1004 			dev_dbg(dev, "Invalid header type 0x:%x for VF %d\n",
1005 				hdr->type, vf->vf_id);
1006 			return -EINVAL;
1007 		}
1008 	}
1009 
1010 	return 0;
1011 }
1012 
1013 /**
1014  * ice_vc_fdir_parse_action
1015  * @vf: pointer to the VF info
1016  * @fltr: virtual channel add cmd buffer
1017  * @conf: FDIR configuration for each filter
1018  *
1019  * Parse the virtual channel filter's action and store them into conf
1020  *
1021  * Return: 0 on success, and other on error.
1022  */
1023 static int
1024 ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1025 			 struct virtchnl_fdir_fltr_conf *conf)
1026 {
1027 	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
1028 	struct device *dev = ice_pf_to_dev(vf->pf);
1029 	struct ice_fdir_fltr *input = &conf->input;
1030 	u32 dest_num = 0;
1031 	u32 mark_num = 0;
1032 	int i;
1033 
1034 	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
1035 		dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
1036 			as->count, vf->vf_id);
1037 		return -EINVAL;
1038 	}
1039 
1040 	for (i = 0; i < as->count; i++) {
1041 		struct virtchnl_filter_action *action = &as->actions[i];
1042 
1043 		switch (action->type) {
1044 		case VIRTCHNL_ACTION_PASSTHRU:
1045 			dest_num++;
1046 			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1047 			break;
1048 		case VIRTCHNL_ACTION_DROP:
1049 			dest_num++;
1050 			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1051 			break;
1052 		case VIRTCHNL_ACTION_QUEUE:
1053 			dest_num++;
1054 			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1055 			input->q_index = action->act_conf.queue.index;
1056 			break;
1057 		case VIRTCHNL_ACTION_Q_REGION:
1058 			dest_num++;
1059 			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1060 			input->q_index = action->act_conf.queue.index;
1061 			input->q_region = action->act_conf.queue.region;
1062 			break;
1063 		case VIRTCHNL_ACTION_MARK:
1064 			mark_num++;
1065 			input->fltr_id = action->act_conf.mark_id;
1066 			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
1067 			break;
1068 		default:
1069 			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
1070 				action->type, vf->vf_id);
1071 			return -EINVAL;
1072 		}
1073 	}
1074 
1075 	if (dest_num == 0 || dest_num >= 2) {
1076 		dev_dbg(dev, "Invalid destination action for VF %d\n",
1077 			vf->vf_id);
1078 		return -EINVAL;
1079 	}
1080 
1081 	if (mark_num >= 2) {
1082 		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
1083 		return -EINVAL;
1084 	}
1085 
1086 	return 0;
1087 }
1088 
1089 /**
1090  * ice_vc_validate_fdir_fltr - validate the virtual channel filter
1091  * @vf: pointer to the VF info
1092  * @fltr: virtual channel add cmd buffer
1093  * @conf: FDIR configuration for each filter
1094  *
1095  * Return: 0 on success, and other on error.
1096  */
1097 static int
1098 ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1099 			  struct virtchnl_fdir_fltr_conf *conf)
1100 {
1101 	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
1102 	int ret;
1103 
1104 	if (!ice_vc_validate_pattern(vf, proto))
1105 		return -EINVAL;
1106 
1107 	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
1108 	if (ret)
1109 		return ret;
1110 
1111 	return ice_vc_fdir_parse_action(vf, fltr, conf);
1112 }
1113 
1114 /**
1115  * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
1116  * @conf_a: FDIR configuration for filter a
1117  * @conf_b: FDIR configuration for filter b
1118  *
1119  * Return: 0 on success, and other on error.
1120  */
1121 static bool
1122 ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
1123 		       struct virtchnl_fdir_fltr_conf *conf_b)
1124 {
1125 	struct ice_fdir_fltr *a = &conf_a->input;
1126 	struct ice_fdir_fltr *b = &conf_b->input;
1127 
1128 	if (conf_a->ttype != conf_b->ttype)
1129 		return false;
1130 	if (a->flow_type != b->flow_type)
1131 		return false;
1132 	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
1133 		return false;
1134 	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
1135 		return false;
1136 	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
1137 		return false;
1138 	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
1139 		return false;
1140 	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
1141 		return false;
1142 	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
1143 		return false;
1144 	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
1145 		return false;
1146 	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
1147 		return false;
1148 
1149 	return true;
1150 }
1151 
1152 /**
1153  * ice_vc_fdir_is_dup_fltr
1154  * @vf: pointer to the VF info
1155  * @conf: FDIR configuration for each filter
1156  *
1157  * Check if there is duplicated rule with same conf value
1158  *
1159  * Return: 0 true success, and false on error.
1160  */
1161 static bool
1162 ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
1163 {
1164 	struct ice_fdir_fltr *desc;
1165 	bool ret;
1166 
1167 	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
1168 		struct virtchnl_fdir_fltr_conf *node =
1169 				to_fltr_conf_from_desc(desc);
1170 
1171 		ret = ice_vc_fdir_comp_rules(node, conf);
1172 		if (ret)
1173 			return true;
1174 	}
1175 
1176 	return false;
1177 }
1178 
1179 /**
1180  * ice_vc_fdir_insert_entry
1181  * @vf: pointer to the VF info
1182  * @conf: FDIR configuration for each filter
1183  * @id: pointer to ID value allocated by driver
1184  *
1185  * Insert FDIR conf entry into list and allocate ID for this filter
1186  *
1187  * Return: 0 true success, and other on error.
1188  */
1189 static int
1190 ice_vc_fdir_insert_entry(struct ice_vf *vf,
1191 			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
1192 {
1193 	struct ice_fdir_fltr *input = &conf->input;
1194 	int i;
1195 
1196 	/* alloc ID corresponding with conf */
1197 	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
1198 		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
1199 	if (i < 0)
1200 		return -EINVAL;
1201 	*id = i;
1202 
1203 	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
1204 	return 0;
1205 }
1206 
1207 /**
1208  * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
1209  * @vf: pointer to the VF info
1210  * @conf: FDIR configuration for each filter
1211  * @id: filter rule's ID
1212  */
1213 static void
1214 ice_vc_fdir_remove_entry(struct ice_vf *vf,
1215 			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
1216 {
1217 	struct ice_fdir_fltr *input = &conf->input;
1218 
1219 	idr_remove(&vf->fdir.fdir_rule_idr, id);
1220 	list_del(&input->fltr_node);
1221 }
1222 
1223 /**
1224  * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
1225  * @vf: pointer to the VF info
1226  * @id: filter rule's ID
1227  *
1228  * Return: NULL on error, and other on success.
1229  */
1230 static struct virtchnl_fdir_fltr_conf *
1231 ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
1232 {
1233 	return idr_find(&vf->fdir.fdir_rule_idr, id);
1234 }
1235 
1236 /**
1237  * ice_vc_fdir_flush_entry - remove all FDIR conf entry
1238  * @vf: pointer to the VF info
1239  */
1240 static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
1241 {
1242 	struct virtchnl_fdir_fltr_conf *conf;
1243 	struct ice_fdir_fltr *desc, *temp;
1244 
1245 	list_for_each_entry_safe(desc, temp,
1246 				 &vf->fdir.fdir_rule_list, fltr_node) {
1247 		conf = to_fltr_conf_from_desc(desc);
1248 		list_del(&desc->fltr_node);
1249 		devm_kfree(ice_pf_to_dev(vf->pf), conf);
1250 	}
1251 }
1252 
1253 /**
1254  * ice_vc_fdir_write_fltr - write filter rule into hardware
1255  * @vf: pointer to the VF info
1256  * @conf: FDIR configuration for each filter
1257  * @add: true implies add rule, false implies del rules
1258  * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
1259  *
1260  * Return: 0 on success, and other on error.
1261  */
1262 static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
1263 				  struct virtchnl_fdir_fltr_conf *conf,
1264 				  bool add, bool is_tun)
1265 {
1266 	struct ice_fdir_fltr *input = &conf->input;
1267 	struct ice_vsi *vsi, *ctrl_vsi;
1268 	struct ice_fltr_desc desc;
1269 	struct device *dev;
1270 	struct ice_pf *pf;
1271 	struct ice_hw *hw;
1272 	int ret;
1273 	u8 *pkt;
1274 
1275 	pf = vf->pf;
1276 	dev = ice_pf_to_dev(pf);
1277 	hw = &pf->hw;
1278 	vsi = ice_get_vf_vsi(vf);
1279 	if (!vsi) {
1280 		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
1281 		return -EINVAL;
1282 	}
1283 
1284 	input->dest_vsi = vsi->idx;
1285 	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1286 
1287 	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1288 	if (!ctrl_vsi) {
1289 		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
1290 		return -EINVAL;
1291 	}
1292 
1293 	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1294 	if (!pkt)
1295 		return -ENOMEM;
1296 
1297 	ice_fdir_get_prgm_desc(hw, input, &desc, add);
1298 	ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1299 	if (ret) {
1300 		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
1301 			vf->vf_id, input->flow_type);
1302 		goto err_free_pkt;
1303 	}
1304 
1305 	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1306 	if (ret)
1307 		goto err_free_pkt;
1308 
1309 	return 0;
1310 
1311 err_free_pkt:
1312 	devm_kfree(dev, pkt);
1313 	return ret;
1314 }
1315 
1316 /**
1317  * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
1318  * @t: pointer to timer_list
1319  */
1320 static void ice_vf_fdir_timer(struct timer_list *t)
1321 {
1322 	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
1323 	struct ice_vf_fdir_ctx *ctx_done;
1324 	struct ice_vf_fdir *fdir;
1325 	unsigned long flags;
1326 	struct ice_vf *vf;
1327 	struct ice_pf *pf;
1328 
1329 	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
1330 	vf = container_of(fdir, struct ice_vf, fdir);
1331 	ctx_done = &fdir->ctx_done;
1332 	pf = vf->pf;
1333 	spin_lock_irqsave(&fdir->ctx_lock, flags);
1334 	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1335 		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1336 		WARN_ON_ONCE(1);
1337 		return;
1338 	}
1339 
1340 	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1341 
1342 	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1343 	ctx_done->conf = ctx_irq->conf;
1344 	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
1345 	ctx_done->v_opcode = ctx_irq->v_opcode;
1346 	spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1347 
1348 	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1349 	ice_service_task_schedule(pf);
1350 }
1351 
1352 /**
1353  * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
1354  * @ctrl_vsi: pointer to a VF's CTRL VSI
1355  * @rx_desc: pointer to FDIR Rx queue descriptor
1356  */
1357 void
1358 ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
1359 			union ice_32b_rx_flex_desc *rx_desc)
1360 {
1361 	struct ice_pf *pf = ctrl_vsi->back;
1362 	struct ice_vf *vf = ctrl_vsi->vf;
1363 	struct ice_vf_fdir_ctx *ctx_done;
1364 	struct ice_vf_fdir_ctx *ctx_irq;
1365 	struct ice_vf_fdir *fdir;
1366 	unsigned long flags;
1367 	struct device *dev;
1368 	int ret;
1369 
1370 	if (WARN_ON(!vf))
1371 		return;
1372 
1373 	fdir = &vf->fdir;
1374 	ctx_done = &fdir->ctx_done;
1375 	ctx_irq = &fdir->ctx_irq;
1376 	dev = ice_pf_to_dev(pf);
1377 	spin_lock_irqsave(&fdir->ctx_lock, flags);
1378 	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1379 		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1380 		WARN_ON_ONCE(1);
1381 		return;
1382 	}
1383 
1384 	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1385 
1386 	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1387 	ctx_done->conf = ctx_irq->conf;
1388 	ctx_done->stat = ICE_FDIR_CTX_IRQ;
1389 	ctx_done->v_opcode = ctx_irq->v_opcode;
1390 	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
1391 	spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1392 
1393 	ret = del_timer(&ctx_irq->rx_tmr);
1394 	if (!ret)
1395 		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
1396 
1397 	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1398 	ice_service_task_schedule(pf);
1399 }
1400 
1401 /**
1402  * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
1403  * @vf: pointer to the VF info
1404  */
1405 static void ice_vf_fdir_dump_info(struct ice_vf *vf)
1406 {
1407 	u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
1408 	struct ice_vsi *vf_vsi;
1409 	struct device *dev;
1410 	struct ice_pf *pf;
1411 	struct ice_hw *hw;
1412 	u16 vsi_num;
1413 
1414 	pf = vf->pf;
1415 	hw = &pf->hw;
1416 	dev = ice_pf_to_dev(pf);
1417 	vf_vsi = ice_get_vf_vsi(vf);
1418 	if (!vf_vsi) {
1419 		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
1420 		return;
1421 	}
1422 
1423 	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
1424 
1425 	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
1426 	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
1427 	switch (hw->mac_type) {
1428 	case ICE_MAC_E830:
1429 		fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1430 		fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1431 		fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1432 		fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1433 		break;
1434 	case ICE_MAC_E810:
1435 	default:
1436 		fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1437 		fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1438 		fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1439 		fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1440 	}
1441 
1442 	dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1443 		vf->vf_id, fd_size_g, fd_size_b);
1444 	dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1445 		vf->vf_id, fd_cnt_g, fd_cnt_b);
1446 }
1447 
1448 /**
1449  * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
1450  * @vf: pointer to the VF info
1451  * @ctx: FDIR context info for post processing
1452  * @status: virtchnl FDIR program status
1453  *
1454  * Return: 0 on success, and other on error.
1455  */
1456 static int
1457 ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1458 		      enum virtchnl_fdir_prgm_status *status)
1459 {
1460 	struct device *dev = ice_pf_to_dev(vf->pf);
1461 	u32 stat_err, error, prog_id;
1462 	int ret;
1463 
1464 	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
1465 	if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
1466 	    ICE_FXD_FLTR_WB_QW1_DD_YES) {
1467 		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1468 		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
1469 		ret = -EINVAL;
1470 		goto err_exit;
1471 	}
1472 
1473 	prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
1474 	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
1475 	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
1476 		dev_err(dev, "VF %d: Desc show add, but ctx not",
1477 			vf->vf_id);
1478 		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1479 		ret = -EINVAL;
1480 		goto err_exit;
1481 	}
1482 
1483 	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
1484 	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
1485 		dev_err(dev, "VF %d: Desc show del, but ctx not",
1486 			vf->vf_id);
1487 		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1488 		ret = -EINVAL;
1489 		goto err_exit;
1490 	}
1491 
1492 	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
1493 	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
1494 		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
1495 			dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
1496 				vf->vf_id);
1497 			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1498 		} else {
1499 			dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry",
1500 				vf->vf_id);
1501 			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1502 		}
1503 		ret = -EINVAL;
1504 		goto err_exit;
1505 	}
1506 
1507 	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
1508 	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
1509 		dev_err(dev, "VF %d: Profile matching error", vf->vf_id);
1510 		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1511 		ret = -EINVAL;
1512 		goto err_exit;
1513 	}
1514 
1515 	*status = VIRTCHNL_FDIR_SUCCESS;
1516 
1517 	return 0;
1518 
1519 err_exit:
1520 	ice_vf_fdir_dump_info(vf);
1521 	return ret;
1522 }
1523 
1524 /**
1525  * ice_vc_add_fdir_fltr_post
1526  * @vf: pointer to the VF structure
1527  * @ctx: FDIR context info for post processing
1528  * @status: virtchnl FDIR program status
1529  * @success: true implies success, false implies failure
1530  *
1531  * Post process for flow director add command. If success, then do post process
1532  * and send back success msg by virtchnl. Otherwise, do context reversion and
1533  * send back failure msg by virtchnl.
1534  *
1535  * Return: 0 on success, and other on error.
1536  */
1537 static int
1538 ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1539 			  enum virtchnl_fdir_prgm_status status,
1540 			  bool success)
1541 {
1542 	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1543 	struct device *dev = ice_pf_to_dev(vf->pf);
1544 	enum virtchnl_status_code v_ret;
1545 	struct virtchnl_fdir_add *resp;
1546 	int ret, len, is_tun;
1547 
1548 	v_ret = VIRTCHNL_STATUS_SUCCESS;
1549 	len = sizeof(*resp);
1550 	resp = kzalloc(len, GFP_KERNEL);
1551 	if (!resp) {
1552 		len = 0;
1553 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1554 		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1555 		goto err_exit;
1556 	}
1557 
1558 	if (!success)
1559 		goto err_exit;
1560 
1561 	is_tun = 0;
1562 	resp->status = status;
1563 	resp->flow_id = conf->flow_id;
1564 	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
1565 	vf->fdir.fdir_fltr_cnt_total++;
1566 
1567 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1568 				    (u8 *)resp, len);
1569 	kfree(resp);
1570 
1571 	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1572 		vf->vf_id, conf->flow_id,
1573 		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1574 		"add" : "del");
1575 	return ret;
1576 
1577 err_exit:
1578 	if (resp)
1579 		resp->status = status;
1580 	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1581 	devm_kfree(dev, conf);
1582 
1583 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1584 				    (u8 *)resp, len);
1585 	kfree(resp);
1586 	return ret;
1587 }
1588 
1589 /**
1590  * ice_vc_del_fdir_fltr_post
1591  * @vf: pointer to the VF structure
1592  * @ctx: FDIR context info for post processing
1593  * @status: virtchnl FDIR program status
1594  * @success: true implies success, false implies failure
1595  *
1596  * Post process for flow director del command. If success, then do post process
1597  * and send back success msg by virtchnl. Otherwise, do context reversion and
1598  * send back failure msg by virtchnl.
1599  *
1600  * Return: 0 on success, and other on error.
1601  */
1602 static int
1603 ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1604 			  enum virtchnl_fdir_prgm_status status,
1605 			  bool success)
1606 {
1607 	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1608 	struct device *dev = ice_pf_to_dev(vf->pf);
1609 	enum virtchnl_status_code v_ret;
1610 	struct virtchnl_fdir_del *resp;
1611 	int ret, len, is_tun;
1612 
1613 	v_ret = VIRTCHNL_STATUS_SUCCESS;
1614 	len = sizeof(*resp);
1615 	resp = kzalloc(len, GFP_KERNEL);
1616 	if (!resp) {
1617 		len = 0;
1618 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1619 		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1620 		goto err_exit;
1621 	}
1622 
1623 	if (!success)
1624 		goto err_exit;
1625 
1626 	is_tun = 0;
1627 	resp->status = status;
1628 	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1629 	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
1630 	vf->fdir.fdir_fltr_cnt_total--;
1631 
1632 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1633 				    (u8 *)resp, len);
1634 	kfree(resp);
1635 
1636 	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1637 		vf->vf_id, conf->flow_id,
1638 		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1639 		"add" : "del");
1640 	devm_kfree(dev, conf);
1641 	return ret;
1642 
1643 err_exit:
1644 	if (resp)
1645 		resp->status = status;
1646 	if (success)
1647 		devm_kfree(dev, conf);
1648 
1649 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1650 				    (u8 *)resp, len);
1651 	kfree(resp);
1652 	return ret;
1653 }
1654 
1655 /**
1656  * ice_flush_fdir_ctx
1657  * @pf: pointer to the PF structure
1658  *
1659  * Flush all the pending event on ctx_done list and process them.
1660  */
1661 void ice_flush_fdir_ctx(struct ice_pf *pf)
1662 {
1663 	struct ice_vf *vf;
1664 	unsigned int bkt;
1665 
1666 	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
1667 		return;
1668 
1669 	mutex_lock(&pf->vfs.table_lock);
1670 	ice_for_each_vf(pf, bkt, vf) {
1671 		struct device *dev = ice_pf_to_dev(pf);
1672 		enum virtchnl_fdir_prgm_status status;
1673 		struct ice_vf_fdir_ctx *ctx;
1674 		unsigned long flags;
1675 		int ret;
1676 
1677 		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1678 			continue;
1679 
1680 		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
1681 			continue;
1682 
1683 		ctx = &vf->fdir.ctx_done;
1684 		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1685 		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
1686 			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1687 			continue;
1688 		}
1689 		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1690 
1691 		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
1692 		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
1693 			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
1694 			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
1695 				vf->vf_id);
1696 			goto err_exit;
1697 		}
1698 
1699 		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
1700 		if (ret)
1701 			goto err_exit;
1702 
1703 		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1704 			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
1705 		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1706 			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
1707 		else
1708 			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1709 
1710 		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1711 		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1712 		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1713 		continue;
1714 err_exit:
1715 		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1716 			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
1717 		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1718 			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
1719 		else
1720 			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1721 
1722 		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1723 		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1724 		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1725 	}
1726 	mutex_unlock(&pf->vfs.table_lock);
1727 }
1728 
1729 /**
1730  * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
1731  * @vf: pointer to the VF structure
1732  * @conf: FDIR configuration for each filter
1733  * @v_opcode: virtual channel operation code
1734  *
1735  * Return: 0 on success, and other on error.
1736  */
1737 static int
1738 ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
1739 			enum virtchnl_ops v_opcode)
1740 {
1741 	struct device *dev = ice_pf_to_dev(vf->pf);
1742 	struct ice_vf_fdir_ctx *ctx;
1743 	unsigned long flags;
1744 
1745 	ctx = &vf->fdir.ctx_irq;
1746 	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1747 	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
1748 	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
1749 		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1750 		dev_dbg(dev, "VF %d: Last request is still in progress\n",
1751 			vf->vf_id);
1752 		return -EBUSY;
1753 	}
1754 	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
1755 	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1756 
1757 	ctx->conf = conf;
1758 	ctx->v_opcode = v_opcode;
1759 	ctx->stat = ICE_FDIR_CTX_READY;
1760 	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);
1761 
1762 	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));
1763 
1764 	return 0;
1765 }
1766 
1767 /**
1768  * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
1769  * @vf: pointer to the VF structure
1770  *
1771  * Return: 0 on success, and other on error.
1772  */
1773 static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
1774 {
1775 	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
1776 	unsigned long flags;
1777 
1778 	del_timer(&ctx->rx_tmr);
1779 	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1780 	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1781 	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1782 }
1783 
1784 /**
1785  * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
1786  * @vf: pointer to the VF info
1787  * @msg: pointer to the msg buffer
1788  *
1789  * Return: 0 on success, and other on error.
1790  */
1791 int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
1792 {
1793 	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
1794 	struct virtchnl_fdir_add *stat = NULL;
1795 	struct virtchnl_fdir_fltr_conf *conf;
1796 	enum virtchnl_status_code v_ret;
1797 	struct ice_vsi *vf_vsi;
1798 	struct device *dev;
1799 	struct ice_pf *pf;
1800 	int is_tun = 0;
1801 	int len = 0;
1802 	int ret;
1803 
1804 	pf = vf->pf;
1805 	dev = ice_pf_to_dev(pf);
1806 	vf_vsi = ice_get_vf_vsi(vf);
1807 
1808 #define ICE_VF_MAX_FDIR_FILTERS	128
1809 	if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
1810 	    vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
1811 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1812 		dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
1813 			vf->vf_id);
1814 		goto err_exit;
1815 	}
1816 
1817 	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
1818 	if (ret) {
1819 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1820 		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
1821 		goto err_exit;
1822 	}
1823 
1824 	ret = ice_vf_start_ctrl_vsi(vf);
1825 	if (ret && (ret != -EEXIST)) {
1826 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1827 		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
1828 			vf->vf_id, ret);
1829 		goto err_exit;
1830 	}
1831 
1832 	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
1833 	if (!stat) {
1834 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1835 		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
1836 		goto err_exit;
1837 	}
1838 
1839 	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
1840 	if (!conf) {
1841 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1842 		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
1843 		goto err_exit;
1844 	}
1845 
1846 	len = sizeof(*stat);
1847 	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
1848 	if (ret) {
1849 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1850 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1851 		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
1852 		goto err_free_conf;
1853 	}
1854 
1855 	if (fltr->validate_only) {
1856 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1857 		stat->status = VIRTCHNL_FDIR_SUCCESS;
1858 		devm_kfree(dev, conf);
1859 		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
1860 					    v_ret, (u8 *)stat, len);
1861 		goto exit;
1862 	}
1863 
1864 	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
1865 	if (ret) {
1866 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1867 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
1868 		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
1869 			vf->vf_id, ret);
1870 		goto err_free_conf;
1871 	}
1872 
1873 	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
1874 	if (ret) {
1875 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1876 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
1877 		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
1878 			vf->vf_id);
1879 		goto err_free_conf;
1880 	}
1881 
1882 	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
1883 	if (ret) {
1884 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1885 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1886 		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
1887 		goto err_free_conf;
1888 	}
1889 
1890 	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
1891 	if (ret) {
1892 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1893 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1894 		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
1895 		goto err_rem_entry;
1896 	}
1897 
1898 	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
1899 	if (ret) {
1900 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1901 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1902 		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
1903 			vf->vf_id, ret);
1904 		goto err_clr_irq;
1905 	}
1906 
1907 exit:
1908 	kfree(stat);
1909 	return ret;
1910 
1911 err_clr_irq:
1912 	ice_vc_fdir_clear_irq_ctx(vf);
1913 err_rem_entry:
1914 	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1915 err_free_conf:
1916 	devm_kfree(dev, conf);
1917 err_exit:
1918 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
1919 				    (u8 *)stat, len);
1920 	kfree(stat);
1921 	return ret;
1922 }
1923 
1924 /**
1925  * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
1926  * @vf: pointer to the VF info
1927  * @msg: pointer to the msg buffer
1928  *
1929  * Return: 0 on success, and other on error.
1930  */
1931 int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
1932 {
1933 	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
1934 	struct virtchnl_fdir_del *stat = NULL;
1935 	struct virtchnl_fdir_fltr_conf *conf;
1936 	enum virtchnl_status_code v_ret;
1937 	struct device *dev;
1938 	struct ice_pf *pf;
1939 	int is_tun = 0;
1940 	int len = 0;
1941 	int ret;
1942 
1943 	pf = vf->pf;
1944 	dev = ice_pf_to_dev(pf);
1945 	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
1946 	if (ret) {
1947 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1948 		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
1949 		goto err_exit;
1950 	}
1951 
1952 	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
1953 	if (!stat) {
1954 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1955 		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
1956 		goto err_exit;
1957 	}
1958 
1959 	len = sizeof(*stat);
1960 
1961 	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
1962 	if (!conf) {
1963 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1964 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1965 		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
1966 			vf->vf_id, fltr->flow_id);
1967 		goto err_exit;
1968 	}
1969 
1970 	/* Just return failure when ctrl_vsi idx is invalid */
1971 	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
1972 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1973 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1974 		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
1975 		goto err_exit;
1976 	}
1977 
1978 	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
1979 	if (ret) {
1980 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1981 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1982 		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
1983 		goto err_exit;
1984 	}
1985 
1986 	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
1987 	if (ret) {
1988 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1989 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1990 		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
1991 			vf->vf_id, ret);
1992 		goto err_del_tmr;
1993 	}
1994 
1995 	kfree(stat);
1996 
1997 	return ret;
1998 
1999 err_del_tmr:
2000 	ice_vc_fdir_clear_irq_ctx(vf);
2001 err_exit:
2002 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
2003 				    (u8 *)stat, len);
2004 	kfree(stat);
2005 	return ret;
2006 }
2007 
2008 /**
2009  * ice_vf_fdir_init - init FDIR resource for VF
2010  * @vf: pointer to the VF info
2011  */
2012 void ice_vf_fdir_init(struct ice_vf *vf)
2013 {
2014 	struct ice_vf_fdir *fdir = &vf->fdir;
2015 
2016 	idr_init(&fdir->fdir_rule_idr);
2017 	INIT_LIST_HEAD(&fdir->fdir_rule_list);
2018 
2019 	spin_lock_init(&fdir->ctx_lock);
2020 	fdir->ctx_irq.flags = 0;
2021 	fdir->ctx_done.flags = 0;
2022 	ice_vc_fdir_reset_cnt_all(fdir);
2023 }
2024 
2025 /**
2026  * ice_vf_fdir_exit - destroy FDIR resource for VF
2027  * @vf: pointer to the VF info
2028  */
2029 void ice_vf_fdir_exit(struct ice_vf *vf)
2030 {
2031 	ice_vc_fdir_flush_entry(vf);
2032 	idr_destroy(&vf->fdir.fdir_rule_idr);
2033 	ice_vc_fdir_rem_prof_all(vf);
2034 	ice_vc_fdir_free_prof_all(vf);
2035 }
2036