// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2023, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
#include "ice_vf_lib_private.h"

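/* to_fltr_conf_from_desc - recover the enclosing filter config from the
 * ice_fdir_fltr that is embedded as its first member and linked on the
 * VF's fdir_rule_list.
 */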
#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

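/* Byte offsets and masks below index into the raw header buffer that the
 * VF supplies in each virtchnl_proto_hdr when GTP-U and PFCP patterns are
 * parsed in ice_vc_fdir_parse_pattern().
 */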
#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
#define PFCP_PORT_NR 8805

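/* Native ESP and UDP-encapsulated ESP (NAT-T) share inset flag bit 0: the
 * mask selects the bit and the flag value picks the variant, so each ESP
 * entry in fdir_inset_map only matches rules of its own encapsulation.
 */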
#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};

struct virtchnl_fdir_fltr_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type ttype;
	u64 inset_flag;
	u32 flow_id;
};

struct virtchnl_fdir_inset_map {
	enum virtchnl_proto_hdr_field field;
	enum ice_flow_field fld;
	u64 flag;
	u64 mask;
};

static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};

/**
 * ice_vc_fdir_param_check
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check for a valid VSI ID, and validate the PF and VF states
 *
 * Return: 0 on success, and -EINVAL on error.
 */
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EINVAL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		return -EINVAL;

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
		return -EINVAL;

	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
		return -EINVAL;

	if (!ice_get_vf_vsi(vf))
		return -EINVAL;

	return 0;
}

/**
 * ice_vf_start_ctrl_vsi
 * @vf: pointer to the VF structure
 *
 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *ctrl_vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		return -EEXIST;

	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
	if (!ctrl_vsi) {
		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
			vf->vf_id);
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "Could not open control VSI for VF %d\n",
			vf->vf_id);
		goto err_vsi_open;
	}

	return 0;

err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[vf->ctrl_vsi_idx] = NULL;
		vf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}

/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof) {
		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
					       ICE_FLTR_PTYPE_MAX,
					       sizeof(*fdir->fdir_prof),
					       GFP_KERNEL);
		if (!fdir->fdir_prof)
			return -ENOMEM;
	}

	if (!fdir->fdir_prof[flow]) {
		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
						     sizeof(**fdir->fdir_prof),
						     GFP_KERNEL);
		if (!fdir->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 */
static void
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof)
		return;

	if (!fdir->fdir_prof[flow])
		return;

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
	fdir->fdir_prof[flow] = NULL;
}

/**
 * ice_vc_fdir_free_prof_all - free all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum ice_fltr_ptype flow;

	if (!fdir->fdir_prof)
		return;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_vc_fdir_free_prof(vf, flow);

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
	fdir->fdir_prof = NULL;
}

/**
 * ice_vc_fdir_parse_flow_fld
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store the matched fields in
 * the field type array
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
			   struct virtchnl_fdir_fltr_conf *conf,
			   enum ice_flow_field *fld, int *fld_cnt)
{
	struct virtchnl_proto_hdr hdr;
	u32 i;

	memcpy(&hdr, proto_hdr, sizeof(hdr));

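	/* Work on a local copy of the header: every matched field is deleted
	 * from the copy's selector, letting the loop stop early once no
	 * requested fields remain.
	 */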
	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
			if (fdir_inset_map[i].mask &&
			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
			     fdir_inset_map[i].flag))
				continue;

			fld[*fld_cnt] = fdir_inset_map[i].fld;
			*fld_cnt += 1;
			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
				return -EINVAL;
			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
						     fdir_inset_map[i].field);
		}

	return 0;
}

/**
 * ice_vc_fdir_set_flow_fld
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store it in
 * the flow's packet segment fields
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct virtchnl_proto_hdrs *proto;
	int fld_cnt = 0;
	int i;

	proto = &rule->proto_hdrs;
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		int ret;

		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
		if (ret)
			return ret;
	}

	if (fld_cnt == 0) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < fld_cnt; i++)
		ice_flow_set_fld(seg, fld[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}

/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	enum ice_fltr_ptype flow = conf->input.flow_type;
	enum ice_fdir_tunnel_type ttype = conf->ttype;
	struct device *dev = ice_pf_to_dev(vf->pf);

	switch (flow) {
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
					  ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else {
			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
				ttype, vf->vf_id);
			return -EINVAL;
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
			flow, vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_fd_hw_prof *vf_prof;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_hw *hw;
	u64 prof_id;
	int i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
		return;

	vf_prof = fdir->fdir_prof[flow];
	prof_id = vf_prof->prof_id[tun];

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
		return;
	}

	if (!fdir->prof_entry_cnt[flow][tun])
		return;

	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
		if (vf_prof->entry_h[i][tun]) {
			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);

			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   vf_prof->entry_h[i][tun]);
			vf_prof->entry_h[i][tun] = 0;
		}

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	devm_kfree(dev, vf_prof->fdir_seg[tun]);
	vf_prof->fdir_seg[tun] = NULL;

	for (i = 0; i < vf_prof->cnt; i++)
		vf_prof->vsi_h[i] = 0;

	fdir->prof_entry_cnt[flow][tun] = 0;
}

/**
 * ice_vc_fdir_rem_prof_all - remove all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		ice_vc_fdir_rem_prof(vf, flow, 0);
		ice_vc_fdir_rem_prof(vf, flow, 1);
	}
}

/**
 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
 * @fdir: pointer to the VF FDIR structure
 */
static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		fdir->fdir_fltr_cnt[flow][0] = 0;
		fdir->fdir_fltr_cnt[flow][1] = 0;
	}
}

/**
 * ice_vc_fdir_has_prof_conflict
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 *
 * Check if @conf has a profile conflict with existing filters
 *
 * Return: true if a conflict exists, false otherwise.
 */
static bool
ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
			      struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *existing_conf;
		enum ice_fltr_ptype flow_type_a, flow_type_b;
		struct ice_fdir_fltr *a, *b;

		existing_conf = to_fltr_conf_from_desc(desc);
		a = &existing_conf->input;
		b = &conf->input;
		flow_type_a = a->flow_type;
		flow_type_b = b->flow_type;

		/* No need to compare two rules with different tunnel types or
		 * with the same protocol type.
		 */
		if (existing_conf->ttype != conf->ttype ||
		    flow_type_a == flow_type_b)
			continue;

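		/* An L4-specific (UDP/TCP/SCTP) rule and a catch-all "other"
		 * rule of the same IP version select overlapping flow
		 * profiles, so the pair is treated as a conflict.
		 */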
		switch (flow_type_a) {
		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
				return true;
			break;
		default:
			break;
		}
	}

	return false;
}

/**
 * ice_vc_fdir_write_flow_prof
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
			    struct ice_flow_seg_info *seg, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *vf_prof;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi)
		return -EINVAL;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi)
		return -EINVAL;

	vf_prof = fdir->fdir_prof[flow];
	old_seg = vf_prof->fdir_seg[tun];
	if (old_seg) {
		if (!memcmp(old_seg, seg, sizeof(*seg))) {
			dev_dbg(dev, "Duplicated profile for VF %d!\n",
				vf->vf_id);
			return -EEXIST;
		}

		if (fdir->fdir_fltr_cnt[flow][tun]) {
			ret = -EINVAL;
			dev_dbg(dev, "Input set conflicts for VF %d\n",
				vf->vf_id);
			goto err_exit;
		}

		/* remove previously allocated profile */
		ice_vc_fdir_rem_prof(vf, flow, tun);
	}

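	/* A non-tunnel flow is described by one packet segment, a tunnel
	 * flow by two (outer + inner), hence the tun + 1 segment count.
	 */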
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
				tun + 1, false, &prof);
	if (ret) {
		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
			flow, vf->vf_id);
		goto err_exit;
	}

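	/* First entry steers matching packets to the VF's own VSI */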
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (ret) {
		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_prof;
	}

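	/* Second entry covers the VF's control VSI, which is later used to
	 * post FDIR programming descriptors (see ice_vc_fdir_write_fltr())
	 */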
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (ret) {
		dev_dbg(dev,
			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_entry_1;
	}

	vf_prof->fdir_seg[tun] = seg;
	vf_prof->cnt = 0;
	fdir->prof_entry_cnt[flow][tun] = 0;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->prof_id[tun] = prof->id;

	return 0;

err_entry_1:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
err_exit:
	return ret;
}

/**
 * ice_vc_fdir_config_input_set
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Configure the input set type and value from the virtual channel add msg
 * buffer
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			     struct virtchnl_fdir_fltr_conf *conf, int tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_flow_seg_info *seg;
	enum ice_fltr_ptype flow;
	int ret;

	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
	if (ret) {
		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
			vf->vf_id);
		return ret;
	}

	flow = input->flow_type;
	ret = ice_vc_fdir_alloc_prof(vf, flow);
	if (ret) {
		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
		return ret;
	}

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
	if (ret == -EEXIST) {
		devm_kfree(dev, seg);
	} else if (ret) {
		dev_dbg(dev, "Write flow profile for VF %d failed\n",
			vf->vf_id);
		goto err_exit;
	}

	return 0;

err_exit:
	devm_kfree(dev, seg);
	return ret;
}

/**
 * ice_vc_fdir_parse_pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store it in @conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
			proto->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		struct ip_esp_hdr *esph;
		struct ip_auth_hdr *ah;
		struct sctphdr *sctph;
		struct ipv6hdr *ip6h;
		struct udphdr *udph;
		struct tcphdr *tcph;
		struct ethhdr *eth;
		struct iphdr *iph;
		u8 s_field;
		u8 *rawh;

		switch (hdr->type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			eth = (struct ethhdr *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;

			if (hdr->field_selector)
				input->ext_data.ether_type = eth->h_proto;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			iph = (struct iphdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV4;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;

			if (hdr->field_selector) {
				input->ip.v4.src_ip = iph->saddr;
				input->ip.v4.dst_ip = iph->daddr;
				input->ip.v4.tos = iph->tos;
				input->ip.v4.proto = iph->protocol;
			}
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ip6h = (struct ipv6hdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV6;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;

			if (hdr->field_selector) {
				memcpy(input->ip.v6.src_ip,
				       ip6h->saddr.in6_u.u6_addr8,
				       sizeof(ip6h->saddr));
				memcpy(input->ip.v6.dst_ip,
				       ip6h->daddr.in6_u.u6_addr8,
				       sizeof(ip6h->daddr));
				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
						  (ip6h->flow_lbl[0] >> 4);
				input->ip.v6.proto = ip6h->nexthdr;
			}
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			tcph = (struct tcphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = tcph->source;
					input->ip.v4.dst_port = tcph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = tcph->source;
					input->ip.v6.dst_port = tcph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			udph = (struct udphdr *)hdr->buffer;
			/* remember L4 so a following ESP header is parsed as
			 * NAT-T (UDP-encapsulated) rather than native ESP
			 */
			l4 = VIRTCHNL_PROTO_HDR_UDP;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = udph->source;
					input->ip.v4.dst_port = udph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = udph->source;
					input->ip.v6.dst_port = udph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			sctph = (struct sctphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = sctph->source;
					input->ip.v4.dst_port = sctph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = sctph->source;
					input->ip.v6.dst_port = sctph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;

			if (hdr->field_selector)
				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
			break;
		case VIRTCHNL_PROTO_HDR_ESP:
			esph = (struct ip_esp_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
			    l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
			else
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = esph->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = esph->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_AH:
			ah = (struct ip_auth_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = ah->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = ah->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_PFCP:
			rawh = (u8 *)hdr->buffer;
			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
			}
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
			rawh = (u8 *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

			if (hdr->field_selector)
				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			rawh = (u8 *)hdr->buffer;

			if (hdr->field_selector)
				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			break;
		default:
			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
				hdr->type, vf->vf_id);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_vc_fdir_parse_action
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's action and store it in @conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	u32 dest_num = 0;
	u32 mark_num = 0;
	int i;

	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
		dev_dbg(dev, "Invalid action count:0x%x for VF %d\n",
			as->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < as->count; i++) {
		struct virtchnl_filter_action *action = &as->actions[i];

		switch (action->type) {
		case VIRTCHNL_ACTION_PASSTHRU:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case VIRTCHNL_ACTION_DROP:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case VIRTCHNL_ACTION_QUEUE:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			input->q_index = action->act_conf.queue.index;
			break;
		case VIRTCHNL_ACTION_Q_REGION:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
			input->q_index = action->act_conf.queue.index;
			input->q_region = action->act_conf.queue.region;
			break;
		case VIRTCHNL_ACTION_MARK:
			mark_num++;
			input->fltr_id = action->act_conf.mark_id;
			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
			break;
		default:
			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
				action->type, vf->vf_id);
			return -EINVAL;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		dev_dbg(dev, "Invalid destination action for VF %d\n",
			vf->vf_id);
		return -EINVAL;
	}

	if (mark_num >= 2) {
		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	int ret;

	if (!ice_vc_validate_pattern(vf, proto))
		return -EINVAL;

	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
	if (ret)
		return ret;

	return ice_vc_fdir_parse_action(vf, fltr, conf);
}

/**
 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the two rules match, false otherwise.
 */
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
		       struct virtchnl_fdir_fltr_conf *conf_b)
{
	struct ice_fdir_fltr *a = &conf_a->input;
	struct ice_fdir_fltr *b = &conf_b->input;

	if (conf_a->ttype != conf_b->ttype)
		return false;
	if (a->flow_type != b->flow_type)
		return false;
	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
		return false;
	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
		return false;
	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
		return false;
	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
		return false;
	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
		return false;
	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
		return false;
	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
		return false;
	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
		return false;

	return true;
}

/**
 * ice_vc_fdir_is_dup_fltr
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if there is an existing rule duplicating @conf's value
 *
 * Return: true if a duplicate rule exists, false otherwise.
 */
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;
	bool ret;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *node =
				to_fltr_conf_from_desc(desc);

		ret = ice_vc_fdir_comp_rules(node, conf);
		if (ret)
			return true;
	}

	return false;
}

/**
 * ice_vc_fdir_insert_entry
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into list and allocate ID for this filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	/* alloc ID corresponding with conf */
	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
	if (i < 0)
		return -EINVAL;
	*id = i;

	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
	return 0;
}

/**
 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: filter rule's ID
 */
static void
ice_vc_fdir_remove_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
{
	struct ice_fdir_fltr *input = &conf->input;

	idr_remove(&vf->fdir.fdir_rule_idr, id);
	list_del(&input->fltr_node);
}

/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: pointer to the filter config on success, NULL otherwise.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}

/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}

/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies del rules
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	input->dest_vsi = vsi->idx;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

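	/* Build the programming descriptor and a dummy training packet, then
	 * queue them on the control VSI; the result comes back as a
	 * programming status writeback handled by ice_vc_fdir_irq_handler().
	 */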
	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (ret) {
		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
			vf->vf_id, input->flow_type);
		goto err_free_pkt;
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}

/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 */
static void ice_vf_fdir_timer(struct timer_list *t)
{
	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct ice_vf *vf;
	struct ice_pf *pf;

	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
	vf = container_of(fdir, struct ice_vf, fdir);
	ctx_done = &fdir->ctx_done;
	pf = vf->pf;
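	/* Timed out waiting for the programming status writeback: hand the
	 * in-flight context over to ctx_done so the service task can send
	 * the failure reply outside of timer context.
	 */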
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
			union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_pf *pf = ctrl_vsi->back;
	struct ice_vf *vf = ctrl_vsi->vf;
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir_ctx *ctx_irq;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct device *dev;
	int ret;

	if (WARN_ON(!vf))
		return;

	fdir = &vf->fdir;
	ctx_done = &fdir->ctx_done;
	ctx_irq = &fdir->ctx_irq;
	dev = ice_pf_to_dev(pf);
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_IRQ;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	ret = del_timer(&ctx_irq->rx_tmr);
	if (!ret)
		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
 * @vf: pointer to the VF info
 */
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
	u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u16 vsi_num;

	pf = vf->pf;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
		return;
	}

	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);

	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
		fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
		fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
		fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
		break;
	case ICE_MAC_E810:
	default:
		fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
		fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
		fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
		fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
	}

	dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
		vf->vf_id, fd_size_g, fd_size_b);
	dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
		vf->vf_id, fd_cnt_g, fd_cnt_b);
}

/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
		      enum virtchnl_fdir_prgm_status *status)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u32 stat_err, error, prog_id;
	int ret;

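	/* Check the descriptor writeback: descriptor done must be set, the
	 * programming opcode must match the pending virtchnl opcode, and no
	 * failure bits may be raised.
	 */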
	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
	if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
	    ICE_FXD_FLTR_WB_QW1_DD_YES) {
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
		ret = -EINVAL;
		goto err_exit;
	}

	prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows add, but ctx does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows del, but ctx does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
			dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		} else {
			dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		}
		ret = -EINVAL;
		goto err_exit;
	}

	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		ret = -EINVAL;
		goto err_exit;
	}

	*status = VIRTCHNL_FDIR_SUCCESS;

	return 0;

err_exit:
	ice_vf_fdir_dump_info(vf);
	return ret;
}

/**
 * ice_vc_add_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for the flow director add command. On success, finish post
 * processing and send a success message back over virtchnl. Otherwise,
 * revert the context and send a failure message back over virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for the flow director del command. On success, finish post
 * processing and send a success message back over virtchnl. Otherwise,
 * revert the context and send a failure message back over virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_del *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	devm_kfree(dev, conf);
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	if (success)
		devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_flush_fdir_ctx
 * @pf: pointer to the PF structure
 *
 * Flush all pending events on the ctx_done list and process them.
 */
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		struct device *dev = ice_pf_to_dev(pf);
		enum virtchnl_fdir_prgm_status status;
		struct ice_vf_fdir_ctx *ctx;
		unsigned long flags;
		int ret;

		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
			continue;

		ctx = &vf->fdir.ctx_done;
		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
				vf->vf_id);
			goto err_exit;
		}

		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
		if (ret)
			goto err_exit;

		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		continue;
err_exit:
		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

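	/* Give the hardware 10 msec to report programming status; on expiry
	 * ice_vf_fdir_timer() fails the request with ICE_FDIR_CTX_TIMEOUT.
	 */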
1758 	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));
1759 
1760 	return 0;
1761 }
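
/*
 * Hand-off between the two per-VF contexts, sketched under the assumption
 * that the ctrl VSI IRQ handler and ice_vf_fdir_timer() (both defined
 * elsewhere) behave as the consumers here imply:
 *
 *	ice_vc_fdir_set_irq_ctx()  arms ctx_irq and a ~10 ms rx_tmr
 *	-> ctrl VSI IRQ            presumably publishes the result into
 *	                           ctx_done and sets ICE_FD_VF_FLUSH_CTX
 *	-> ice_vf_fdir_timer()     presumably marks ICE_FDIR_CTX_TIMEOUT
 *	                           when no descriptor arrives in time
 *	-> ice_flush_fdir_ctx()    consumes ctx_done and replies to the VF
 */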

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}

/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, negative errno on failure.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

exit:
	kfree(stat);
	return ret;

err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}
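
/*
 * Note the asymmetry in the success path above: when a filter is actually
 * programmed (fltr->validate_only == 0), no virtchnl reply is sent here.
 * The reply is deferred until the programming descriptor completes or the
 * rx_tmr fires, at which point ice_flush_fdir_ctx() delivers it through
 * ice_vc_add_fdir_fltr_post(). Only the validate_only and error paths
 * answer the VF synchronously.
 */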

/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, negative errno on failure.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}
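
/*
 * As in the add path, a successful delete returns without replying; the
 * response is deferred to ice_vc_del_fdir_fltr_post() via
 * ice_flush_fdir_ctx(). That handler (defined elsewhere) is also assumed
 * to drop the rule from the IDR once hardware confirms removal, which is
 * why no ice_vc_fdir_remove_entry() call appears in this function.
 */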

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
	ice_vc_fdir_reset_cnt_all(fdir);
}
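
/*
 * Locking note: the single fdir->ctx_lock initialized above guards the
 * ICE_VF_FDIR_CTX_VALID flag of both ctx_irq and ctx_done; that is what
 * lets ice_vc_fdir_set_irq_ctx() reject a new request with -EBUSY while
 * either context is still live.
 */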

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}
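
/*
 * A minimal pairing sketch, assuming hypothetical VF lifecycle hooks (the
 * real call sites live in the VF management code, not in this file):
 *
 *	ice_vf_fdir_init(vf);	// before the VF may send FDIR virtchnl ops
 *	...			// VIRTCHNL_OP_ADD/DEL_FDIR_FILTER traffic
 *	ice_vf_fdir_exit(vf);	// on teardown: flush rules, destroy the IDR,
 *				// remove and free flow director profiles
 */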