xref: /linux/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c (revision 2b0cfa6e49566c8fa6759734cf821aa6e8271a9e)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021-2023, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_base.h"
6 #include "ice_lib.h"
7 #include "ice_flow.h"
8 #include "ice_vf_lib_private.h"
9 
10 #define to_fltr_conf_from_desc(p) \
11 	container_of(p, struct virtchnl_fdir_fltr_conf, input)
12 
13 #define GTPU_TEID_OFFSET 4
14 #define GTPU_EH_QFI_OFFSET 1
15 #define GTPU_EH_QFI_MASK 0x3F
16 #define PFCP_S_OFFSET 0
17 #define PFCP_S_MASK 0x1
18 #define PFCP_PORT_NR 8805
19 
20 #define FDIR_INSET_FLAG_ESP_S 0
21 #define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
22 #define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
23 #define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
24 
25 enum ice_fdir_tunnel_type {
26 	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
27 	ICE_FDIR_TUNNEL_TYPE_GTPU,
28 	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
29 };
30 
31 struct virtchnl_fdir_fltr_conf {
32 	struct ice_fdir_fltr input;
33 	enum ice_fdir_tunnel_type ttype;
34 	u64 inset_flag;
35 	u32 flow_id;
36 };
37 
38 struct virtchnl_fdir_inset_map {
39 	enum virtchnl_proto_hdr_field field;
40 	enum ice_flow_field fld;
41 	u64 flag;
42 	u64 mask;
43 };
44 
45 static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
46 	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
47 	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
48 	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
49 	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
50 	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
51 	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
52 	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
53 	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
54 	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
55 	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
56 	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
57 	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
58 	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
59 	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
60 	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
61 	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
62 	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
63 	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
64 	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
65 	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
66 		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
67 	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
68 		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
69 	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
70 	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
71 	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
72 };
73 
74 /**
75  * ice_vc_fdir_param_check
76  * @vf: pointer to the VF structure
77  * @vsi_id: VF relative VSI ID
78  *
79  * Check for the valid VSI ID, PF's state and VF's state
80  *
81  * Return: 0 on success, and -EINVAL on error.
82  */
83 static int
84 ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
85 {
86 	struct ice_pf *pf = vf->pf;
87 
88 	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
89 		return -EINVAL;
90 
91 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
92 		return -EINVAL;
93 
94 	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
95 		return -EINVAL;
96 
97 	if (vsi_id != vf->lan_vsi_num)
98 		return -EINVAL;
99 
100 	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
101 		return -EINVAL;
102 
103 	if (!ice_get_vf_vsi(vf))
104 		return -EINVAL;
105 
106 	return 0;
107 }
108 
109 /**
110  * ice_vf_start_ctrl_vsi
111  * @vf: pointer to the VF structure
112  *
113  * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
114  *
115  * Return: 0 on success, and other on error.
116  */
117 static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
118 {
119 	struct ice_pf *pf = vf->pf;
120 	struct ice_vsi *ctrl_vsi;
121 	struct device *dev;
122 	int err;
123 
124 	dev = ice_pf_to_dev(pf);
125 	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
126 		return -EEXIST;
127 
128 	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
129 	if (!ctrl_vsi) {
130 		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
131 			vf->vf_id);
132 		return -ENOMEM;
133 	}
134 
135 	err = ice_vsi_open_ctrl(ctrl_vsi);
136 	if (err) {
137 		dev_dbg(dev, "Could not open control VSI for VF %d\n",
138 			vf->vf_id);
139 		goto err_vsi_open;
140 	}
141 
142 	return 0;
143 
144 err_vsi_open:
145 	ice_vsi_release(ctrl_vsi);
146 	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
147 		pf->vsi[vf->ctrl_vsi_idx] = NULL;
148 		vf->ctrl_vsi_idx = ICE_NO_VSI;
149 	}
150 	return err;
151 }
152 
153 /**
154  * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
155  * @vf: pointer to the VF structure
156  * @flow: filter flow type
157  *
158  * Return: 0 on success, and other on error.
159  */
160 static int
161 ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
162 {
163 	struct ice_vf_fdir *fdir = &vf->fdir;
164 
165 	if (!fdir->fdir_prof) {
166 		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
167 					       ICE_FLTR_PTYPE_MAX,
168 					       sizeof(*fdir->fdir_prof),
169 					       GFP_KERNEL);
170 		if (!fdir->fdir_prof)
171 			return -ENOMEM;
172 	}
173 
174 	if (!fdir->fdir_prof[flow]) {
175 		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
176 						     sizeof(**fdir->fdir_prof),
177 						     GFP_KERNEL);
178 		if (!fdir->fdir_prof[flow])
179 			return -ENOMEM;
180 	}
181 
182 	return 0;
183 }
184 
185 /**
186  * ice_vc_fdir_free_prof - free profile for this filter flow type
187  * @vf: pointer to the VF structure
188  * @flow: filter flow type
189  */
190 static void
191 ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
192 {
193 	struct ice_vf_fdir *fdir = &vf->fdir;
194 
195 	if (!fdir->fdir_prof)
196 		return;
197 
198 	if (!fdir->fdir_prof[flow])
199 		return;
200 
201 	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
202 	fdir->fdir_prof[flow] = NULL;
203 }
204 
205 /**
206  * ice_vc_fdir_free_prof_all - free all the profile for this VF
207  * @vf: pointer to the VF structure
208  */
209 static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
210 {
211 	struct ice_vf_fdir *fdir = &vf->fdir;
212 	enum ice_fltr_ptype flow;
213 
214 	if (!fdir->fdir_prof)
215 		return;
216 
217 	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
218 		ice_vc_fdir_free_prof(vf, flow);
219 
220 	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
221 	fdir->fdir_prof = NULL;
222 }
223 
224 /**
225  * ice_vc_fdir_parse_flow_fld
226  * @proto_hdr: virtual channel protocol filter header
227  * @conf: FDIR configuration for each filter
228  * @fld: field type array
229  * @fld_cnt: field counter
230  *
231  * Parse the virtual channel filter header and store them into field type array
232  *
233  * Return: 0 on success, and other on error.
234  */
235 static int
236 ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
237 			   struct virtchnl_fdir_fltr_conf *conf,
238 			   enum ice_flow_field *fld, int *fld_cnt)
239 {
240 	struct virtchnl_proto_hdr hdr;
241 	u32 i;
242 
243 	memcpy(&hdr, proto_hdr, sizeof(hdr));
244 
245 	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
246 	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
247 		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
248 			if (fdir_inset_map[i].mask &&
249 			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
250 			     fdir_inset_map[i].flag))
251 				continue;
252 
253 			fld[*fld_cnt] = fdir_inset_map[i].fld;
254 			*fld_cnt += 1;
255 			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
256 				return -EINVAL;
257 			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
258 						     fdir_inset_map[i].field);
259 		}
260 
261 	return 0;
262 }
263 
264 /**
265  * ice_vc_fdir_set_flow_fld
266  * @vf: pointer to the VF structure
267  * @fltr: virtual channel add cmd buffer
268  * @conf: FDIR configuration for each filter
269  * @seg: array of one or more packet segments that describe the flow
270  *
271  * Parse the virtual channel add msg buffer's field vector and store them into
272  * flow's packet segment field
273  *
274  * Return: 0 on success, and other on error.
275  */
276 static int
277 ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
278 			 struct virtchnl_fdir_fltr_conf *conf,
279 			 struct ice_flow_seg_info *seg)
280 {
281 	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
282 	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
283 	struct device *dev = ice_pf_to_dev(vf->pf);
284 	struct virtchnl_proto_hdrs *proto;
285 	int fld_cnt = 0;
286 	int i;
287 
288 	proto = &rule->proto_hdrs;
289 	for (i = 0; i < proto->count; i++) {
290 		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
291 		int ret;
292 
293 		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
294 		if (ret)
295 			return ret;
296 	}
297 
298 	if (fld_cnt == 0) {
299 		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
300 		return -EINVAL;
301 	}
302 
303 	for (i = 0; i < fld_cnt; i++)
304 		ice_flow_set_fld(seg, fld[i],
305 				 ICE_FLOW_FLD_OFF_INVAL,
306 				 ICE_FLOW_FLD_OFF_INVAL,
307 				 ICE_FLOW_FLD_OFF_INVAL, false);
308 
309 	return 0;
310 }
311 
312 /**
313  * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
314  * @vf: pointer to the VF structure
315  * @conf: FDIR configuration for each filter
316  * @seg: array of one or more packet segments that describe the flow
317  *
318  * Return: 0 on success, and other on error.
319  */
320 static int
321 ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
322 			 struct virtchnl_fdir_fltr_conf *conf,
323 			 struct ice_flow_seg_info *seg)
324 {
325 	enum ice_fltr_ptype flow = conf->input.flow_type;
326 	enum ice_fdir_tunnel_type ttype = conf->ttype;
327 	struct device *dev = ice_pf_to_dev(vf->pf);
328 
329 	switch (flow) {
330 	case ICE_FLTR_PTYPE_NON_IP_L2:
331 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
332 		break;
333 	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
334 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
335 				  ICE_FLOW_SEG_HDR_IPV4 |
336 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
337 		break;
338 	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
339 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
340 				  ICE_FLOW_SEG_HDR_IPV4 |
341 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
342 		break;
343 	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
344 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
345 				  ICE_FLOW_SEG_HDR_IPV4 |
346 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
347 		break;
348 	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
349 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
350 				  ICE_FLOW_SEG_HDR_IPV4 |
351 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
352 		break;
353 	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
354 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
355 				  ICE_FLOW_SEG_HDR_IPV4 |
356 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
357 		break;
358 	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
359 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
360 				  ICE_FLOW_SEG_HDR_IPV4 |
361 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
362 		break;
363 	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
364 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
365 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
366 		break;
367 	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
368 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
369 				  ICE_FLOW_SEG_HDR_IPV4 |
370 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
371 		break;
372 	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
373 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
374 				  ICE_FLOW_SEG_HDR_IPV4 |
375 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
376 		break;
377 	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
378 	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
379 	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
380 	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
381 		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
382 			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
383 					  ICE_FLOW_SEG_HDR_IPV4 |
384 					  ICE_FLOW_SEG_HDR_IPV_OTHER);
385 		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
386 			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
387 					  ICE_FLOW_SEG_HDR_GTPU_IP |
388 					  ICE_FLOW_SEG_HDR_IPV4 |
389 					  ICE_FLOW_SEG_HDR_IPV_OTHER);
390 		} else {
391 			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
392 				flow, vf->vf_id);
393 			return -EINVAL;
394 		}
395 		break;
396 	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
397 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
398 				  ICE_FLOW_SEG_HDR_IPV4 |
399 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
400 		break;
401 	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
402 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
403 				  ICE_FLOW_SEG_HDR_IPV6 |
404 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
405 		break;
406 	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
407 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
408 				  ICE_FLOW_SEG_HDR_IPV6 |
409 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
410 		break;
411 	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
412 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
413 				  ICE_FLOW_SEG_HDR_IPV6 |
414 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
415 		break;
416 	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
417 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
418 				  ICE_FLOW_SEG_HDR_IPV6 |
419 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
420 		break;
421 	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
422 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
423 				  ICE_FLOW_SEG_HDR_IPV6 |
424 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
425 		break;
426 	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
427 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
428 				  ICE_FLOW_SEG_HDR_IPV6 |
429 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
430 		break;
431 	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
432 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
433 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
434 		break;
435 	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
436 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
437 				  ICE_FLOW_SEG_HDR_IPV6 |
438 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
439 		break;
440 	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
441 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
442 				  ICE_FLOW_SEG_HDR_IPV6 |
443 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
444 		break;
445 	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
446 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
447 				  ICE_FLOW_SEG_HDR_IPV6 |
448 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
449 		break;
450 	default:
451 		dev_dbg(dev, "Invalid flow type 0x%x for VF %d failed\n",
452 			flow, vf->vf_id);
453 		return -EINVAL;
454 	}
455 
456 	return 0;
457 }
458 
459 /**
460  * ice_vc_fdir_rem_prof - remove profile for this filter flow type
461  * @vf: pointer to the VF structure
462  * @flow: filter flow type
463  * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
464  */
465 static void
466 ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
467 {
468 	struct ice_vf_fdir *fdir = &vf->fdir;
469 	struct ice_fd_hw_prof *vf_prof;
470 	struct ice_pf *pf = vf->pf;
471 	struct ice_vsi *vf_vsi;
472 	struct device *dev;
473 	struct ice_hw *hw;
474 	u64 prof_id;
475 	int i;
476 
477 	dev = ice_pf_to_dev(pf);
478 	hw = &pf->hw;
479 	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
480 		return;
481 
482 	vf_prof = fdir->fdir_prof[flow];
483 	prof_id = vf_prof->prof_id[tun];
484 
485 	vf_vsi = ice_get_vf_vsi(vf);
486 	if (!vf_vsi) {
487 		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
488 		return;
489 	}
490 
491 	if (!fdir->prof_entry_cnt[flow][tun])
492 		return;
493 
494 	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
495 		if (vf_prof->entry_h[i][tun]) {
496 			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
497 
498 			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
499 			ice_flow_rem_entry(hw, ICE_BLK_FD,
500 					   vf_prof->entry_h[i][tun]);
501 			vf_prof->entry_h[i][tun] = 0;
502 		}
503 
504 	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
505 	devm_kfree(dev, vf_prof->fdir_seg[tun]);
506 	vf_prof->fdir_seg[tun] = NULL;
507 
508 	for (i = 0; i < vf_prof->cnt; i++)
509 		vf_prof->vsi_h[i] = 0;
510 
511 	fdir->prof_entry_cnt[flow][tun] = 0;
512 }
513 
514 /**
515  * ice_vc_fdir_rem_prof_all - remove profile for this VF
516  * @vf: pointer to the VF structure
517  */
518 static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
519 {
520 	enum ice_fltr_ptype flow;
521 
522 	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
523 	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
524 		ice_vc_fdir_rem_prof(vf, flow, 0);
525 		ice_vc_fdir_rem_prof(vf, flow, 1);
526 	}
527 }
528 
529 /**
530  * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
531  * @fdir: pointer to the VF FDIR structure
532  */
533 static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
534 {
535 	enum ice_fltr_ptype flow;
536 
537 	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
538 	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
539 		fdir->fdir_fltr_cnt[flow][0] = 0;
540 		fdir->fdir_fltr_cnt[flow][1] = 0;
541 	}
542 }
543 
544 /**
545  * ice_vc_fdir_has_prof_conflict
546  * @vf: pointer to the VF structure
547  * @conf: FDIR configuration for each filter
548  *
549  * Check if @conf has conflicting profile with existing profiles
550  *
551  * Return: true on success, and false on error.
552  */
553 static bool
554 ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
555 			      struct virtchnl_fdir_fltr_conf *conf)
556 {
557 	struct ice_fdir_fltr *desc;
558 
559 	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
560 		struct virtchnl_fdir_fltr_conf *existing_conf;
561 		enum ice_fltr_ptype flow_type_a, flow_type_b;
562 		struct ice_fdir_fltr *a, *b;
563 
564 		existing_conf = to_fltr_conf_from_desc(desc);
565 		a = &existing_conf->input;
566 		b = &conf->input;
567 		flow_type_a = a->flow_type;
568 		flow_type_b = b->flow_type;
569 
570 		/* No need to compare two rules with different tunnel types or
571 		 * with the same protocol type.
572 		 */
573 		if (existing_conf->ttype != conf->ttype ||
574 		    flow_type_a == flow_type_b)
575 			continue;
576 
577 		switch (flow_type_a) {
578 		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
579 		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
580 		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
581 			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
582 				return true;
583 			break;
584 		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
585 			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
586 			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
587 			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
588 				return true;
589 			break;
590 		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
591 		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
592 		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
593 			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
594 				return true;
595 			break;
596 		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
597 			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
598 			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
599 			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
600 				return true;
601 			break;
602 		default:
603 			break;
604 		}
605 	}
606 
607 	return false;
608 }
609 
610 /**
611  * ice_vc_fdir_write_flow_prof
612  * @vf: pointer to the VF structure
613  * @flow: filter flow type
614  * @seg: array of one or more packet segments that describe the flow
615  * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
616  *
617  * Write the flow's profile config and packet segment into the hardware
618  *
619  * Return: 0 on success, and other on error.
620  */
621 static int
622 ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
623 			    struct ice_flow_seg_info *seg, int tun)
624 {
625 	struct ice_vf_fdir *fdir = &vf->fdir;
626 	struct ice_vsi *vf_vsi, *ctrl_vsi;
627 	struct ice_flow_seg_info *old_seg;
628 	struct ice_flow_prof *prof = NULL;
629 	struct ice_fd_hw_prof *vf_prof;
630 	struct device *dev;
631 	struct ice_pf *pf;
632 	struct ice_hw *hw;
633 	u64 entry1_h = 0;
634 	u64 entry2_h = 0;
635 	int ret;
636 
637 	pf = vf->pf;
638 	dev = ice_pf_to_dev(pf);
639 	hw = &pf->hw;
640 	vf_vsi = ice_get_vf_vsi(vf);
641 	if (!vf_vsi)
642 		return -EINVAL;
643 
644 	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
645 	if (!ctrl_vsi)
646 		return -EINVAL;
647 
648 	vf_prof = fdir->fdir_prof[flow];
649 	old_seg = vf_prof->fdir_seg[tun];
650 	if (old_seg) {
651 		if (!memcmp(old_seg, seg, sizeof(*seg))) {
652 			dev_dbg(dev, "Duplicated profile for VF %d!\n",
653 				vf->vf_id);
654 			return -EEXIST;
655 		}
656 
657 		if (fdir->fdir_fltr_cnt[flow][tun]) {
658 			ret = -EINVAL;
659 			dev_dbg(dev, "Input set conflicts for VF %d\n",
660 				vf->vf_id);
661 			goto err_exit;
662 		}
663 
664 		/* remove previously allocated profile */
665 		ice_vc_fdir_rem_prof(vf, flow, tun);
666 	}
667 
668 	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
669 				tun + 1, false, &prof);
670 	if (ret) {
671 		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
672 			flow, vf->vf_id);
673 		goto err_exit;
674 	}
675 
676 	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
677 				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
678 				 seg, &entry1_h);
679 	if (ret) {
680 		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
681 			flow, vf->vf_id);
682 		goto err_prof;
683 	}
684 
685 	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
686 				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
687 				 seg, &entry2_h);
688 	if (ret) {
689 		dev_dbg(dev,
690 			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
691 			flow, vf->vf_id);
692 		goto err_entry_1;
693 	}
694 
695 	vf_prof->fdir_seg[tun] = seg;
696 	vf_prof->cnt = 0;
697 	fdir->prof_entry_cnt[flow][tun] = 0;
698 
699 	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
700 	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
701 	vf_prof->cnt++;
702 	fdir->prof_entry_cnt[flow][tun]++;
703 
704 	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
705 	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
706 	vf_prof->cnt++;
707 	fdir->prof_entry_cnt[flow][tun]++;
708 
709 	vf_prof->prof_id[tun] = prof->id;
710 
711 	return 0;
712 
713 err_entry_1:
714 	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
715 			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
716 	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
717 err_prof:
718 	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
719 err_exit:
720 	return ret;
721 }
722 
723 /**
724  * ice_vc_fdir_config_input_set
725  * @vf: pointer to the VF structure
726  * @fltr: virtual channel add cmd buffer
727  * @conf: FDIR configuration for each filter
728  * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
729  *
730  * Config the input set type and value for virtual channel add msg buffer
731  *
732  * Return: 0 on success, and other on error.
733  */
734 static int
735 ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
736 			     struct virtchnl_fdir_fltr_conf *conf, int tun)
737 {
738 	struct ice_fdir_fltr *input = &conf->input;
739 	struct device *dev = ice_pf_to_dev(vf->pf);
740 	struct ice_flow_seg_info *seg;
741 	enum ice_fltr_ptype flow;
742 	int ret;
743 
744 	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
745 	if (ret) {
746 		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
747 			vf->vf_id);
748 		return ret;
749 	}
750 
751 	flow = input->flow_type;
752 	ret = ice_vc_fdir_alloc_prof(vf, flow);
753 	if (ret) {
754 		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
755 		return ret;
756 	}
757 
758 	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
759 	if (!seg)
760 		return -ENOMEM;
761 
762 	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
763 	if (ret) {
764 		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
765 		goto err_exit;
766 	}
767 
768 	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
769 	if (ret) {
770 		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
771 		goto err_exit;
772 	}
773 
774 	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
775 	if (ret == -EEXIST) {
776 		devm_kfree(dev, seg);
777 	} else if (ret) {
778 		dev_dbg(dev, "Write flow profile for VF %d failed\n",
779 			vf->vf_id);
780 		goto err_exit;
781 	}
782 
783 	return 0;
784 
785 err_exit:
786 	devm_kfree(dev, seg);
787 	return ret;
788 }
789 
790 /**
791  * ice_vc_fdir_parse_pattern
792  * @vf: pointer to the VF info
793  * @fltr: virtual channel add cmd buffer
794  * @conf: FDIR configuration for each filter
795  *
796  * Parse the virtual channel filter's pattern and store them into conf
797  *
798  * Return: 0 on success, and other on error.
799  */
800 static int
801 ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
802 			  struct virtchnl_fdir_fltr_conf *conf)
803 {
804 	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
805 	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
806 	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
807 	struct device *dev = ice_pf_to_dev(vf->pf);
808 	struct ice_fdir_fltr *input = &conf->input;
809 	int i;
810 
811 	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
812 		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
813 			proto->count, vf->vf_id);
814 		return -EINVAL;
815 	}
816 
817 	for (i = 0; i < proto->count; i++) {
818 		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
819 		struct ip_esp_hdr *esph;
820 		struct ip_auth_hdr *ah;
821 		struct sctphdr *sctph;
822 		struct ipv6hdr *ip6h;
823 		struct udphdr *udph;
824 		struct tcphdr *tcph;
825 		struct ethhdr *eth;
826 		struct iphdr *iph;
827 		u8 s_field;
828 		u8 *rawh;
829 
830 		switch (hdr->type) {
831 		case VIRTCHNL_PROTO_HDR_ETH:
832 			eth = (struct ethhdr *)hdr->buffer;
833 			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
834 
835 			if (hdr->field_selector)
836 				input->ext_data.ether_type = eth->h_proto;
837 			break;
838 		case VIRTCHNL_PROTO_HDR_IPV4:
839 			iph = (struct iphdr *)hdr->buffer;
840 			l3 = VIRTCHNL_PROTO_HDR_IPV4;
841 			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
842 
843 			if (hdr->field_selector) {
844 				input->ip.v4.src_ip = iph->saddr;
845 				input->ip.v4.dst_ip = iph->daddr;
846 				input->ip.v4.tos = iph->tos;
847 				input->ip.v4.proto = iph->protocol;
848 			}
849 			break;
850 		case VIRTCHNL_PROTO_HDR_IPV6:
851 			ip6h = (struct ipv6hdr *)hdr->buffer;
852 			l3 = VIRTCHNL_PROTO_HDR_IPV6;
853 			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
854 
855 			if (hdr->field_selector) {
856 				memcpy(input->ip.v6.src_ip,
857 				       ip6h->saddr.in6_u.u6_addr8,
858 				       sizeof(ip6h->saddr));
859 				memcpy(input->ip.v6.dst_ip,
860 				       ip6h->daddr.in6_u.u6_addr8,
861 				       sizeof(ip6h->daddr));
862 				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
863 						  (ip6h->flow_lbl[0] >> 4);
864 				input->ip.v6.proto = ip6h->nexthdr;
865 			}
866 			break;
867 		case VIRTCHNL_PROTO_HDR_TCP:
868 			tcph = (struct tcphdr *)hdr->buffer;
869 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
870 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
871 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
872 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
873 
874 			if (hdr->field_selector) {
875 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
876 					input->ip.v4.src_port = tcph->source;
877 					input->ip.v4.dst_port = tcph->dest;
878 				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
879 					input->ip.v6.src_port = tcph->source;
880 					input->ip.v6.dst_port = tcph->dest;
881 				}
882 			}
883 			break;
884 		case VIRTCHNL_PROTO_HDR_UDP:
885 			udph = (struct udphdr *)hdr->buffer;
886 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
887 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
888 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
889 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
890 
891 			if (hdr->field_selector) {
892 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
893 					input->ip.v4.src_port = udph->source;
894 					input->ip.v4.dst_port = udph->dest;
895 				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
896 					input->ip.v6.src_port = udph->source;
897 					input->ip.v6.dst_port = udph->dest;
898 				}
899 			}
900 			break;
901 		case VIRTCHNL_PROTO_HDR_SCTP:
902 			sctph = (struct sctphdr *)hdr->buffer;
903 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
904 				input->flow_type =
905 					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
906 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
907 				input->flow_type =
908 					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
909 
910 			if (hdr->field_selector) {
911 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
912 					input->ip.v4.src_port = sctph->source;
913 					input->ip.v4.dst_port = sctph->dest;
914 				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
915 					input->ip.v6.src_port = sctph->source;
916 					input->ip.v6.dst_port = sctph->dest;
917 				}
918 			}
919 			break;
920 		case VIRTCHNL_PROTO_HDR_L2TPV3:
921 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
922 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
923 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
924 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
925 
926 			if (hdr->field_selector)
927 				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
928 			break;
929 		case VIRTCHNL_PROTO_HDR_ESP:
930 			esph = (struct ip_esp_hdr *)hdr->buffer;
931 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
932 			    l4 == VIRTCHNL_PROTO_HDR_UDP)
933 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
934 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
935 				 l4 == VIRTCHNL_PROTO_HDR_UDP)
936 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
937 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
938 				 l4 == VIRTCHNL_PROTO_HDR_NONE)
939 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
940 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
941 				 l4 == VIRTCHNL_PROTO_HDR_NONE)
942 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
943 
944 			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
945 				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
946 			else
947 				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
948 
949 			if (hdr->field_selector) {
950 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
951 					input->ip.v4.sec_parm_idx = esph->spi;
952 				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
953 					input->ip.v6.sec_parm_idx = esph->spi;
954 			}
955 			break;
956 		case VIRTCHNL_PROTO_HDR_AH:
957 			ah = (struct ip_auth_hdr *)hdr->buffer;
958 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
959 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
960 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
961 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
962 
963 			if (hdr->field_selector) {
964 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
965 					input->ip.v4.sec_parm_idx = ah->spi;
966 				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
967 					input->ip.v6.sec_parm_idx = ah->spi;
968 			}
969 			break;
970 		case VIRTCHNL_PROTO_HDR_PFCP:
971 			rawh = (u8 *)hdr->buffer;
972 			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
973 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
974 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
975 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
976 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
977 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
978 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
979 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
980 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
981 
982 			if (hdr->field_selector) {
983 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
984 					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
985 				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
986 					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
987 			}
988 			break;
989 		case VIRTCHNL_PROTO_HDR_GTPU_IP:
990 			rawh = (u8 *)hdr->buffer;
991 			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
992 
993 			if (hdr->field_selector)
994 				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
995 			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
996 			break;
997 		case VIRTCHNL_PROTO_HDR_GTPU_EH:
998 			rawh = (u8 *)hdr->buffer;
999 
1000 			if (hdr->field_selector)
1001 				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
1002 			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1003 			break;
1004 		default:
1005 			dev_dbg(dev, "Invalid header type 0x:%x for VF %d\n",
1006 				hdr->type, vf->vf_id);
1007 			return -EINVAL;
1008 		}
1009 	}
1010 
1011 	return 0;
1012 }
1013 
1014 /**
1015  * ice_vc_fdir_parse_action
1016  * @vf: pointer to the VF info
1017  * @fltr: virtual channel add cmd buffer
1018  * @conf: FDIR configuration for each filter
1019  *
1020  * Parse the virtual channel filter's action and store them into conf
1021  *
1022  * Return: 0 on success, and other on error.
1023  */
1024 static int
1025 ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1026 			 struct virtchnl_fdir_fltr_conf *conf)
1027 {
1028 	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
1029 	struct device *dev = ice_pf_to_dev(vf->pf);
1030 	struct ice_fdir_fltr *input = &conf->input;
1031 	u32 dest_num = 0;
1032 	u32 mark_num = 0;
1033 	int i;
1034 
1035 	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
1036 		dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
1037 			as->count, vf->vf_id);
1038 		return -EINVAL;
1039 	}
1040 
1041 	for (i = 0; i < as->count; i++) {
1042 		struct virtchnl_filter_action *action = &as->actions[i];
1043 
1044 		switch (action->type) {
1045 		case VIRTCHNL_ACTION_PASSTHRU:
1046 			dest_num++;
1047 			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1048 			break;
1049 		case VIRTCHNL_ACTION_DROP:
1050 			dest_num++;
1051 			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1052 			break;
1053 		case VIRTCHNL_ACTION_QUEUE:
1054 			dest_num++;
1055 			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1056 			input->q_index = action->act_conf.queue.index;
1057 			break;
1058 		case VIRTCHNL_ACTION_Q_REGION:
1059 			dest_num++;
1060 			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1061 			input->q_index = action->act_conf.queue.index;
1062 			input->q_region = action->act_conf.queue.region;
1063 			break;
1064 		case VIRTCHNL_ACTION_MARK:
1065 			mark_num++;
1066 			input->fltr_id = action->act_conf.mark_id;
1067 			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
1068 			break;
1069 		default:
1070 			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
1071 				action->type, vf->vf_id);
1072 			return -EINVAL;
1073 		}
1074 	}
1075 
1076 	if (dest_num == 0 || dest_num >= 2) {
1077 		dev_dbg(dev, "Invalid destination action for VF %d\n",
1078 			vf->vf_id);
1079 		return -EINVAL;
1080 	}
1081 
1082 	if (mark_num >= 2) {
1083 		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
1084 		return -EINVAL;
1085 	}
1086 
1087 	return 0;
1088 }
1089 
1090 /**
1091  * ice_vc_validate_fdir_fltr - validate the virtual channel filter
1092  * @vf: pointer to the VF info
1093  * @fltr: virtual channel add cmd buffer
1094  * @conf: FDIR configuration for each filter
1095  *
1096  * Return: 0 on success, and other on error.
1097  */
1098 static int
1099 ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1100 			  struct virtchnl_fdir_fltr_conf *conf)
1101 {
1102 	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
1103 	int ret;
1104 
1105 	if (!ice_vc_validate_pattern(vf, proto))
1106 		return -EINVAL;
1107 
1108 	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
1109 	if (ret)
1110 		return ret;
1111 
1112 	return ice_vc_fdir_parse_action(vf, fltr, conf);
1113 }
1114 
1115 /**
1116  * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
1117  * @conf_a: FDIR configuration for filter a
1118  * @conf_b: FDIR configuration for filter b
1119  *
1120  * Return: 0 on success, and other on error.
1121  */
1122 static bool
1123 ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
1124 		       struct virtchnl_fdir_fltr_conf *conf_b)
1125 {
1126 	struct ice_fdir_fltr *a = &conf_a->input;
1127 	struct ice_fdir_fltr *b = &conf_b->input;
1128 
1129 	if (conf_a->ttype != conf_b->ttype)
1130 		return false;
1131 	if (a->flow_type != b->flow_type)
1132 		return false;
1133 	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
1134 		return false;
1135 	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
1136 		return false;
1137 	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
1138 		return false;
1139 	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
1140 		return false;
1141 	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
1142 		return false;
1143 	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
1144 		return false;
1145 	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
1146 		return false;
1147 	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
1148 		return false;
1149 
1150 	return true;
1151 }
1152 
1153 /**
1154  * ice_vc_fdir_is_dup_fltr
1155  * @vf: pointer to the VF info
1156  * @conf: FDIR configuration for each filter
1157  *
1158  * Check if there is duplicated rule with same conf value
1159  *
1160  * Return: 0 true success, and false on error.
1161  */
1162 static bool
1163 ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
1164 {
1165 	struct ice_fdir_fltr *desc;
1166 	bool ret;
1167 
1168 	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
1169 		struct virtchnl_fdir_fltr_conf *node =
1170 				to_fltr_conf_from_desc(desc);
1171 
1172 		ret = ice_vc_fdir_comp_rules(node, conf);
1173 		if (ret)
1174 			return true;
1175 	}
1176 
1177 	return false;
1178 }
1179 
1180 /**
1181  * ice_vc_fdir_insert_entry
1182  * @vf: pointer to the VF info
1183  * @conf: FDIR configuration for each filter
1184  * @id: pointer to ID value allocated by driver
1185  *
1186  * Insert FDIR conf entry into list and allocate ID for this filter
1187  *
1188  * Return: 0 true success, and other on error.
1189  */
1190 static int
1191 ice_vc_fdir_insert_entry(struct ice_vf *vf,
1192 			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
1193 {
1194 	struct ice_fdir_fltr *input = &conf->input;
1195 	int i;
1196 
1197 	/* alloc ID corresponding with conf */
1198 	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
1199 		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
1200 	if (i < 0)
1201 		return -EINVAL;
1202 	*id = i;
1203 
1204 	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
1205 	return 0;
1206 }
1207 
1208 /**
1209  * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
1210  * @vf: pointer to the VF info
1211  * @conf: FDIR configuration for each filter
1212  * @id: filter rule's ID
1213  */
1214 static void
1215 ice_vc_fdir_remove_entry(struct ice_vf *vf,
1216 			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
1217 {
1218 	struct ice_fdir_fltr *input = &conf->input;
1219 
1220 	idr_remove(&vf->fdir.fdir_rule_idr, id);
1221 	list_del(&input->fltr_node);
1222 }
1223 
1224 /**
1225  * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
1226  * @vf: pointer to the VF info
1227  * @id: filter rule's ID
1228  *
1229  * Return: NULL on error, and other on success.
1230  */
1231 static struct virtchnl_fdir_fltr_conf *
1232 ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
1233 {
1234 	return idr_find(&vf->fdir.fdir_rule_idr, id);
1235 }
1236 
1237 /**
1238  * ice_vc_fdir_flush_entry - remove all FDIR conf entry
1239  * @vf: pointer to the VF info
1240  */
1241 static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
1242 {
1243 	struct virtchnl_fdir_fltr_conf *conf;
1244 	struct ice_fdir_fltr *desc, *temp;
1245 
1246 	list_for_each_entry_safe(desc, temp,
1247 				 &vf->fdir.fdir_rule_list, fltr_node) {
1248 		conf = to_fltr_conf_from_desc(desc);
1249 		list_del(&desc->fltr_node);
1250 		devm_kfree(ice_pf_to_dev(vf->pf), conf);
1251 	}
1252 }
1253 
1254 /**
1255  * ice_vc_fdir_write_fltr - write filter rule into hardware
1256  * @vf: pointer to the VF info
1257  * @conf: FDIR configuration for each filter
1258  * @add: true implies add rule, false implies del rules
1259  * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
1260  *
1261  * Return: 0 on success, and other on error.
1262  */
1263 static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
1264 				  struct virtchnl_fdir_fltr_conf *conf,
1265 				  bool add, bool is_tun)
1266 {
1267 	struct ice_fdir_fltr *input = &conf->input;
1268 	struct ice_vsi *vsi, *ctrl_vsi;
1269 	struct ice_fltr_desc desc;
1270 	struct device *dev;
1271 	struct ice_pf *pf;
1272 	struct ice_hw *hw;
1273 	int ret;
1274 	u8 *pkt;
1275 
1276 	pf = vf->pf;
1277 	dev = ice_pf_to_dev(pf);
1278 	hw = &pf->hw;
1279 	vsi = ice_get_vf_vsi(vf);
1280 	if (!vsi) {
1281 		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
1282 		return -EINVAL;
1283 	}
1284 
1285 	input->dest_vsi = vsi->idx;
1286 	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1287 
1288 	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1289 	if (!ctrl_vsi) {
1290 		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
1291 		return -EINVAL;
1292 	}
1293 
1294 	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1295 	if (!pkt)
1296 		return -ENOMEM;
1297 
1298 	ice_fdir_get_prgm_desc(hw, input, &desc, add);
1299 	ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1300 	if (ret) {
1301 		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
1302 			vf->vf_id, input->flow_type);
1303 		goto err_free_pkt;
1304 	}
1305 
1306 	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1307 	if (ret)
1308 		goto err_free_pkt;
1309 
1310 	return 0;
1311 
1312 err_free_pkt:
1313 	devm_kfree(dev, pkt);
1314 	return ret;
1315 }
1316 
1317 /**
1318  * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
1319  * @t: pointer to timer_list
1320  */
1321 static void ice_vf_fdir_timer(struct timer_list *t)
1322 {
1323 	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
1324 	struct ice_vf_fdir_ctx *ctx_done;
1325 	struct ice_vf_fdir *fdir;
1326 	unsigned long flags;
1327 	struct ice_vf *vf;
1328 	struct ice_pf *pf;
1329 
1330 	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
1331 	vf = container_of(fdir, struct ice_vf, fdir);
1332 	ctx_done = &fdir->ctx_done;
1333 	pf = vf->pf;
1334 	spin_lock_irqsave(&fdir->ctx_lock, flags);
1335 	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1336 		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1337 		WARN_ON_ONCE(1);
1338 		return;
1339 	}
1340 
1341 	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1342 
1343 	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1344 	ctx_done->conf = ctx_irq->conf;
1345 	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
1346 	ctx_done->v_opcode = ctx_irq->v_opcode;
1347 	spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1348 
1349 	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1350 	ice_service_task_schedule(pf);
1351 }
1352 
1353 /**
1354  * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
1355  * @ctrl_vsi: pointer to a VF's CTRL VSI
1356  * @rx_desc: pointer to FDIR Rx queue descriptor
1357  */
1358 void
1359 ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
1360 			union ice_32b_rx_flex_desc *rx_desc)
1361 {
1362 	struct ice_pf *pf = ctrl_vsi->back;
1363 	struct ice_vf *vf = ctrl_vsi->vf;
1364 	struct ice_vf_fdir_ctx *ctx_done;
1365 	struct ice_vf_fdir_ctx *ctx_irq;
1366 	struct ice_vf_fdir *fdir;
1367 	unsigned long flags;
1368 	struct device *dev;
1369 	int ret;
1370 
1371 	if (WARN_ON(!vf))
1372 		return;
1373 
1374 	fdir = &vf->fdir;
1375 	ctx_done = &fdir->ctx_done;
1376 	ctx_irq = &fdir->ctx_irq;
1377 	dev = ice_pf_to_dev(pf);
1378 	spin_lock_irqsave(&fdir->ctx_lock, flags);
1379 	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1380 		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1381 		WARN_ON_ONCE(1);
1382 		return;
1383 	}
1384 
1385 	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1386 
1387 	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1388 	ctx_done->conf = ctx_irq->conf;
1389 	ctx_done->stat = ICE_FDIR_CTX_IRQ;
1390 	ctx_done->v_opcode = ctx_irq->v_opcode;
1391 	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
1392 	spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1393 
1394 	ret = del_timer(&ctx_irq->rx_tmr);
1395 	if (!ret)
1396 		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
1397 
1398 	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1399 	ice_service_task_schedule(pf);
1400 }
1401 
1402 /**
1403  * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
1404  * @vf: pointer to the VF info
1405  */
1406 static void ice_vf_fdir_dump_info(struct ice_vf *vf)
1407 {
1408 	u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
1409 	struct ice_vsi *vf_vsi;
1410 	struct device *dev;
1411 	struct ice_pf *pf;
1412 	struct ice_hw *hw;
1413 	u16 vsi_num;
1414 
1415 	pf = vf->pf;
1416 	hw = &pf->hw;
1417 	dev = ice_pf_to_dev(pf);
1418 	vf_vsi = ice_get_vf_vsi(vf);
1419 	if (!vf_vsi) {
1420 		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
1421 		return;
1422 	}
1423 
1424 	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
1425 
1426 	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
1427 	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
1428 	switch (hw->mac_type) {
1429 	case ICE_MAC_E830:
1430 		fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1431 		fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1432 		fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1433 		fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1434 		break;
1435 	case ICE_MAC_E810:
1436 	default:
1437 		fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1438 		fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1439 		fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1440 		fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1441 	}
1442 
1443 	dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1444 		vf->vf_id, fd_size_g, fd_size_b);
1445 	dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1446 		vf->vf_id, fd_cnt_g, fd_cnt_b);
1447 }
1448 
1449 /**
1450  * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
1451  * @vf: pointer to the VF info
1452  * @ctx: FDIR context info for post processing
1453  * @status: virtchnl FDIR program status
1454  *
1455  * Return: 0 on success, and other on error.
1456  */
1457 static int
1458 ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1459 		      enum virtchnl_fdir_prgm_status *status)
1460 {
1461 	struct device *dev = ice_pf_to_dev(vf->pf);
1462 	u32 stat_err, error, prog_id;
1463 	int ret;
1464 
1465 	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
1466 	if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
1467 	    ICE_FXD_FLTR_WB_QW1_DD_YES) {
1468 		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1469 		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
1470 		ret = -EINVAL;
1471 		goto err_exit;
1472 	}
1473 
1474 	prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
1475 	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
1476 	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
1477 		dev_err(dev, "VF %d: Desc show add, but ctx not",
1478 			vf->vf_id);
1479 		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1480 		ret = -EINVAL;
1481 		goto err_exit;
1482 	}
1483 
1484 	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
1485 	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
1486 		dev_err(dev, "VF %d: Desc show del, but ctx not",
1487 			vf->vf_id);
1488 		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1489 		ret = -EINVAL;
1490 		goto err_exit;
1491 	}
1492 
1493 	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
1494 	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
1495 		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
1496 			dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
1497 				vf->vf_id);
1498 			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1499 		} else {
1500 			dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry",
1501 				vf->vf_id);
1502 			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1503 		}
1504 		ret = -EINVAL;
1505 		goto err_exit;
1506 	}
1507 
1508 	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
1509 	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
1510 		dev_err(dev, "VF %d: Profile matching error", vf->vf_id);
1511 		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1512 		ret = -EINVAL;
1513 		goto err_exit;
1514 	}
1515 
1516 	*status = VIRTCHNL_FDIR_SUCCESS;
1517 
1518 	return 0;
1519 
1520 err_exit:
1521 	ice_vf_fdir_dump_info(vf);
1522 	return ret;
1523 }
1524 
1525 /**
1526  * ice_vc_add_fdir_fltr_post
1527  * @vf: pointer to the VF structure
1528  * @ctx: FDIR context info for post processing
1529  * @status: virtchnl FDIR program status
1530  * @success: true implies success, false implies failure
1531  *
1532  * Post process for flow director add command. If success, then do post process
1533  * and send back success msg by virtchnl. Otherwise, do context reversion and
1534  * send back failure msg by virtchnl.
1535  *
1536  * Return: 0 on success, and other on error.
1537  */
1538 static int
1539 ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1540 			  enum virtchnl_fdir_prgm_status status,
1541 			  bool success)
1542 {
1543 	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1544 	struct device *dev = ice_pf_to_dev(vf->pf);
1545 	enum virtchnl_status_code v_ret;
1546 	struct virtchnl_fdir_add *resp;
1547 	int ret, len, is_tun;
1548 
1549 	v_ret = VIRTCHNL_STATUS_SUCCESS;
1550 	len = sizeof(*resp);
1551 	resp = kzalloc(len, GFP_KERNEL);
1552 	if (!resp) {
1553 		len = 0;
1554 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1555 		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1556 		goto err_exit;
1557 	}
1558 
1559 	if (!success)
1560 		goto err_exit;
1561 
1562 	is_tun = 0;
1563 	resp->status = status;
1564 	resp->flow_id = conf->flow_id;
1565 	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
1566 
1567 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1568 				    (u8 *)resp, len);
1569 	kfree(resp);
1570 
1571 	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1572 		vf->vf_id, conf->flow_id,
1573 		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1574 		"add" : "del");
1575 	return ret;
1576 
1577 err_exit:
1578 	if (resp)
1579 		resp->status = status;
1580 	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1581 	devm_kfree(dev, conf);
1582 
1583 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1584 				    (u8 *)resp, len);
1585 	kfree(resp);
1586 	return ret;
1587 }
1588 
1589 /**
1590  * ice_vc_del_fdir_fltr_post
1591  * @vf: pointer to the VF structure
1592  * @ctx: FDIR context info for post processing
1593  * @status: virtchnl FDIR program status
1594  * @success: true implies success, false implies failure
1595  *
1596  * Post process for flow director del command. If success, then do post process
1597  * and send back success msg by virtchnl. Otherwise, do context reversion and
1598  * send back failure msg by virtchnl.
1599  *
1600  * Return: 0 on success, and other on error.
1601  */
1602 static int
1603 ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1604 			  enum virtchnl_fdir_prgm_status status,
1605 			  bool success)
1606 {
1607 	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1608 	struct device *dev = ice_pf_to_dev(vf->pf);
1609 	enum virtchnl_status_code v_ret;
1610 	struct virtchnl_fdir_del *resp;
1611 	int ret, len, is_tun;
1612 
1613 	v_ret = VIRTCHNL_STATUS_SUCCESS;
1614 	len = sizeof(*resp);
1615 	resp = kzalloc(len, GFP_KERNEL);
1616 	if (!resp) {
1617 		len = 0;
1618 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1619 		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1620 		goto err_exit;
1621 	}
1622 
1623 	if (!success)
1624 		goto err_exit;
1625 
1626 	is_tun = 0;
1627 	resp->status = status;
1628 	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1629 	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
1630 
1631 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1632 				    (u8 *)resp, len);
1633 	kfree(resp);
1634 
1635 	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1636 		vf->vf_id, conf->flow_id,
1637 		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1638 		"add" : "del");
1639 	devm_kfree(dev, conf);
1640 	return ret;
1641 
1642 err_exit:
1643 	if (resp)
1644 		resp->status = status;
1645 	if (success)
1646 		devm_kfree(dev, conf);
1647 
1648 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1649 				    (u8 *)resp, len);
1650 	kfree(resp);
1651 	return ret;
1652 }
1653 
1654 /**
1655  * ice_flush_fdir_ctx
1656  * @pf: pointer to the PF structure
1657  *
1658  * Flush all the pending event on ctx_done list and process them.
1659  */
1660 void ice_flush_fdir_ctx(struct ice_pf *pf)
1661 {
1662 	struct ice_vf *vf;
1663 	unsigned int bkt;
1664 
1665 	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
1666 		return;
1667 
1668 	mutex_lock(&pf->vfs.table_lock);
1669 	ice_for_each_vf(pf, bkt, vf) {
1670 		struct device *dev = ice_pf_to_dev(pf);
1671 		enum virtchnl_fdir_prgm_status status;
1672 		struct ice_vf_fdir_ctx *ctx;
1673 		unsigned long flags;
1674 		int ret;
1675 
1676 		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1677 			continue;
1678 
1679 		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
1680 			continue;
1681 
1682 		ctx = &vf->fdir.ctx_done;
1683 		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1684 		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
1685 			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1686 			continue;
1687 		}
1688 		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1689 
1690 		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
1691 		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
1692 			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
1693 			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
1694 				vf->vf_id);
1695 			goto err_exit;
1696 		}
1697 
1698 		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
1699 		if (ret)
1700 			goto err_exit;
1701 
1702 		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1703 			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
1704 		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1705 			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
1706 		else
1707 			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1708 
1709 		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1710 		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1711 		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1712 		continue;
1713 err_exit:
1714 		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1715 			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
1716 		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1717 			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
1718 		else
1719 			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1720 
1721 		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1722 		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1723 		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1724 	}
1725 	mutex_unlock(&pf->vfs.table_lock);
1726 }
1727 
1728 /**
1729  * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
1730  * @vf: pointer to the VF structure
1731  * @conf: FDIR configuration for each filter
1732  * @v_opcode: virtual channel operation code
1733  *
1734  * Return: 0 on success, and other on error.
1735  */
1736 static int
1737 ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
1738 			enum virtchnl_ops v_opcode)
1739 {
1740 	struct device *dev = ice_pf_to_dev(vf->pf);
1741 	struct ice_vf_fdir_ctx *ctx;
1742 	unsigned long flags;
1743 
1744 	ctx = &vf->fdir.ctx_irq;
1745 	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1746 	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
1747 	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
1748 		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1749 		dev_dbg(dev, "VF %d: Last request is still in progress\n",
1750 			vf->vf_id);
1751 		return -EBUSY;
1752 	}
1753 	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
1754 	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1755 
1756 	ctx->conf = conf;
1757 	ctx->v_opcode = v_opcode;
1758 	ctx->stat = ICE_FDIR_CTX_READY;
1759 	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);
1760 
1761 	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));
1762 
1763 	return 0;
1764 }
1765 
1766 /**
1767  * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
1768  * @vf: pointer to the VF structure
1769  *
1770  * Return: 0 on success, and other on error.
1771  */
1772 static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
1773 {
1774 	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
1775 	unsigned long flags;
1776 
1777 	del_timer(&ctx->rx_tmr);
1778 	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1779 	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1780 	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1781 }
1782 
1783 /**
1784  * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
1785  * @vf: pointer to the VF info
1786  * @msg: pointer to the msg buffer
1787  *
1788  * Return: 0 on success, and other on error.
1789  */
1790 int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
1791 {
1792 	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
1793 	struct virtchnl_fdir_add *stat = NULL;
1794 	struct virtchnl_fdir_fltr_conf *conf;
1795 	enum virtchnl_status_code v_ret;
1796 	struct device *dev;
1797 	struct ice_pf *pf;
1798 	int is_tun = 0;
1799 	int len = 0;
1800 	int ret;
1801 
1802 	pf = vf->pf;
1803 	dev = ice_pf_to_dev(pf);
1804 	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
1805 	if (ret) {
1806 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1807 		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
1808 		goto err_exit;
1809 	}
1810 
1811 	ret = ice_vf_start_ctrl_vsi(vf);
1812 	if (ret && (ret != -EEXIST)) {
1813 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1814 		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
1815 			vf->vf_id, ret);
1816 		goto err_exit;
1817 	}
1818 
1819 	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
1820 	if (!stat) {
1821 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1822 		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
1823 		goto err_exit;
1824 	}
1825 
1826 	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
1827 	if (!conf) {
1828 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1829 		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
1830 		goto err_exit;
1831 	}
1832 
1833 	len = sizeof(*stat);
1834 	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
1835 	if (ret) {
1836 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1837 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1838 		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
1839 		goto err_free_conf;
1840 	}
1841 
1842 	if (fltr->validate_only) {
1843 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1844 		stat->status = VIRTCHNL_FDIR_SUCCESS;
1845 		devm_kfree(dev, conf);
1846 		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
1847 					    v_ret, (u8 *)stat, len);
1848 		goto exit;
1849 	}
1850 
1851 	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
1852 	if (ret) {
1853 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1854 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
1855 		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
1856 			vf->vf_id, ret);
1857 		goto err_free_conf;
1858 	}
1859 
1860 	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
1861 	if (ret) {
1862 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1863 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
1864 		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
1865 			vf->vf_id);
1866 		goto err_free_conf;
1867 	}
1868 
1869 	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
1870 	if (ret) {
1871 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1872 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1873 		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
1874 		goto err_free_conf;
1875 	}
1876 
1877 	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
1878 	if (ret) {
1879 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1880 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1881 		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
1882 		goto err_rem_entry;
1883 	}
1884 
1885 	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
1886 	if (ret) {
1887 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1888 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1889 		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
1890 			vf->vf_id, ret);
1891 		goto err_clr_irq;
1892 	}
1893 
1894 exit:
1895 	kfree(stat);
1896 	return ret;
1897 
1898 err_clr_irq:
1899 	ice_vc_fdir_clear_irq_ctx(vf);
1900 err_rem_entry:
1901 	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1902 err_free_conf:
1903 	devm_kfree(dev, conf);
1904 err_exit:
1905 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
1906 				    (u8 *)stat, len);
1907 	kfree(stat);
1908 	return ret;
1909 }
1910 
1911 /**
1912  * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
1913  * @vf: pointer to the VF info
1914  * @msg: pointer to the msg buffer
1915  *
1916  * Return: 0 on success, and other on error.
1917  */
1918 int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
1919 {
1920 	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
1921 	struct virtchnl_fdir_del *stat = NULL;
1922 	struct virtchnl_fdir_fltr_conf *conf;
1923 	enum virtchnl_status_code v_ret;
1924 	struct device *dev;
1925 	struct ice_pf *pf;
1926 	int is_tun = 0;
1927 	int len = 0;
1928 	int ret;
1929 
1930 	pf = vf->pf;
1931 	dev = ice_pf_to_dev(pf);
1932 	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
1933 	if (ret) {
1934 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1935 		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
1936 		goto err_exit;
1937 	}
1938 
1939 	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
1940 	if (!stat) {
1941 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1942 		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
1943 		goto err_exit;
1944 	}
1945 
1946 	len = sizeof(*stat);
1947 
1948 	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
1949 	if (!conf) {
1950 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1951 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1952 		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
1953 			vf->vf_id, fltr->flow_id);
1954 		goto err_exit;
1955 	}
1956 
1957 	/* Just return failure when ctrl_vsi idx is invalid */
1958 	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
1959 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1960 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1961 		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
1962 		goto err_exit;
1963 	}
1964 
1965 	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
1966 	if (ret) {
1967 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1968 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1969 		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
1970 		goto err_exit;
1971 	}
1972 
1973 	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
1974 	if (ret) {
1975 		v_ret = VIRTCHNL_STATUS_SUCCESS;
1976 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1977 		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
1978 			vf->vf_id, ret);
1979 		goto err_del_tmr;
1980 	}
1981 
1982 	kfree(stat);
1983 
1984 	return ret;
1985 
1986 err_del_tmr:
1987 	ice_vc_fdir_clear_irq_ctx(vf);
1988 err_exit:
1989 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
1990 				    (u8 *)stat, len);
1991 	kfree(stat);
1992 	return ret;
1993 }
1994 
1995 /**
1996  * ice_vf_fdir_init - init FDIR resource for VF
1997  * @vf: pointer to the VF info
1998  */
1999 void ice_vf_fdir_init(struct ice_vf *vf)
2000 {
2001 	struct ice_vf_fdir *fdir = &vf->fdir;
2002 
2003 	idr_init(&fdir->fdir_rule_idr);
2004 	INIT_LIST_HEAD(&fdir->fdir_rule_list);
2005 
2006 	spin_lock_init(&fdir->ctx_lock);
2007 	fdir->ctx_irq.flags = 0;
2008 	fdir->ctx_done.flags = 0;
2009 	ice_vc_fdir_reset_cnt_all(fdir);
2010 }
2011 
2012 /**
2013  * ice_vf_fdir_exit - destroy FDIR resource for VF
2014  * @vf: pointer to the VF info
2015  */
2016 void ice_vf_fdir_exit(struct ice_vf *vf)
2017 {
2018 	ice_vc_fdir_flush_entry(vf);
2019 	idr_destroy(&vf->fdir.fdir_rule_idr);
2020 	ice_vc_fdir_rem_prof_all(vf);
2021 	ice_vc_fdir_free_prof_all(vf);
2022 }
2023