// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2023, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
#include "ice_vf_lib_private.h"

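/* A VF filter lives in a struct virtchnl_fdir_fltr_conf; only its embedded
 * ice_fdir_fltr is linked on the rule list, so recover the wrapper from a
 * list node with container_of().
 */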
#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
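/* The PFCP S flag (bit 0 of the first header octet) distinguishes
 * node-related messages from session-related ones; PFCP rides on
 * UDP port 8805.
 */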
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
#define PFCP_PORT_NR 8805

#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
	ICE_FDIR_TUNNEL_TYPE_ECPRI,
	ICE_FDIR_TUNNEL_TYPE_GTPU_INNER,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER,
	ICE_FDIR_TUNNEL_TYPE_GRE,
	ICE_FDIR_TUNNEL_TYPE_GTPOGRE,
	ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER,
	ICE_FDIR_TUNNEL_TYPE_GRE_INNER,
	ICE_FDIR_TUNNEL_TYPE_L2TPV2,
	ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER,
};

struct virtchnl_fdir_fltr_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type ttype;
	u64 inset_flag;
	u32 flow_id;

	struct ice_parser_profile *prof;
	bool parser_ena;
	u8 *pkt_buf;
	u8 pkt_len;
};

struct virtchnl_fdir_inset_map {
	enum virtchnl_proto_hdr_field field;
	enum ice_flow_field fld;
	u64 flag;
	u64 mask;
};

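/* Map virtchnl header fields to ice flow fields. The two ESP_SPI entries
 * share one virtchnl field: the flag/mask pair selects the plain IPsec or
 * the NAT-T (UDP-encapsulated) variant based on conf->inset_flag.
 */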
static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};

/**
 * ice_vc_fdir_param_check
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check for a valid VSI ID, PF state and VF state
 *
 * Return: 0 on success, and -EINVAL on error.
 */
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EINVAL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		return -EINVAL;

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
		return -EINVAL;

	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
		return -EINVAL;

	if (!ice_get_vf_vsi(vf))
		return -EINVAL;

	return 0;
}

/**
 * ice_vf_start_ctrl_vsi
 * @vf: pointer to the VF structure
 *
 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *ctrl_vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		return -EEXIST;

	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
	if (!ctrl_vsi) {
		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
			vf->vf_id);
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "Could not open control VSI for VF %d\n",
			vf->vf_id);
		goto err_vsi_open;
	}

	return 0;

err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[vf->ctrl_vsi_idx] = NULL;
		vf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}

/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof) {
		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
					       ICE_FLTR_PTYPE_MAX,
					       sizeof(*fdir->fdir_prof),
					       GFP_KERNEL);
		if (!fdir->fdir_prof)
			return -ENOMEM;
	}

	if (!fdir->fdir_prof[flow]) {
		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
						     sizeof(**fdir->fdir_prof),
						     GFP_KERNEL);
		if (!fdir->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 */
static void
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof)
		return;

	if (!fdir->fdir_prof[flow])
		return;

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
	fdir->fdir_prof[flow] = NULL;
}

/**
 * ice_vc_fdir_free_prof_all - free all the profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum ice_fltr_ptype flow;

	if (!fdir->fdir_prof)
		return;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_vc_fdir_free_prof(vf, flow);

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
	fdir->fdir_prof = NULL;
}

/**
 * ice_vc_fdir_parse_flow_fld
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store the matched fields into
 * the field type array
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
			   struct virtchnl_fdir_fltr_conf *conf,
			   enum ice_flow_field *fld, int *fld_cnt)
{
	struct virtchnl_proto_hdr hdr;
	u32 i;

	memcpy(&hdr, proto_hdr, sizeof(hdr));

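	/* Walk the inset map against the local copy of the header, deleting
	 * each matched field selector as it is consumed, until no selector
	 * bits remain or the map is exhausted.
	 */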
	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
			if (fdir_inset_map[i].mask &&
			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
			     fdir_inset_map[i].flag))
				continue;

			fld[*fld_cnt] = fdir_inset_map[i].fld;
			*fld_cnt += 1;
			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
				return -EINVAL;
			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
						     fdir_inset_map[i].field);
		}

	return 0;
}

/**
 * ice_vc_fdir_set_flow_fld
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store it in
 * the flow's packet segment field
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct virtchnl_proto_hdrs *proto;
	int fld_cnt = 0;
	int i;

	proto = &rule->proto_hdrs;
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		int ret;

		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
		if (ret)
			return ret;
	}

	if (fld_cnt == 0) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < fld_cnt; i++)
		ice_flow_set_fld(seg, fld[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}

/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	enum ice_fltr_ptype flow = conf->input.flow_type;
	enum ice_fdir_tunnel_type ttype = conf->ttype;
	struct device *dev = ice_pf_to_dev(vf->pf);

	switch (flow) {
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
					  ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else {
			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
				ttype, vf->vf_id);
			return -EINVAL;
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
			flow, vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_fd_hw_prof *vf_prof;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_hw *hw;
	u64 prof_id;
	int i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
		return;

	vf_prof = fdir->fdir_prof[flow];
	prof_id = vf_prof->prof_id[tun];

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
		return;
	}

	if (!fdir->prof_entry_cnt[flow][tun])
		return;

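	/* Unbind the profile from each VSI and free its flow entries */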
	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
		if (vf_prof->entry_h[i][tun]) {
			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);

			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   vf_prof->entry_h[i][tun]);
			vf_prof->entry_h[i][tun] = 0;
		}

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	devm_kfree(dev, vf_prof->fdir_seg[tun]);
	vf_prof->fdir_seg[tun] = NULL;

	for (i = 0; i < vf_prof->cnt; i++)
		vf_prof->vsi_h[i] = 0;

	fdir->prof_entry_cnt[flow][tun] = 0;
}

/**
 * ice_vc_fdir_rem_prof_all - remove all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		ice_vc_fdir_rem_prof(vf, flow, 0);
		ice_vc_fdir_rem_prof(vf, flow, 1);
	}
}

/**
 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
 * @fdir: pointer to the VF FDIR structure
 */
static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		fdir->fdir_fltr_cnt[flow][0] = 0;
		fdir->fdir_fltr_cnt[flow][1] = 0;
	}

	fdir->fdir_fltr_cnt_total = 0;
}

/**
 * ice_vc_fdir_has_prof_conflict
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 *
 * Check if @conf has a conflicting profile with existing profiles
 *
 * Return: true if a conflict is found, false otherwise.
 */
static bool
ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
			      struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *existing_conf;
		enum ice_fltr_ptype flow_type_a, flow_type_b;
		struct ice_fdir_fltr *a, *b;

		existing_conf = to_fltr_conf_from_desc(desc);
		a = &existing_conf->input;
		b = &conf->input;
		flow_type_a = a->flow_type;
		flow_type_b = b->flow_type;

		/* No need to compare two rules with different tunnel types or
		 * with the same protocol type.
		 */
		if (existing_conf->ttype != conf->ttype ||
		    flow_type_a == flow_type_b)
			continue;

		switch (flow_type_a) {
		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
				return true;
			break;
		default:
			break;
		}
	}

	return false;
}

/**
 * ice_vc_fdir_write_flow_prof
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
			    struct ice_flow_seg_info *seg, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *vf_prof;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi)
		return -EINVAL;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi)
		return -EINVAL;

	vf_prof = fdir->fdir_prof[flow];
	old_seg = vf_prof->fdir_seg[tun];
	if (old_seg) {
		if (!memcmp(old_seg, seg, sizeof(*seg))) {
			dev_dbg(dev, "Duplicated profile for VF %d!\n",
				vf->vf_id);
			return -EEXIST;
		}

		if (fdir->fdir_fltr_cnt[flow][tun]) {
			ret = -EINVAL;
			dev_dbg(dev, "Input set conflicts for VF %d\n",
				vf->vf_id);
			goto err_exit;
		}

		/* remove previously allocated profile */
		ice_vc_fdir_rem_prof(vf, flow, tun);
	}

	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
				tun + 1, false, &prof);
	if (ret) {
		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
			flow, vf->vf_id);
		goto err_exit;
	}

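	/* Add one flow entry for the VF's own VSI and one for its ctrl VSI,
	 * which is used to program filters and receive their status
	 * writebacks.
	 */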
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (ret) {
		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_prof;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (ret) {
		dev_dbg(dev,
			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_entry_1;
	}

	vf_prof->fdir_seg[tun] = seg;
	vf_prof->cnt = 0;
	fdir->prof_entry_cnt[flow][tun] = 0;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->prof_id[tun] = prof->id;

	return 0;

err_entry_1:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
err_exit:
	return ret;
}

/**
 * ice_vc_fdir_config_input_set
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Configure the input set type and value for the virtual channel add msg
 * buffer
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			     struct virtchnl_fdir_fltr_conf *conf, int tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_flow_seg_info *seg;
	enum ice_fltr_ptype flow;
	int ret;

	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
	if (ret) {
		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
			vf->vf_id);
		return ret;
	}

	flow = input->flow_type;
	ret = ice_vc_fdir_alloc_prof(vf, flow);
	if (ret) {
		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
		return ret;
	}

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
	if (ret == -EEXIST) {
		devm_kfree(dev, seg);
	} else if (ret) {
		dev_dbg(dev, "Write flow profile for VF %d failed\n",
			vf->vf_id);
		goto err_exit;
	}

	return 0;

err_exit:
	devm_kfree(dev, seg);
	return ret;
}

/**
 * ice_vc_fdir_is_raw_flow - check if FDIR flow is raw (binary)
 * @proto: virtchnl protocol headers
 *
 * Check if the FDIR rule is a raw flow (protocol agnostic flow) or not. Note
 * that a common FDIR rule must have a non-zero proto->count. Thus, we choose
 * the tunnel_level and count of proto as the indicators. If both are zero,
 * this FDIR rule is regarded as a raw flow.
 *
 * Return: true if the headers describe a raw flow, false otherwise.
 */
static bool
ice_vc_fdir_is_raw_flow(struct virtchnl_proto_hdrs *proto)
{
	return (proto->tunnel_level == 0 && proto->count == 0);
}

/**
 * ice_vc_fdir_parse_raw - parse a virtchnl raw FDIR rule
 * @vf: pointer to the VF info
 * @proto: virtchnl protocol headers
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's raw flow and store it in @conf
 *
 * Return: 0 on success or negative errno on failure.
 */
static int
ice_vc_fdir_parse_raw(struct ice_vf *vf,
		      struct virtchnl_proto_hdrs *proto,
		      struct virtchnl_fdir_fltr_conf *conf)
{
	u8 *pkt_buf, *msk_buf __free(kfree) = NULL;
	struct ice_parser_result rslt;
	struct ice_pf *pf = vf->pf;
	u16 pkt_len, udp_port = 0;
	struct ice_parser *psr;
	int status = -ENOMEM;
	struct ice_hw *hw;

	pkt_len = proto->raw.pkt_len;

	if (!pkt_len || pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
		return -EINVAL;

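	/* msk_buf is auto-freed on every exit path via __free(kfree);
	 * pkt_buf ownership moves to @conf on success and is freed by hand
	 * on failure.
	 */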
	pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
	msk_buf = kzalloc(pkt_len, GFP_KERNEL);

	if (!pkt_buf || !msk_buf)
		goto err_mem_alloc;

	memcpy(pkt_buf, proto->raw.spec, pkt_len);
	memcpy(msk_buf, proto->raw.mask, pkt_len);

	hw = &pf->hw;

	/* Get raw profile info via Parser Lib */
	psr = ice_parser_create(hw);
	if (IS_ERR(psr)) {
		status = PTR_ERR(psr);
		goto err_mem_alloc;
	}

	ice_parser_dvm_set(psr, ice_is_dvm_ena(hw));

	if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
		ice_parser_vxlan_tunnel_set(psr, udp_port, true);

	status = ice_parser_run(psr, pkt_buf, pkt_len, &rslt);
	if (status)
		goto err_parser_destroy;

	if (hw->debug_mask & ICE_DBG_PARSER)
		ice_parser_result_dump(hw, &rslt);

	conf->prof = kzalloc(sizeof(*conf->prof), GFP_KERNEL);
	if (!conf->prof) {
		status = -ENOMEM;
		goto err_parser_destroy;
	}

	status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
					 pkt_len, ICE_BLK_FD,
					 conf->prof);
	if (status)
		goto err_parser_profile_init;

	if (hw->debug_mask & ICE_DBG_PARSER)
		ice_parser_profile_dump(hw, conf->prof);

	/* Store raw flow info into @conf */
	conf->pkt_len = pkt_len;
	conf->pkt_buf = pkt_buf;
	conf->parser_ena = true;

	ice_parser_destroy(psr);
	return 0;

err_parser_profile_init:
	kfree(conf->prof);
err_parser_destroy:
	ice_parser_destroy(psr);
err_mem_alloc:
	kfree(pkt_buf);
	return status;
}

/**
 * ice_vc_fdir_parse_pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store it into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
			proto->count, vf->vf_id);
		return -EINVAL;
	}

	/* For raw FDIR filters created by the parser */
	if (ice_vc_fdir_is_raw_flow(proto))
		return ice_vc_fdir_parse_raw(vf, proto, conf);

	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		struct ip_esp_hdr *esph;
		struct ip_auth_hdr *ah;
		struct sctphdr *sctph;
		struct ipv6hdr *ip6h;
		struct udphdr *udph;
		struct tcphdr *tcph;
		struct ethhdr *eth;
		struct iphdr *iph;
		u8 s_field;
		u8 *rawh;

		switch (hdr->type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			eth = (struct ethhdr *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;

			if (hdr->field_selector)
				input->ext_data.ether_type = eth->h_proto;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			iph = (struct iphdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV4;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;

			if (hdr->field_selector) {
				input->ip.v4.src_ip = iph->saddr;
				input->ip.v4.dst_ip = iph->daddr;
				input->ip.v4.tos = iph->tos;
				input->ip.v4.proto = iph->protocol;
			}
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ip6h = (struct ipv6hdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV6;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;

			if (hdr->field_selector) {
				memcpy(input->ip.v6.src_ip,
				       ip6h->saddr.in6_u.u6_addr8,
				       sizeof(ip6h->saddr));
				memcpy(input->ip.v6.dst_ip,
				       ip6h->daddr.in6_u.u6_addr8,
				       sizeof(ip6h->daddr));
				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
						  (ip6h->flow_lbl[0] >> 4);
				input->ip.v6.proto = ip6h->nexthdr;
			}
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			tcph = (struct tcphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = tcph->source;
					input->ip.v4.dst_port = tcph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = tcph->source;
					input->ip.v6.dst_port = tcph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			udph = (struct udphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = udph->source;
					input->ip.v4.dst_port = udph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = udph->source;
					input->ip.v6.dst_port = udph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			sctph = (struct sctphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = sctph->source;
					input->ip.v4.dst_port = sctph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = sctph->source;
					input->ip.v6.dst_port = sctph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;

			if (hdr->field_selector)
				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
			break;
		case VIRTCHNL_PROTO_HDR_ESP:
			esph = (struct ip_esp_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
			    l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
			else
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = esph->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = esph->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_AH:
			ah = (struct ip_auth_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = ah->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = ah->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_PFCP:
			rawh = (u8 *)hdr->buffer;
			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
			}
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
			rawh = (u8 *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

			if (hdr->field_selector)
				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			rawh = (u8 *)hdr->buffer;

			if (hdr->field_selector)
				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			break;
		default:
			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
				hdr->type, vf->vf_id);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_vc_fdir_parse_action
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's action and store it into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	u32 dest_num = 0;
	u32 mark_num = 0;
	int i;

	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
		dev_dbg(dev, "Invalid action count:0x%x for VF %d\n",
			as->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < as->count; i++) {
		struct virtchnl_filter_action *action = &as->actions[i];

		switch (action->type) {
		case VIRTCHNL_ACTION_PASSTHRU:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case VIRTCHNL_ACTION_DROP:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case VIRTCHNL_ACTION_QUEUE:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			input->q_index = action->act_conf.queue.index;
			break;
		case VIRTCHNL_ACTION_Q_REGION:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
			input->q_index = action->act_conf.queue.index;
			input->q_region = action->act_conf.queue.region;
			break;
		case VIRTCHNL_ACTION_MARK:
			mark_num++;
			input->fltr_id = action->act_conf.mark_id;
			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
			break;
		default:
			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
				action->type, vf->vf_id);
			return -EINVAL;
		}
	}

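	/* Exactly one destination action and at most one mark action are
	 * allowed per rule.
	 */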
	if (dest_num == 0 || dest_num >= 2) {
		dev_dbg(dev, "Invalid destination action for VF %d\n",
			vf->vf_id);
		return -EINVAL;
	}

	if (mark_num >= 2) {
		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	int ret;

	/* For raw FDIR filters created by the parser */
	if (!ice_vc_fdir_is_raw_flow(proto))
		if (!ice_vc_validate_pattern(vf, proto))
			return -EINVAL;

	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
	if (ret)
		return ret;

	return ice_vc_fdir_parse_action(vf, fltr, conf);
}

/**
 * ice_vc_fdir_comp_rules - compare whether two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the two rules carry the same value, false otherwise.
 */
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
		       struct virtchnl_fdir_fltr_conf *conf_b)
{
	struct ice_fdir_fltr *a = &conf_a->input;
	struct ice_fdir_fltr *b = &conf_b->input;

	if (conf_a->ttype != conf_b->ttype)
		return false;
	if (a->flow_type != b->flow_type)
		return false;
	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
		return false;
	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
		return false;
	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
		return false;
	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
		return false;
	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
		return false;
	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
		return false;
	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
		return false;
	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
		return false;

	return true;
}

/**
 * ice_vc_fdir_is_dup_fltr
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if there is a duplicated rule with the same conf value
 *
 * Return: true if a duplicate is found, false otherwise.
 */
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;
	bool ret;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *node =
				to_fltr_conf_from_desc(desc);

		ret = ice_vc_fdir_comp_rules(node, conf);
		if (ret)
			return true;
	}

	return false;
}

/**
 * ice_vc_fdir_insert_entry
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into list and allocate ID for this filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	/* alloc ID corresponding with conf */
	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
	if (i < 0)
		return -EINVAL;
	*id = i;

	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
	return 0;
}

/**
 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: filter rule's ID
 */
static void
ice_vc_fdir_remove_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
{
	struct ice_fdir_fltr *input = &conf->input;

	idr_remove(&vf->fdir.fdir_rule_idr, id);
	list_del(&input->fltr_node);
}

/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: pointer to the filter configuration on success, NULL otherwise.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}

/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}

/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies del rule
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	input->dest_vsi = vsi->idx;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	if (conf->parser_ena) {
		memcpy(pkt, conf->pkt_buf, conf->pkt_len);
	} else {
		ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
		if (ret) {
			dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
				vf->vf_id, input->flow_type);
			goto err_free_pkt;
		}
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}

/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 */
static void ice_vf_fdir_timer(struct timer_list *t)
{
	struct ice_vf_fdir_ctx *ctx_irq = timer_container_of(ctx_irq, t,
							     rx_tmr);
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct ice_vf *vf;
	struct ice_pf *pf;

	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
	vf = container_of(fdir, struct ice_vf, fdir);
	ctx_done = &fdir->ctx_done;
	pf = vf->pf;
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

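	/* Timed out: hand the context off to ctx_done so the service task
	 * can report the failure back to the VF.
	 */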
	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
			union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_pf *pf = ctrl_vsi->back;
	struct ice_vf *vf = ctrl_vsi->vf;
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir_ctx *ctx_irq;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct device *dev;
	int ret;

	if (WARN_ON(!vf))
		return;

	fdir = &vf->fdir;
	ctx_done = &fdir->ctx_done;
	ctx_irq = &fdir->ctx_irq;
	dev = ice_pf_to_dev(pf);
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

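	/* The IRQ beat the timeout: transfer the context to ctx_done along
	 * with the writeback descriptor and stop the pending timer.
	 */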
	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_IRQ;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	ret = timer_delete(&ctx_irq->rx_tmr);
	if (!ret)
		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
 * @vf: pointer to the VF info
 */
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
	u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u16 vsi_num;

	pf = vf->pf;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
		return;
	}

	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);

	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
		fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
		fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
		fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
		break;
	case ICE_MAC_E810:
	default:
		fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
		fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
		fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
		fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
	}

	dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
		vf->vf_id, fd_size_g, fd_size_b);
	dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
		vf->vf_id, fd_cnt_g, fd_cnt_b);
}

/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
		      enum virtchnl_fdir_prgm_status *status)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u32 stat_err, error, prog_id;
	int ret;

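	/* The DD bit must be set before any other writeback field in the
	 * programming status descriptor can be trusted.
	 */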
	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
	if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
	    ICE_FXD_FLTR_WB_QW1_DD_YES) {
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
		ret = -EINVAL;
		goto err_exit;
	}

	prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
		dev_err(dev, "VF %d: descriptor indicates add, but context does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
		dev_err(dev, "VF %d: descriptor indicates del, but context does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
			dev_err(dev, "VF %d: failed to add FDIR rule due to no space in the table\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		} else {
			dev_err(dev, "VF %d: failed to remove FDIR rule, attempt to remove non-existent entry\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		}
		ret = -EINVAL;
		goto err_exit;
	}

	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
		dev_err(dev, "VF %d: profile matching error\n", vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		ret = -EINVAL;
		goto err_exit;
	}

	*status = VIRTCHNL_FDIR_SUCCESS;

	return 0;

err_exit:
	ice_vf_fdir_dump_info(vf);
	return ret;
}

static int ice_fdir_is_tunnel(enum ice_fdir_tunnel_type ttype)
{
	return (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER ||
		ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER ||
		ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER ||
		ttype == ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER ||
		ttype == ICE_FDIR_TUNNEL_TYPE_ECPRI ||
		ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER);
}

/**
 * ice_vc_add_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for the flow director add command. On success, do the post
 * processing and send back a success message by virtchnl. Otherwise, revert
 * the context and send back a failure message by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
	vf->fdir.fdir_fltr_cnt_total++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}
1730
1731 /**
1732 * ice_vc_del_fdir_fltr_post
1733 * @vf: pointer to the VF structure
1734 * @ctx: FDIR context info for post processing
1735 * @status: virtchnl FDIR program status
1736 * @success: true implies success, false implies failure
1737 *
1738 * Post process for flow director del command. If success, then do post process
1739 * and send back success msg by virtchnl. Otherwise, do context reversion and
1740 * send back failure msg by virtchnl.
1741 *
1742 * Return: 0 on success, and other on error.
1743 */
1744 static int
ice_vc_del_fdir_fltr_post(struct ice_vf * vf,struct ice_vf_fdir_ctx * ctx,enum virtchnl_fdir_prgm_status status,bool success)1745 ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1746 enum virtchnl_fdir_prgm_status status,
1747 bool success)
1748 {
1749 struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1750 struct device *dev = ice_pf_to_dev(vf->pf);
1751 enum virtchnl_status_code v_ret;
1752 struct virtchnl_fdir_del *resp;
1753 int ret, len, is_tun;
1754
1755 v_ret = VIRTCHNL_STATUS_SUCCESS;
1756 len = sizeof(*resp);
1757 resp = kzalloc(len, GFP_KERNEL);
1758 if (!resp) {
1759 len = 0;
1760 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1761 dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1762 goto err_exit;
1763 }
1764
1765 if (!success)
1766 goto err_exit;
1767
1768 is_tun = 0;
1769 resp->status = status;
1770 ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1771 vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
1772 vf->fdir.fdir_fltr_cnt_total--;
1773
1774 ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1775 (u8 *)resp, len);
1776 kfree(resp);
1777
1778 dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1779 vf->vf_id, conf->flow_id,
1780 (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1781 "add" : "del");
1782 devm_kfree(dev, conf);
1783 return ret;
1784
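	/*
	 * Error path: when success is true the hardware delete has already
	 * completed, so the filter config must still be freed even though
	 * the response buffer could not be allocated.
	 */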
err_exit:
	if (resp)
		resp->status = status;
	if (success)
		devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_flush_fdir_ctx
 * @pf: pointer to the PF structure
 *
 * Flush all pending events on the ctx_done list and process them.
 */
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		struct device *dev = ice_pf_to_dev(pf);
		enum virtchnl_fdir_prgm_status status;
		struct ice_vf_fdir_ctx *ctx;
		unsigned long flags;
		int ret;

		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
			continue;

		ctx = &vf->fdir.ctx_done;
		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
				vf->vf_id);
			goto err_exit;
		}

		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
		if (ret)
			goto err_exit;

		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		continue;
err_exit:
		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
	}
	mutex_unlock(&pf->vfs.table_lock);
}
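
/*
 * Illustrative sketch (not compiled in): the producer side of the ctx_done
 * handoff that ice_flush_fdir_ctx() above consumes. The real completion
 * path lives in the ctrl VSI IRQ handling elsewhere in the driver; the
 * function name here is hypothetical and this is only a minimal sketch of
 * the expected publish sequence, assuming the completion code moves the
 * in-flight ctx_irq state over to ctx_done and then kicks the service task.
 */
#if 0
static void example_fdir_publish_done(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *done = &vf->fdir.ctx_done;
	struct ice_vf_fdir_ctx *irq = &vf->fdir.ctx_irq;
	unsigned long flags;

	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	done->conf = irq->conf;			/* hand the request over */
	done->v_opcode = irq->v_opcode;
	done->stat = irq->stat;			/* result set by completion */
	irq->flags &= ~ICE_VF_FDIR_CTX_VALID;	/* free the submit slot */
	done->flags |= ICE_VF_FDIR_CTX_VALID;	/* make it visible */
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	/* Have the service task call ice_flush_fdir_ctx() */
	set_bit(ICE_FD_VF_FLUSH_CTX, vf->pf->state);
}
#endif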

/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, or a negative error code on failure.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

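	/*
	 * Arm a ~10 ms watchdog for the programming descriptor; if it does
	 * not complete in time, the request is failed with
	 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT (see ice_flush_fdir_ctx()).
	 */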
	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	timer_delete(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}

/**
 * ice_vc_parser_fv_check_diff - check two parsed FDIR profile fv contexts
 * @fv_a: struct of parsed FDIR profile field vector
 * @fv_b: struct of parsed FDIR profile field vector
 *
 * Check whether the two parsed FDIR profile field vector contexts differ in
 * proto_id, offset or mask.
 *
 * Return: true if they differ, false otherwise.
 */
static bool ice_vc_parser_fv_check_diff(struct ice_parser_fv *fv_a,
					struct ice_parser_fv *fv_b)
{
	return (fv_a->proto_id != fv_b->proto_id ||
		fv_a->offset != fv_b->offset ||
		fv_a->msk != fv_b->msk);
}

/**
 * ice_vc_parser_fv_save - save parsed FDIR profile fv context
 * @fv: struct of parsed FDIR profile field vector
 * @fv_src: parsed FDIR profile field vector context to save
 *
 * Save the parsed FDIR profile field vector context, including proto_id,
 * offset and mask.
 */
static void ice_vc_parser_fv_save(struct ice_parser_fv *fv,
				  struct ice_parser_fv *fv_src)
{
	fv->proto_id = fv_src->proto_id;
	fv->offset = fv_src->offset;
	fv->msk = fv_src->msk;
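	/*
	 * spec is deliberately cleared rather than copied: it is not part of
	 * the identity compared by ice_vc_parser_fv_check_diff(), so the
	 * saved copy should not carry a stale per-rule match value.
	 */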
	fv->spec = 0;
}

/**
 * ice_vc_add_fdir_raw - add a raw FDIR filter for VF
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @v_ret: the final VIRTCHNL code
 * @stat: pointer to the VIRTCHNL_OP_ADD_FDIR_FILTER response buffer
 * @len: length of the stat
 *
 * Return: 0 on success or negative errno on failure.
 */
static int
ice_vc_add_fdir_raw(struct ice_vf *vf,
		    struct virtchnl_fdir_fltr_conf *conf,
		    enum virtchnl_status_code *v_ret,
		    struct virtchnl_fdir_add *stat, int len)
{
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_fdir_prof_info *pi;
	struct ice_pf *pf = vf->pf;
	int ret, ptg, id, i;
	struct device *dev;
	struct ice_hw *hw;
	bool fv_found;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	*v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;

	id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
	ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id);
		return -ENODEV;
	}

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n",
			vf->vf_id);
		return -ENODEV;
	}

	fv_found = false;

	/* Check if profile info already exists, then update the counter */
	pi = &vf->fdir_prof_info[ptg];
	if (pi->fdir_active_cnt != 0) {
		for (i = 0; i < ICE_MAX_FV_WORDS; i++)
			if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i],
							&conf->prof->fv[i]))
				break;
		if (i == ICE_MAX_FV_WORDS) {
			fv_found = true;
			pi->fdir_active_cnt++;
		}
	}

	/* HW profile setting is only required for the first time */
	if (!fv_found) {
		ret = ice_flow_set_parser_prof(hw, vf_vsi->idx,
					       ctrl_vsi->idx, conf->prof,
					       ICE_BLK_FD);

		if (ret) {
			*v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
			dev_dbg(dev, "VF %d: insert hw prof failed\n",
				vf->vf_id);
			return ret;
		}
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		*v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n",
			vf->vf_id);
		return ret;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf,
				      VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		dev_dbg(dev, "VF %d: set FDIR context failed\n",
			vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, false);
	if (ret) {
		dev_err(dev, "VF %d: adding FDIR raw flow rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

	/* Save parsed profile fv info of the FDIR rule for the first time */
	if (!fv_found) {
		for (i = 0; i < conf->prof->fv_num; i++)
			ice_vc_parser_fv_save(&pi->prof.fv[i],
					      &conf->prof->fv[i]);
		pi->prof.fv_num = conf->prof->fv_num;
		pi->fdir_active_cnt = 1;
	}

	return 0;

err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	return ret;
}
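
/*
 * Lifetime of the per-PTG parser profile used by the raw path above: the
 * first raw rule whose field vector lands on a PTG programs the HW profile
 * and sets fdir_active_cnt to 1; further rules with an identical field
 * vector only bump the count. ice_vc_del_fdir_raw() decrements it and tears
 * the profile down again once the last such rule is gone.
 */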

/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err_exit;
	}

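	/*
	 * Enforce both the device-wide FD resource pool and a fixed per-VF
	 * quota so a single VF cannot exhaust the shared filter space.
	 */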
#define ICE_VF_MAX_FDIR_FILTERS	128
	if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
	    vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
			vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	/* For raw FDIR filters created by the parser */
	if (conf->parser_ena) {
		ret = ice_vc_add_fdir_raw(vf, conf, &v_ret, stat, len);
		if (ret)
			goto err_free_conf;
		goto exit;
	}

	is_tun = ice_fdir_is_tunnel(conf->ttype);
	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

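	/*
	 * Success and the deferred paths land here: for a programmed rule the
	 * reply to the VF is not sent now but from ice_vc_add_fdir_fltr_post()
	 * once the programming descriptor completes or times out;
	 * validate-only requests were answered synchronously above.
	 */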
exit:
	kfree(stat);
	return ret;

err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

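/*
 * Illustrative sketch (not compiled in): a minimal caller of
 * ice_vc_add_fdir_fltr() above. The function name is hypothetical and the
 * rule_cfg member is assumed from virtchnl.h; a zeroed rule_cfg would be
 * rejected by ice_vc_validate_fdir_fltr(), so a real VF request fills in
 * the protocol headers and an action set first.
 */
#if 0
static int example_fdir_validate_req(struct ice_vf *vf, u16 vsi_id)
{
	struct virtchnl_fdir_add req = {};

	req.vsi_id = vsi_id;	/* must pass ice_vc_fdir_param_check() */
	req.validate_only = 1;	/* only check the rule, do not program HW */
	/* req.rule_cfg describes the match criteria and the action */

	return ice_vc_add_fdir_fltr(vf, (u8 *)&req);
}
#endif
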
/**
 * ice_vc_del_fdir_raw - delete a raw FDIR filter for VF
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @v_ret: the final VIRTCHNL code
 * @stat: pointer to the VIRTCHNL_OP_DEL_FDIR_FILTER response buffer
 * @len: length of the stat
 *
 * Return: 0 on success or negative errno on failure.
 */
static int
ice_vc_del_fdir_raw(struct ice_vf *vf,
		    struct virtchnl_fdir_fltr_conf *conf,
		    enum virtchnl_status_code *v_ret,
		    struct virtchnl_fdir_del *stat, int len)
{
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	enum ice_block blk = ICE_BLK_FD;
	struct ice_fdir_prof_info *pi;
	struct ice_pf *pf = vf->pf;
	struct device *dev;
	struct ice_hw *hw;
	unsigned long id;
	u16 vsi_num;
	int ptg;
	int ret;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	*v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;

	id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
	ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];

	ret = ice_vc_fdir_write_fltr(vf, conf, false, false);
	if (ret) {
		dev_err(dev, "VF %u: deleting FDIR raw flow rule failed: %d\n",
			vf->vf_id, ret);
		return ret;
	}

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
		return -ENODEV;
	}

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_err(dev, "Can not get FDIR ctrl_vsi for VF %u\n",
			vf->vf_id);
		return -ENODEV;
	}

	pi = &vf->fdir_prof_info[ptg];
	if (pi->fdir_active_cnt != 0) {
		pi->fdir_active_cnt--;
		/* Remove the profile id flow if no active FDIR rule left */
		if (!pi->fdir_active_cnt) {
			vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi->idx);
			ice_rem_prof_id_flow(hw, blk, vsi_num, id);

			vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
			ice_rem_prof_id_flow(hw, blk, vsi_num, id);
		}
	}

	conf->parser_ena = false;
	return 0;
}

/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum virtchnl_status_code v_ret;
	struct ice_fdir_fltr *input;
	enum ice_fltr_ptype flow;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	/* For raw FDIR filters created by the parser */
	if (conf->parser_ena) {
		ret = ice_vc_del_fdir_raw(vf, conf, &v_ret, stat, len);
		if (ret)
			goto err_del_tmr;
		goto exit;
	}

	is_tun = ice_fdir_is_tunnel(conf->ttype);
	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	/* Remove unused profiles to avoid unexpected behaviors */
	input = &conf->input;
	flow = input->flow_type;
	if (fdir->fdir_fltr_cnt[flow][is_tun] == 1)
		ice_vc_fdir_rem_prof(vf, flow, is_tun);

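	/*
	 * As on the add path, the reply to the VF is deferred: it is sent
	 * from ice_vc_del_fdir_fltr_post() once the delete descriptor
	 * completes or times out.
	 */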
exit:
	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
	ice_vc_fdir_reset_cnt_all(fdir);
}

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
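	/*
	 * Teardown order matters: flush the rule entries before destroying
	 * the IDR that indexes them, and remove the HW profiles before
	 * freeing the profile memory backing them.
	 */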
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}