// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2023, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
#include "ice_vf_lib_private.h"

#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
#define PFCP_PORT_NR 8805

#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
	ICE_FDIR_TUNNEL_TYPE_ECPRI,
	ICE_FDIR_TUNNEL_TYPE_GTPU_INNER,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER,
	ICE_FDIR_TUNNEL_TYPE_GRE,
	ICE_FDIR_TUNNEL_TYPE_GTPOGRE,
	ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER,
	ICE_FDIR_TUNNEL_TYPE_GRE_INNER,
	ICE_FDIR_TUNNEL_TYPE_L2TPV2,
	ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER,
};

struct virtchnl_fdir_fltr_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type ttype;
	u64 inset_flag;
	u32 flow_id;

	struct ice_parser_profile *prof;
	bool parser_ena;
	u8 *pkt_buf;
	u8 pkt_len;
};

struct virtchnl_fdir_inset_map {
	enum virtchnl_proto_hdr_field field;
	enum ice_flow_field fld;
	u64 flag;
	u64 mask;
};

static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};
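
/*
 * Note: VIRTCHNL_PROTO_HDR_ESP_SPI is mapped twice above on purpose. The
 * inset_flag recorded while parsing the ESP header (UDP-encapsulated NAT-T
 * vs. plain IPsec) selects which of the two mappings applies; see the
 * flag/mask test in ice_vc_fdir_parse_flow_fld().
 */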

/**
 * ice_vc_fdir_param_check
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check for a valid VSI ID, PF state and VF state
 *
 * Return: 0 on success, and -EINVAL on error.
 */
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EINVAL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		return -EINVAL;

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
		return -EINVAL;

	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
		return -EINVAL;

	if (!ice_get_vf_vsi(vf))
		return -EINVAL;

	return 0;
}

/**
 * ice_vf_start_ctrl_vsi
 * @vf: pointer to the VF structure
 *
 * Allocate the control VSI on first use and open its port for the VF
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *ctrl_vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		return -EEXIST;

	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
	if (!ctrl_vsi) {
		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
			vf->vf_id);
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "Could not open control VSI for VF %d\n",
			vf->vf_id);
		goto err_vsi_open;
	}

	return 0;

err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[vf->ctrl_vsi_idx] = NULL;
		vf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}
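
/*
 * vf->fdir.fdir_prof is a lazily built two-level table: an array indexed by
 * flow type whose per-flow entries are allocated on first use and torn down
 * either individually or all at once by the helpers below.
 */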

/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof) {
		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
					       ICE_FLTR_PTYPE_MAX,
					       sizeof(*fdir->fdir_prof),
					       GFP_KERNEL);
		if (!fdir->fdir_prof)
			return -ENOMEM;
	}

	if (!fdir->fdir_prof[flow]) {
		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
						     sizeof(**fdir->fdir_prof),
						     GFP_KERNEL);
		if (!fdir->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 */
static void
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof)
		return;

	if (!fdir->fdir_prof[flow])
		return;

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
	fdir->fdir_prof[flow] = NULL;
}

/**
 * ice_vc_fdir_free_prof_all - free all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum ice_fltr_ptype flow;

	if (!fdir->fdir_prof)
		return;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_vc_fdir_free_prof(vf, flow);

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
	fdir->fdir_prof = NULL;
}

/**
 * ice_vc_fdir_parse_flow_fld
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store the matched fields into
 * the field type array
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
			   struct virtchnl_fdir_fltr_conf *conf,
			   enum ice_flow_field *fld, int *fld_cnt)
{
	struct virtchnl_proto_hdr hdr;
	u32 i;

	memcpy(&hdr, proto_hdr, sizeof(hdr));

	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
			if (fdir_inset_map[i].mask &&
			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
			     fdir_inset_map[i].flag))
				continue;

			fld[*fld_cnt] = fdir_inset_map[i].fld;
			*fld_cnt += 1;
			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
				return -EINVAL;
			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
						     fdir_inset_map[i].field);
		}

	return 0;
}
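
/*
 * Note that ice_vc_fdir_parse_flow_fld() works on a local copy of the
 * protocol header, so consumed field bits can be cleared with
 * VIRTCHNL_DEL_PROTO_HDR_FIELD() without mutating the VF's message buffer.
 */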

/**
 * ice_vc_fdir_set_flow_fld
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add message buffer's field vector and store it
 * in the flow's packet segment fields
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct virtchnl_proto_hdrs *proto;
	int fld_cnt = 0;
	int i;

	proto = &rule->proto_hdrs;
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		int ret;

		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
		if (ret)
			return ret;
	}

	if (fld_cnt == 0) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < fld_cnt; i++)
		ice_flow_set_fld(seg, fld[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}
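
/*
 * All three offsets above are ICE_FLOW_FLD_OFF_INVAL: only input-set
 * membership is recorded in the segment; the values to match are expected
 * to come from the FDIR training packet that is programmed later.
 */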

/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	enum ice_fltr_ptype flow = conf->input.flow_type;
	enum ice_fdir_tunnel_type ttype = conf->ttype;
	struct device *dev = ice_pf_to_dev(vf->pf);

	switch (flow) {
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
					  ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else {
			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
				ttype, vf->vf_id);
			return -EINVAL;
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
			flow, vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_fd_hw_prof *vf_prof;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_hw *hw;
	u64 prof_id;
	int i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
		return;

	vf_prof = fdir->fdir_prof[flow];
	prof_id = vf_prof->prof_id[tun];

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
		return;
	}

	if (!fdir->prof_entry_cnt[flow][tun])
		return;

	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
		if (vf_prof->entry_h[i][tun]) {
			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);

			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   vf_prof->entry_h[i][tun]);
			vf_prof->entry_h[i][tun] = 0;
		}

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	devm_kfree(dev, vf_prof->fdir_seg[tun]);
	vf_prof->fdir_seg[tun] = NULL;

	for (i = 0; i < vf_prof->cnt; i++)
		vf_prof->vsi_h[i] = 0;

	fdir->prof_entry_cnt[flow][tun] = 0;
}

/**
 * ice_vc_fdir_rem_prof_all - remove all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		ice_vc_fdir_rem_prof(vf, flow, 0);
		ice_vc_fdir_rem_prof(vf, flow, 1);
	}
}

/**
 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
 * @fdir: pointer to the VF FDIR structure
 */
static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		fdir->fdir_fltr_cnt[flow][0] = 0;
		fdir->fdir_fltr_cnt[flow][1] = 0;
	}

	fdir->fdir_fltr_cnt_total = 0;
}

/**
 * ice_vc_fdir_has_prof_conflict
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 *
 * Check if @conf has a conflicting profile with existing profiles
 *
 * Return: true if a conflict is found, false otherwise.
 */
static bool
ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
			      struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *existing_conf;
		enum ice_fltr_ptype flow_type_a, flow_type_b;
		struct ice_fdir_fltr *a, *b;

		existing_conf = to_fltr_conf_from_desc(desc);
		a = &existing_conf->input;
		b = &conf->input;
		flow_type_a = a->flow_type;
		flow_type_b = b->flow_type;

		/* No need to compare two rules with different tunnel types or
		 * with the same protocol type.
		 */
		if (existing_conf->ttype != conf->ttype ||
		    flow_type_a == flow_type_b)
			continue;

		switch (flow_type_a) {
		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
				return true;
			break;
		default:
			break;
		}
	}

	return false;
}
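
/*
 * An L4-specific rule (TCP/UDP/SCTP) and an "other" rule of the same IP
 * version are treated as a profile conflict regardless of which one was
 * installed first.
 */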

/**
 * ice_vc_fdir_write_flow_prof
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
			    struct ice_flow_seg_info *seg, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *vf_prof;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi)
		return -EINVAL;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi)
		return -EINVAL;

	vf_prof = fdir->fdir_prof[flow];
	old_seg = vf_prof->fdir_seg[tun];
	if (old_seg) {
		if (!memcmp(old_seg, seg, sizeof(*seg))) {
			dev_dbg(dev, "Duplicated profile for VF %d!\n",
				vf->vf_id);
			return -EEXIST;
		}

		if (fdir->fdir_fltr_cnt[flow][tun]) {
			ret = -EINVAL;
			dev_dbg(dev, "Input set conflicts for VF %d\n",
				vf->vf_id);
			goto err_exit;
		}

		/* remove previously allocated profile */
		ice_vc_fdir_rem_prof(vf, flow, tun);
	}

	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
				tun + 1, false, &prof);
	if (ret) {
		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
			flow, vf->vf_id);
		goto err_exit;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (ret) {
		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_prof;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (ret) {
		dev_dbg(dev,
			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_entry_1;
	}

	vf_prof->fdir_seg[tun] = seg;
	vf_prof->cnt = 0;
	fdir->prof_entry_cnt[flow][tun] = 0;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->prof_id[tun] = prof->id;

	return 0;

err_entry_1:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
err_exit:
	return ret;
}
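
/*
 * Two flow entries are added per profile: one for the VF's data VSI and one
 * for its control VSI (the VSI used to program FDIR rules). Both handles
 * are recorded in vf_prof so ice_vc_fdir_rem_prof() can undo them.
 */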

/**
 * ice_vc_fdir_config_input_set
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Configure the input set type and value from the virtual channel add
 * message buffer
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			     struct virtchnl_fdir_fltr_conf *conf, int tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_flow_seg_info *seg;
	enum ice_fltr_ptype flow;
	int ret;

	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
	if (ret) {
		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
			vf->vf_id);
		return ret;
	}

	flow = input->flow_type;
	ret = ice_vc_fdir_alloc_prof(vf, flow);
	if (ret) {
		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
		return ret;
	}

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
	if (ret == -EEXIST) {
		devm_kfree(dev, seg);
	} else if (ret) {
		dev_dbg(dev, "Write flow profile for VF %d failed\n",
			vf->vf_id);
		goto err_exit;
	}

	return 0;

err_exit:
	devm_kfree(dev, seg);
	return ret;
}
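
/*
 * Note the -EEXIST handling above: a duplicated profile is not an error.
 * The freshly built segment is freed, the existing profile is reused, and
 * the function still returns 0.
 */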

/**
 * ice_vc_fdir_is_raw_flow - check if FDIR flow is raw (binary)
 * @proto: virtchnl protocol headers
 *
 * Check whether the FDIR rule is a raw (protocol-agnostic) flow. A common
 * FDIR rule always has a non-zero proto->count, so the tunnel_level and
 * count of @proto serve as the indicators: if both are zero, the rule is
 * treated as a raw flow.
 *
 * Returns: true if headers describe raw flow, false otherwise.
 */
static bool
ice_vc_fdir_is_raw_flow(struct virtchnl_proto_hdrs *proto)
{
	return (proto->tunnel_level == 0 && proto->count == 0);
}

/**
 * ice_vc_fdir_parse_raw - parse a virtchnl raw FDIR rule
 * @vf: pointer to the VF info
 * @proto: virtchnl protocol headers
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's raw flow and store it in @conf
 *
 * Return: 0 on success or negative errno on failure.
 */
static int
ice_vc_fdir_parse_raw(struct ice_vf *vf,
		      struct virtchnl_proto_hdrs *proto,
		      struct virtchnl_fdir_fltr_conf *conf)
{
	u8 *pkt_buf, *msk_buf __free(kfree);
	struct ice_parser_result rslt;
	struct ice_pf *pf = vf->pf;
	struct ice_parser *psr;
	int status = -ENOMEM;
	struct ice_hw *hw;
	u16 udp_port = 0;

	pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
	msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
	if (!pkt_buf || !msk_buf)
		goto err_mem_alloc;

	memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len);
	memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len);

	hw = &pf->hw;

	/* Get raw profile info via Parser Lib */
	psr = ice_parser_create(hw);
	if (IS_ERR(psr)) {
		status = PTR_ERR(psr);
		goto err_mem_alloc;
	}

	ice_parser_dvm_set(psr, ice_is_dvm_ena(hw));

	if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
		ice_parser_vxlan_tunnel_set(psr, udp_port, true);

	status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt);
	if (status)
		goto err_parser_destroy;

	if (hw->debug_mask & ICE_DBG_PARSER)
		ice_parser_result_dump(hw, &rslt);

	conf->prof = kzalloc(sizeof(*conf->prof), GFP_KERNEL);
	if (!conf->prof) {
		status = -ENOMEM;
		goto err_parser_destroy;
	}

	status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
					 proto->raw.pkt_len, ICE_BLK_FD,
					 conf->prof);
	if (status)
		goto err_parser_profile_init;

	if (hw->debug_mask & ICE_DBG_PARSER)
		ice_parser_profile_dump(hw, conf->prof);

	/* Store raw flow info into @conf */
	conf->pkt_len = proto->raw.pkt_len;
	conf->pkt_buf = pkt_buf;
	conf->parser_ena = true;

	ice_parser_destroy(psr);
	return 0;

err_parser_profile_init:
	kfree(conf->prof);
err_parser_destroy:
	ice_parser_destroy(psr);
err_mem_alloc:
	kfree(pkt_buf);
	return status;
}
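
/*
 * Buffer ownership in ice_vc_fdir_parse_raw(): msk_buf is auto-freed via
 * __free(kfree), while pkt_buf is handed over to conf->pkt_buf on success
 * and freed manually only on the error paths.
 */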

/**
 * ice_vc_fdir_parse_pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store it in @conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
			proto->count, vf->vf_id);
		return -EINVAL;
	}

	/* For raw FDIR filters created by the parser */
	if (ice_vc_fdir_is_raw_flow(proto))
		return ice_vc_fdir_parse_raw(vf, proto, conf);

	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		struct ip_esp_hdr *esph;
		struct ip_auth_hdr *ah;
		struct sctphdr *sctph;
		struct ipv6hdr *ip6h;
		struct udphdr *udph;
		struct tcphdr *tcph;
		struct ethhdr *eth;
		struct iphdr *iph;
		u8 s_field;
		u8 *rawh;

		switch (hdr->type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			eth = (struct ethhdr *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;

			if (hdr->field_selector)
				input->ext_data.ether_type = eth->h_proto;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			iph = (struct iphdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV4;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;

			if (hdr->field_selector) {
				input->ip.v4.src_ip = iph->saddr;
				input->ip.v4.dst_ip = iph->daddr;
				input->ip.v4.tos = iph->tos;
				input->ip.v4.proto = iph->protocol;
			}
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ip6h = (struct ipv6hdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV6;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;

			if (hdr->field_selector) {
				memcpy(input->ip.v6.src_ip,
				       ip6h->saddr.in6_u.u6_addr8,
				       sizeof(ip6h->saddr));
				memcpy(input->ip.v6.dst_ip,
				       ip6h->daddr.in6_u.u6_addr8,
				       sizeof(ip6h->daddr));
				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
						  (ip6h->flow_lbl[0] >> 4);
				input->ip.v6.proto = ip6h->nexthdr;
			}
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			tcph = (struct tcphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = tcph->source;
					input->ip.v4.dst_port = tcph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = tcph->source;
					input->ip.v6.dst_port = tcph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			udph = (struct udphdr *)hdr->buffer;
			/* remember L4 so a following ESP header can detect
			 * UDP-encapsulated NAT-T
			 */
			l4 = VIRTCHNL_PROTO_HDR_UDP;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = udph->source;
					input->ip.v4.dst_port = udph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = udph->source;
					input->ip.v6.dst_port = udph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			sctph = (struct sctphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = sctph->source;
					input->ip.v4.dst_port = sctph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = sctph->source;
					input->ip.v6.dst_port = sctph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;

			if (hdr->field_selector)
				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
			break;
		case VIRTCHNL_PROTO_HDR_ESP:
			esph = (struct ip_esp_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
			    l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
			else
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = esph->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = esph->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_AH:
			ah = (struct ip_auth_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = ah->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = ah->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_PFCP:
			rawh = (u8 *)hdr->buffer;
			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
			}
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
			rawh = (u8 *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

			if (hdr->field_selector)
				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			rawh = (u8 *)hdr->buffer;

			if (hdr->field_selector)
				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			break;
		default:
			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
				hdr->type, vf->vf_id);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_vc_fdir_parse_action
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's action and store it in @conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	u32 dest_num = 0;
	u32 mark_num = 0;
	int i;

	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
		dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
			as->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < as->count; i++) {
		struct virtchnl_filter_action *action = &as->actions[i];

		switch (action->type) {
		case VIRTCHNL_ACTION_PASSTHRU:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case VIRTCHNL_ACTION_DROP:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case VIRTCHNL_ACTION_QUEUE:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			input->q_index = action->act_conf.queue.index;
			break;
		case VIRTCHNL_ACTION_Q_REGION:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
			input->q_index = action->act_conf.queue.index;
			input->q_region = action->act_conf.queue.region;
			break;
		case VIRTCHNL_ACTION_MARK:
			mark_num++;
			input->fltr_id = action->act_conf.mark_id;
			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
			break;
		default:
			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
				action->type, vf->vf_id);
			return -EINVAL;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		dev_dbg(dev, "Invalid destination action for VF %d\n",
			vf->vf_id);
		return -EINVAL;
	}

	if (mark_num >= 2) {
		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	return 0;
}
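
/*
 * Exactly one destination action (passthru, drop, queue or queue region)
 * and at most one mark action are accepted per rule; anything else is
 * rejected by the checks above.
 */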

/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	int ret;

	/* For raw FDIR filters created by the parser */
	if (!ice_vc_fdir_is_raw_flow(proto))
		if (!ice_vc_validate_pattern(vf, proto))
			return -EINVAL;

	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
	if (ret)
		return ret;

	return ice_vc_fdir_parse_action(vf, fltr, conf);
}

/**
 * ice_vc_fdir_comp_rules - check whether two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the two rules carry the same value, false otherwise.
 */
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
		       struct virtchnl_fdir_fltr_conf *conf_b)
{
	struct ice_fdir_fltr *a = &conf_a->input;
	struct ice_fdir_fltr *b = &conf_b->input;

	if (conf_a->ttype != conf_b->ttype)
		return false;
	if (a->flow_type != b->flow_type)
		return false;
	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
		return false;
	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
		return false;
	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
		return false;
	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
		return false;
	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
		return false;
	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
		return false;
	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
		return false;
	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
		return false;

	return true;
}

/**
 * ice_vc_fdir_is_dup_fltr
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if there is a duplicated rule with the same conf value
 *
 * Return: true if a duplicate filter exists, false otherwise.
 */
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;
	bool ret;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *node =
			to_fltr_conf_from_desc(desc);

		ret = ice_vc_fdir_comp_rules(node, conf);
		if (ret)
			return true;
	}

	return false;
}
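
/*
 * Duplicate detection compares the tunnel type and every match field via
 * ice_vc_fdir_comp_rules(); actions are not compared, so two rules that
 * differ only in their actions still count as duplicates.
 */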

/**
 * ice_vc_fdir_insert_entry
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into list and allocate ID for this filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	/* alloc ID corresponding with conf */
	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
	if (i < 0)
		return -EINVAL;
	*id = i;

	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
	return 0;
}

/**
 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: filter rule's ID
 */
static void
ice_vc_fdir_remove_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
{
	struct ice_fdir_fltr *input = &conf->input;

	idr_remove(&vf->fdir.fdir_rule_idr, id);
	list_del(&input->fltr_node);
}

/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: pointer to the FDIR conf entry on success, NULL otherwise.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}

/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}

/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies delete rule
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	input->dest_vsi = vsi->idx;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	if (conf->parser_ena) {
		memcpy(pkt, conf->pkt_buf, conf->pkt_len);
	} else {
		ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
		if (ret) {
			dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
				vf->vf_id, input->flow_type);
			goto err_free_pkt;
		}
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}

/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 */
static void ice_vf_fdir_timer(struct timer_list *t)
{
	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct ice_vf *vf;
	struct ice_pf *pf;

	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
	vf = container_of(fdir, struct ice_vf, fdir);
	ctx_done = &fdir->ctx_done;
	pf = vf->pf;
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}
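
/*
 * Completion handoff: both the timer above and the IRQ handler below move
 * the pending request from ctx_irq to ctx_done under ctx_lock, record the
 * outcome (timeout vs. IRQ), and kick the service task, which processes
 * ctx_done in ice_flush_fdir_ctx().
 */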

/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
			union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_pf *pf = ctrl_vsi->back;
	struct ice_vf *vf = ctrl_vsi->vf;
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir_ctx *ctx_irq;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct device *dev;
	int ret;

	if (WARN_ON(!vf))
		return;

	fdir = &vf->fdir;
	ctx_done = &fdir->ctx_done;
	ctx_irq = &fdir->ctx_irq;
	dev = ice_pf_to_dev(pf);
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_IRQ;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	ret = del_timer(&ctx_irq->rx_tmr);
	if (!ret)
		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
 * @vf: pointer to the VF info
 */
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
	u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u16 vsi_num;

	pf = vf->pf;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
		return;
	}

	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);

	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
		fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
		fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
		fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
		break;
	case ICE_MAC_E810:
	default:
		fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
		fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
		fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
		fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
	}

	dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
		vf->vf_id, fd_size_g, fd_size_b);
	dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
		vf->vf_id, fd_cnt_g, fd_cnt_b);
}

/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
		      enum virtchnl_fdir_prgm_status *status)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u32 stat_err, error, prog_id;
	int ret;

	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
	if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
	    ICE_FXD_FLTR_WB_QW1_DD_YES) {
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
		ret = -EINVAL;
		goto err_exit;
	}

	prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows add, but ctx does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows del, but ctx does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
			dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		} else {
			dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		}
		ret = -EINVAL;
		goto err_exit;
	}

	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		ret = -EINVAL;
		goto err_exit;
	}

	*status = VIRTCHNL_FDIR_SUCCESS;

	return 0;

err_exit:
	ice_vf_fdir_dump_info(vf);
	return ret;
}
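
/*
 * The checks above decode the FDIR programming-status writeback: DD must
 * be set, the PROG_ID must match the pending virtchnl opcode, and the
 * FAIL/FAIL_PROF bits are mapped to NORESOURCE/NONEXIST virtchnl statuses.
 */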

static int ice_fdir_is_tunnel(enum ice_fdir_tunnel_type ttype)
{
	return (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER ||
		ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER ||
		ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER ||
		ttype == ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER ||
		ttype == ICE_FDIR_TUNNEL_TYPE_ECPRI ||
		ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER);
}

/**
 * ice_vc_add_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post-process the flow director add command: on success, send a success
 * message back over virtchnl; otherwise revert the context and send back a
 * failure message.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
	vf->fdir.fdir_fltr_cnt_total++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post-process the flow director del command: on success, send a success
 * message back over virtchnl; otherwise revert the context and send back a
 * failure message.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_del *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
	vf->fdir.fdir_fltr_cnt_total--;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	devm_kfree(dev, conf);
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	if (success)
		devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_flush_fdir_ctx
 * @pf: pointer to the PF structure
 *
 * Flush all the pending events on the ctx_done list and process them.
 */
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		struct device *dev = ice_pf_to_dev(pf);
		enum virtchnl_fdir_prgm_status status;
		struct ice_vf_fdir_ctx *ctx;
		unsigned long flags;
		int ret;

		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
			continue;

		ctx = &vf->fdir.ctx_done;
		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
				vf->vf_id);
			goto err_exit;
		}

		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
		if (ret)
			goto err_exit;

		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		continue;
err_exit:
		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
	}
	mutex_unlock(&pf->vfs.table_lock);
}
/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 *
 * Return: none.
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}

/**
 * ice_vc_parser_fv_check_diff - check two parsed FDIR profile fv contexts
 * @fv_a: struct of parsed FDIR profile field vector
 * @fv_b: struct of parsed FDIR profile field vector
 *
 * Check if the two parsed FDIR profile field vector contexts are different,
 * including proto_id, offset and mask.
 *
 * Return: true if they differ, false otherwise.
 */
static bool ice_vc_parser_fv_check_diff(struct ice_parser_fv *fv_a,
					struct ice_parser_fv *fv_b)
{
	return (fv_a->proto_id != fv_b->proto_id ||
		fv_a->offset != fv_b->offset ||
		fv_a->msk != fv_b->msk);
}

/**
 * ice_vc_parser_fv_save - save parsed FDIR profile fv context
 * @fv: struct of parsed FDIR profile field vector
 * @fv_src: parsed FDIR profile field vector context to save
 *
 * Save the parsed FDIR profile field vector context, including proto_id,
 * offset and mask.
 *
 * Return: none.
 */
static void ice_vc_parser_fv_save(struct ice_parser_fv *fv,
				  struct ice_parser_fv *fv_src)
{
	fv->proto_id = fv_src->proto_id;
	fv->offset = fv_src->offset;
	fv->msk = fv_src->msk;
	fv->spec = 0;
}
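/*
 * Sketch of how the two helpers above pair up (mirrors the logic in
 * ice_vc_add_fdir_raw() below; 'pi' and 'conf' as used there):
 *
 *	for (i = 0; i < ICE_MAX_FV_WORDS; i++)
 *		if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i],
 *						&conf->prof->fv[i]))
 *			break;
 *	if (i == ICE_MAX_FV_WORDS)
 *		// identical profile: reuse it and bump fdir_active_cnt
 *	else
 *		// new profile: program HW, then ice_vc_parser_fv_save()
 */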
/**
 * ice_vc_add_fdir_raw - add a raw FDIR filter for VF
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @v_ret: the final VIRTCHNL code
 * @stat: pointer to the VIRTCHNL_OP_ADD_FDIR_FILTER response buffer
 * @len: length of the stat
 *
 * Return: 0 on success or negative errno on failure.
 */
static int
ice_vc_add_fdir_raw(struct ice_vf *vf,
		    struct virtchnl_fdir_fltr_conf *conf,
		    enum virtchnl_status_code *v_ret,
		    struct virtchnl_fdir_add *stat, int len)
{
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_fdir_prof_info *pi;
	struct ice_pf *pf = vf->pf;
	int ret, ptg, id, i;
	struct device *dev;
	struct ice_hw *hw;
	bool fv_found;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	*v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;

	id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
	ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id);
		return -ENODEV;
	}

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n",
			vf->vf_id);
		return -ENODEV;
	}

	fv_found = false;

	/* Check if profile info already exists, then update the counter */
	pi = &vf->fdir_prof_info[ptg];
	if (pi->fdir_active_cnt != 0) {
		for (i = 0; i < ICE_MAX_FV_WORDS; i++)
			if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i],
							&conf->prof->fv[i]))
				break;
		if (i == ICE_MAX_FV_WORDS) {
			fv_found = true;
			pi->fdir_active_cnt++;
		}
	}

	/* HW profile setting is only required for the first time */
	if (!fv_found) {
		ret = ice_flow_set_parser_prof(hw, vf_vsi->idx,
					       ctrl_vsi->idx, conf->prof,
					       ICE_BLK_FD);
		if (ret) {
			*v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
			dev_dbg(dev, "VF %d: insert hw prof failed\n",
				vf->vf_id);
			return ret;
		}
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		*v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n",
			vf->vf_id);
		return ret;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf,
				      VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		dev_dbg(dev, "VF %d: set FDIR context failed\n",
			vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, false);
	if (ret) {
		dev_err(dev, "VF %d: adding FDIR raw flow rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

	/* Save parsed profile fv info of the FDIR rule for the first time */
	if (!fv_found) {
		for (i = 0; i < conf->prof->fv_num; i++)
			ice_vc_parser_fv_save(&pi->prof.fv[i],
					      &conf->prof->fv[i]);
		pi->prof.fv_num = conf->prof->fv_num;
		pi->fdir_active_cnt = 1;
	}

	return 0;

err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	return ret;
}
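/*
 * Parser-profile lifetime, as implemented above and in
 * ice_vc_del_fdir_raw() below: the first raw rule that maps to a PTG
 * programs the HW parser profile and sets fdir_active_cnt to 1; later
 * rules with an identical field vector only increment the count; the
 * delete path decrements it and removes the profile flows from both the
 * ctrl VSI and the VF VSI once the count drops to zero.
 */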
/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);

#define ICE_VF_MAX_FDIR_FILTERS	128
	if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
	    vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
			vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	/* For raw FDIR filters created by the parser */
	if (conf->parser_ena) {
		ret = ice_vc_add_fdir_raw(vf, conf, &v_ret, stat, len);
		if (ret)
			goto err_free_conf;
		goto exit;
	}

	is_tun = ice_fdir_is_tunnel(conf->ttype);
	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

exit:
	kfree(stat);
	return ret;

err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}
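/*
 * Illustrative VF-side sketch (not driver code): a VF can pre-check a
 * rule without programming hardware by setting validate_only in the
 * request, which the handler above answers before any HW access:
 *
 *	struct virtchnl_fdir_add req = {
 *		.vsi_id = vsi_id,	// hypothetical VF-side variable
 *		.validate_only = 1,
 *	};
 *	// expected reply: VIRTCHNL_FDIR_SUCCESS, with no flow_id consumed
 */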
/**
 * ice_vc_del_fdir_raw - delete a raw FDIR filter for VF
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @v_ret: the final VIRTCHNL code
 * @stat: pointer to the VIRTCHNL_OP_DEL_FDIR_FILTER response buffer
 * @len: length of the stat
 *
 * Return: 0 on success or negative errno on failure.
 */
static int
ice_vc_del_fdir_raw(struct ice_vf *vf,
		    struct virtchnl_fdir_fltr_conf *conf,
		    enum virtchnl_status_code *v_ret,
		    struct virtchnl_fdir_del *stat, int len)
{
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	enum ice_block blk = ICE_BLK_FD;
	struct ice_fdir_prof_info *pi;
	struct ice_pf *pf = vf->pf;
	struct device *dev;
	struct ice_hw *hw;
	unsigned long id;
	u16 vsi_num;
	int ptg;
	int ret;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	*v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;

	id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
	ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];

	ret = ice_vc_fdir_write_fltr(vf, conf, false, false);
	if (ret) {
		dev_err(dev, "VF %u: deleting FDIR raw flow rule failed: %d\n",
			vf->vf_id, ret);
		return ret;
	}

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
		return -ENODEV;
	}

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_err(dev, "Can not get FDIR ctrl_vsi for VF %u\n",
			vf->vf_id);
		return -ENODEV;
	}

	pi = &vf->fdir_prof_info[ptg];
	if (pi->fdir_active_cnt != 0) {
		pi->fdir_active_cnt--;
		/* Remove the profile id flow if no active FDIR rule is left */
		if (!pi->fdir_active_cnt) {
			vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi->idx);
			ice_rem_prof_id_flow(hw, blk, vsi_num, id);

			vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
			ice_rem_prof_id_flow(hw, blk, vsi_num, id);
		}
	}

	conf->parser_ena = false;
	return 0;
}
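/*
 * Note the ordering above: the raw rule is removed from hardware via
 * ice_vc_fdir_write_fltr(..., false, false) before the parser-profile
 * refcount is dropped, so the profile flows are only released once no
 * programmed rule still refers to them.
 */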
/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum virtchnl_status_code v_ret;
	struct ice_fdir_fltr *input;
	enum ice_fltr_ptype flow;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	/* For raw FDIR filters created by the parser */
	if (conf->parser_ena) {
		ret = ice_vc_del_fdir_raw(vf, conf, &v_ret, stat, len);
		if (ret)
			goto err_del_tmr;
		goto exit;
	}

	is_tun = ice_fdir_is_tunnel(conf->ttype);
	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	/* Remove unused profiles to avoid unexpected behavior */
	input = &conf->input;
	flow = input->flow_type;
	if (fdir->fdir_fltr_cnt[flow][is_tun] == 1)
		ice_vc_fdir_rem_prof(vf, flow, is_tun);

exit:
	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
	ice_vc_fdir_reset_cnt_all(fdir);
}
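/*
 * Assumed calling convention (based on the resources each routine
 * touches): ice_vf_fdir_init() runs once while a VF is being created,
 * before any FDIR virtchnl message is serviced; ice_vf_fdir_exit()
 * below is its teardown counterpart.
 */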
/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}
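/*
 * Teardown order in ice_vf_fdir_exit() matters: pending filter entries
 * are flushed before the IDR is destroyed, and HW profiles are removed
 * (ice_vc_fdir_rem_prof_all) before their memory is released
 * (ice_vc_fdir_free_prof_all).
 */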