// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2023, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
#include "ice_vf_lib_private.h"

#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
#define PFCP_PORT_NR 8805

#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
	ICE_FDIR_TUNNEL_TYPE_ECPRI,
	ICE_FDIR_TUNNEL_TYPE_GTPU_INNER,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER,
	ICE_FDIR_TUNNEL_TYPE_GRE,
	ICE_FDIR_TUNNEL_TYPE_GTPOGRE,
	ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER,
	ICE_FDIR_TUNNEL_TYPE_GRE_INNER,
	ICE_FDIR_TUNNEL_TYPE_L2TPV2,
	ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER,
};

struct virtchnl_fdir_fltr_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type ttype;
	u64 inset_flag;
	u32 flow_id;

	struct ice_parser_profile *prof;
	bool parser_ena;
	u8 *pkt_buf;
	u8 pkt_len;
};

struct virtchnl_fdir_inset_map {
	enum virtchnl_proto_hdr_field field;
	enum ice_flow_field fld;
	u64 flag;
	u64 mask;
};

static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};
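/* Note: the two VIRTCHNL_PROTO_HDR_ESP_SPI entries above map the same
 * virtchnl field to different flow fields. They are disambiguated via
 * conf->inset_flag: ice_vc_fdir_parse_pattern() records whether the ESP
 * header follows UDP (FDIR_INSET_FLAG_ESP_UDP, i.e. NAT-T) or plain IP
 * (FDIR_INSET_FLAG_ESP_IPSEC), and ice_vc_fdir_parse_flow_fld() only
 * accepts the map entry whose flag matches under FDIR_INSET_FLAG_ESP_M.
 */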
/**
 * ice_vc_fdir_param_check
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check for a valid VSI ID, the PF state and the VF state
 *
 * Return: 0 on success, and -EINVAL on error.
 */
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EINVAL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		return -EINVAL;

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
		return -EINVAL;

	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
		return -EINVAL;

	if (!ice_get_vf_vsi(vf))
		return -EINVAL;

	return 0;
}

/**
 * ice_vf_start_ctrl_vsi
 * @vf: pointer to the VF structure
 *
 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *ctrl_vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		return -EEXIST;

	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
	if (!ctrl_vsi) {
		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
			vf->vf_id);
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "Could not open control VSI for VF %d\n",
			vf->vf_id);
		goto err_vsi_open;
	}

	return 0;

err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[vf->ctrl_vsi_idx] = NULL;
		vf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}

/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof) {
		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
					       ICE_FLTR_PTYPE_MAX,
					       sizeof(*fdir->fdir_prof),
					       GFP_KERNEL);
		if (!fdir->fdir_prof)
			return -ENOMEM;
	}

	if (!fdir->fdir_prof[flow]) {
		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
						     sizeof(**fdir->fdir_prof),
						     GFP_KERNEL);
		if (!fdir->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 */
static void
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof)
		return;

	if (!fdir->fdir_prof[flow])
		return;

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
	fdir->fdir_prof[flow] = NULL;
}

/**
 * ice_vc_fdir_free_prof_all - free all the profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum ice_fltr_ptype flow;

	if (!fdir->fdir_prof)
		return;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_vc_fdir_free_prof(vf, flow);

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
	fdir->fdir_prof = NULL;
}

/**
 * ice_vc_fdir_parse_flow_fld
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store the matched fields into
 * the field type array
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
			   struct virtchnl_fdir_fltr_conf *conf,
			   enum ice_flow_field *fld, int *fld_cnt)
{
	struct virtchnl_proto_hdr hdr;
	u32 i;

	memcpy(&hdr, proto_hdr, sizeof(hdr));

	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
			if (fdir_inset_map[i].mask &&
			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
			     fdir_inset_map[i].flag))
				continue;

			fld[*fld_cnt] = fdir_inset_map[i].fld;
			*fld_cnt += 1;
			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
				return -EINVAL;
			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
						     fdir_inset_map[i].field);
		}

	return 0;
}
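/* ice_vc_fdir_parse_flow_fld() deliberately works on a local copy of the
 * proto_hdr: clearing field selector bits (VIRTCHNL_DEL_PROTO_HDR_FIELD)
 * while walking fdir_inset_map must not modify the VF's original message,
 * and the loop stops once every requested field has been consumed.
 */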
/**
 * ice_vc_fdir_set_flow_fld
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store it in
 * the flow's packet segment field
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct virtchnl_proto_hdrs *proto;
	int fld_cnt = 0;
	int i;

	proto = &rule->proto_hdrs;
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		int ret;

		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
		if (ret)
			return ret;
	}

	if (fld_cnt == 0) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < fld_cnt; i++)
		ice_flow_set_fld(seg, fld[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}

/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	enum ice_fltr_ptype flow = conf->input.flow_type;
	enum ice_fdir_tunnel_type ttype = conf->ttype;
	struct device *dev = ice_pf_to_dev(vf->pf);

	switch (flow) {
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
					  ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else {
			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
				ttype, vf->vf_id);
			return -EINVAL;
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
			flow, vf->vf_id);
		return -EINVAL;
	}

	return 0;
}
/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_fd_hw_prof *vf_prof;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_hw *hw;
	u64 prof_id;
	int i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
		return;

	vf_prof = fdir->fdir_prof[flow];
	prof_id = vf_prof->prof_id[tun];

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
		return;
	}

	if (!fdir->prof_entry_cnt[flow][tun])
		return;

	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
		if (vf_prof->entry_h[i][tun]) {
			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);

			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   vf_prof->entry_h[i][tun]);
			vf_prof->entry_h[i][tun] = 0;
		}

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	devm_kfree(dev, vf_prof->fdir_seg[tun]);
	vf_prof->fdir_seg[tun] = NULL;

	for (i = 0; i < vf_prof->cnt; i++)
		vf_prof->vsi_h[i] = 0;

	fdir->prof_entry_cnt[flow][tun] = 0;
}

/**
 * ice_vc_fdir_rem_prof_all - remove all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		ice_vc_fdir_rem_prof(vf, flow, 0);
		ice_vc_fdir_rem_prof(vf, flow, 1);
	}
}

/**
 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
 * @fdir: pointer to the VF FDIR structure
 */
static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		fdir->fdir_fltr_cnt[flow][0] = 0;
		fdir->fdir_fltr_cnt[flow][1] = 0;
	}

	fdir->fdir_fltr_cnt_total = 0;
}

/**
 * ice_vc_fdir_has_prof_conflict
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 *
 * Check if @conf has a conflicting profile with existing profiles
 *
 * Return: true if a conflict is found, false otherwise.
 */
static bool
ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
			      struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *existing_conf;
		enum ice_fltr_ptype flow_type_a, flow_type_b;
		struct ice_fdir_fltr *a, *b;

		existing_conf = to_fltr_conf_from_desc(desc);
		a = &existing_conf->input;
		b = &conf->input;
		flow_type_a = a->flow_type;
		flow_type_b = b->flow_type;

		/* No need to compare two rules with different tunnel types or
		 * with the same protocol type.
		 */
		if (existing_conf->ttype != conf->ttype ||
		    flow_type_a == flow_type_b)
			continue;

		switch (flow_type_a) {
		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
				return true;
			break;
		default:
			break;
		}
	}

	return false;
}
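/* The UDP/TCP/SCTP vs. *_OTHER pairings above are treated as conflicts
 * because an L3-only (*_OTHER) profile also covers the IP portion of TCP,
 * UDP and SCTP packets; programming both input sets into the FD block for
 * the same tunnel type would produce ambiguous profile matches.
 */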
/**
 * ice_vc_fdir_write_flow_prof
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
			    struct ice_flow_seg_info *seg, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *vf_prof;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi)
		return -EINVAL;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi)
		return -EINVAL;

	vf_prof = fdir->fdir_prof[flow];
	old_seg = vf_prof->fdir_seg[tun];
	if (old_seg) {
		if (!memcmp(old_seg, seg, sizeof(*seg))) {
			dev_dbg(dev, "Duplicated profile for VF %d!\n",
				vf->vf_id);
			return -EEXIST;
		}

		if (fdir->fdir_fltr_cnt[flow][tun]) {
			ret = -EINVAL;
			dev_dbg(dev, "Input set conflicts for VF %d\n",
				vf->vf_id);
			goto err_exit;
		}

		/* remove previously allocated profile */
		ice_vc_fdir_rem_prof(vf, flow, tun);
	}

	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
				tun + 1, false, &prof);
	if (ret) {
		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
			flow, vf->vf_id);
		goto err_exit;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (ret) {
		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_prof;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (ret) {
		dev_dbg(dev,
			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_entry_1;
	}

	vf_prof->fdir_seg[tun] = seg;
	vf_prof->cnt = 0;
	fdir->prof_entry_cnt[flow][tun] = 0;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->prof_id[tun] = prof->id;

	return 0;

err_entry_1:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
err_exit:
	return ret;
}

/**
 * ice_vc_fdir_config_input_set
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Config the input set type and value for virtual channel add msg buffer
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			     struct virtchnl_fdir_fltr_conf *conf, int tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_flow_seg_info *seg;
	enum ice_fltr_ptype flow;
	int ret;

	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
	if (ret) {
		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
			vf->vf_id);
		return ret;
	}

	flow = input->flow_type;
	ret = ice_vc_fdir_alloc_prof(vf, flow);
	if (ret) {
		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
		return ret;
	}

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
	if (ret == -EEXIST) {
		devm_kfree(dev, seg);
	} else if (ret) {
		dev_dbg(dev, "Write flow profile for VF %d failed\n",
			vf->vf_id);
		goto err_exit;
	}

	return 0;

err_exit:
	devm_kfree(dev, seg);
	return ret;
}

/**
 * ice_vc_fdir_is_raw_flow - check if FDIR flow is raw (binary)
 * @proto: virtchnl protocol headers
 *
 * Check if the FDIR rule is a raw flow (protocol agnostic flow) or not. Note
 * that a common FDIR rule must have a non-zero proto->count. Thus, we choose
 * the tunnel_level and count of proto as the indicators. If both tunnel_level
 * and count of proto are zero, this FDIR rule is regarded as a raw flow.
 *
 * Return: true if the headers describe a raw flow, false otherwise.
 */
static bool
ice_vc_fdir_is_raw_flow(struct virtchnl_proto_hdrs *proto)
{
	return (proto->tunnel_level == 0 && proto->count == 0);
}
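/* As an illustration (hypothetical values, not taken from a real trace), a
 * raw-flow request from a VF would carry proto_hdrs laid out roughly as:
 *
 *	proto_hdrs.tunnel_level = 0;
 *	proto_hdrs.count = 0;
 *	proto_hdrs.raw.pkt_len = <length of the training packet>;
 *	proto_hdrs.raw.spec = <packet bytes to match>;
 *	proto_hdrs.raw.mask = <per-byte match mask>;
 *
 * whereas a conventional request describes the flow with a non-zero count
 * of parsed proto_hdr entries instead.
 */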
/**
 * ice_vc_fdir_parse_raw - parse a virtchnl raw FDIR rule
 * @vf: pointer to the VF info
 * @proto: virtchnl protocol headers
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's raw flow and store it in @conf
 *
 * Return: 0 on success or negative errno on failure.
 */
static int
ice_vc_fdir_parse_raw(struct ice_vf *vf,
		      struct virtchnl_proto_hdrs *proto,
		      struct virtchnl_fdir_fltr_conf *conf)
{
	u8 *pkt_buf, *msk_buf __free(kfree) = NULL;
	struct ice_parser_result rslt;
	struct ice_pf *pf = vf->pf;
	u16 pkt_len, udp_port = 0;
	struct ice_parser *psr;
	int status = -ENOMEM;
	struct ice_hw *hw;

	pkt_len = proto->raw.pkt_len;

	if (!pkt_len || pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
		return -EINVAL;

	pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
	msk_buf = kzalloc(pkt_len, GFP_KERNEL);

	if (!pkt_buf || !msk_buf)
		goto err_mem_alloc;

	memcpy(pkt_buf, proto->raw.spec, pkt_len);
	memcpy(msk_buf, proto->raw.mask, pkt_len);

	hw = &pf->hw;

	/* Get raw profile info via Parser Lib */
	psr = ice_parser_create(hw);
	if (IS_ERR(psr)) {
		status = PTR_ERR(psr);
		goto err_mem_alloc;
	}

	ice_parser_dvm_set(psr, ice_is_dvm_ena(hw));

	if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
		ice_parser_vxlan_tunnel_set(psr, udp_port, true);

	status = ice_parser_run(psr, pkt_buf, pkt_len, &rslt);
	if (status)
		goto err_parser_destroy;

	if (hw->debug_mask & ICE_DBG_PARSER)
		ice_parser_result_dump(hw, &rslt);

	conf->prof = kzalloc(sizeof(*conf->prof), GFP_KERNEL);
	if (!conf->prof) {
		status = -ENOMEM;
		goto err_parser_destroy;
	}

	status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
					 pkt_len, ICE_BLK_FD,
					 conf->prof);
	if (status)
		goto err_parser_profile_init;

	if (hw->debug_mask & ICE_DBG_PARSER)
		ice_parser_profile_dump(hw, conf->prof);

	/* Store raw flow info into @conf */
	conf->pkt_len = pkt_len;
	conf->pkt_buf = pkt_buf;
	conf->parser_ena = true;

	ice_parser_destroy(psr);
	return 0;

err_parser_profile_init:
	kfree(conf->prof);
err_parser_destroy:
	ice_parser_destroy(psr);
err_mem_alloc:
	kfree(pkt_buf);
	return status;
}

/**
 * ice_vc_fdir_parse_pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store it in @conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
			proto->count, vf->vf_id);
		return -EINVAL;
	}

	/* For raw FDIR filters created by the parser */
	if (ice_vc_fdir_is_raw_flow(proto))
		return ice_vc_fdir_parse_raw(vf, proto, conf);

	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		struct ip_esp_hdr *esph;
		struct ip_auth_hdr *ah;
		struct sctphdr *sctph;
		struct ipv6hdr *ip6h;
		struct udphdr *udph;
		struct tcphdr *tcph;
		struct ethhdr *eth;
		struct iphdr *iph;
		u8 s_field;
		u8 *rawh;

		switch (hdr->type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			eth = (struct ethhdr *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;

			if (hdr->field_selector)
				input->ext_data.ether_type = eth->h_proto;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			iph = (struct iphdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV4;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;

			if (hdr->field_selector) {
				input->ip.v4.src_ip = iph->saddr;
				input->ip.v4.dst_ip = iph->daddr;
				input->ip.v4.tos = iph->tos;
				input->ip.v4.proto = iph->protocol;
			}
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ip6h = (struct ipv6hdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV6;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;

			if (hdr->field_selector) {
				memcpy(input->ip.v6.src_ip,
				       ip6h->saddr.in6_u.u6_addr8,
				       sizeof(ip6h->saddr));
				memcpy(input->ip.v6.dst_ip,
				       ip6h->daddr.in6_u.u6_addr8,
				       sizeof(ip6h->daddr));
				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
						  (ip6h->flow_lbl[0] >> 4);
				input->ip.v6.proto = ip6h->nexthdr;
			}
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			tcph = (struct tcphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = tcph->source;
					input->ip.v4.dst_port = tcph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = tcph->source;
					input->ip.v6.dst_port = tcph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			udph = (struct udphdr *)hdr->buffer;
			/* Record the L4 type so a following ESP header can be
			 * classified as NAT-T (UDP-encapsulated) ESP; without
			 * this assignment the NAT-T branches below could
			 * never match.
			 */
			l4 = VIRTCHNL_PROTO_HDR_UDP;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = udph->source;
					input->ip.v4.dst_port = udph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = udph->source;
					input->ip.v6.dst_port = udph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			sctph = (struct sctphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = sctph->source;
					input->ip.v4.dst_port = sctph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = sctph->source;
					input->ip.v6.dst_port = sctph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;

			if (hdr->field_selector)
				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
			break;
		case VIRTCHNL_PROTO_HDR_ESP:
			esph = (struct ip_esp_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
			    l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
			else
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = esph->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = esph->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_AH:
			ah = (struct ip_auth_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = ah->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = ah->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_PFCP:
			rawh = (u8 *)hdr->buffer;
			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
			}
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
			rawh = (u8 *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

			if (hdr->field_selector)
				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			rawh = (u8 *)hdr->buffer;
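			/* The QFI lives in the low six bits
			 * (GTPU_EH_QFI_MASK) of the byte at
			 * GTPU_EH_QFI_OFFSET within the GTP-U extension
			 * header buffer supplied by the VF.
			 */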
			if (hdr->field_selector)
				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			break;
		default:
			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
				hdr->type, vf->vf_id);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_vc_fdir_parse_action
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's action and store it in @conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	u32 dest_num = 0;
	u32 mark_num = 0;
	int i;

	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
		dev_dbg(dev, "Invalid action count:0x%x for VF %d\n",
			as->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < as->count; i++) {
		struct virtchnl_filter_action *action = &as->actions[i];

		switch (action->type) {
		case VIRTCHNL_ACTION_PASSTHRU:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case VIRTCHNL_ACTION_DROP:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case VIRTCHNL_ACTION_QUEUE:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			input->q_index = action->act_conf.queue.index;
			break;
		case VIRTCHNL_ACTION_Q_REGION:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
			input->q_index = action->act_conf.queue.index;
			input->q_region = action->act_conf.queue.region;
			break;
		case VIRTCHNL_ACTION_MARK:
			mark_num++;
			input->fltr_id = action->act_conf.mark_id;
			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
			break;
		default:
			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
				action->type, vf->vf_id);
			return -EINVAL;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		dev_dbg(dev, "Invalid destination action for VF %d\n",
			vf->vf_id);
		return -EINVAL;
	}

	if (mark_num >= 2) {
		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	int ret;

	/* For raw FDIR filters created by the parser */
	if (!ice_vc_fdir_is_raw_flow(proto))
		if (!ice_vc_validate_pattern(vf, proto))
			return -EINVAL;

	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
	if (ret)
		return ret;

	return ice_vc_fdir_parse_action(vf, fltr, conf);
}

/**
 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the rules carry the same value, false otherwise.
 */
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
		       struct virtchnl_fdir_fltr_conf *conf_b)
{
	struct ice_fdir_fltr *a = &conf_a->input;
	struct ice_fdir_fltr *b = &conf_b->input;

	if (conf_a->ttype != conf_b->ttype)
		return false;
	if (a->flow_type != b->flow_type)
		return false;
	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
		return false;
	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
		return false;
	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
		return false;
	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
		return false;
	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
		return false;
	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
		return false;
	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
		return false;
	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
		return false;

	return true;
}

/**
 * ice_vc_fdir_is_dup_fltr
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if there is a duplicated rule with the same conf value
 *
 * Return: true if a duplicate is found, false otherwise.
 */
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;
	bool ret;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *node =
				to_fltr_conf_from_desc(desc);

		ret = ice_vc_fdir_comp_rules(node, conf);
		if (ret)
			return true;
	}

	return false;
}

/**
 * ice_vc_fdir_insert_entry
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into list and allocate ID for this filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	/* alloc ID corresponding with conf */
	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
	if (i < 0)
		return -EINVAL;
	*id = i;

	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
	return 0;
}

/**
 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: filter rule's ID
 */
static void
ice_vc_fdir_remove_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
{
	struct ice_fdir_fltr *input = &conf->input;

	idr_remove(&vf->fdir.fdir_rule_idr, id);
	list_del(&input->fltr_node);
}

/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: NULL on error, and other on success.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}

/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}
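/* A filter is tracked in two structures: fdir_rule_idr maps the
 * driver-allocated flow_id back to its configuration for lookup and
 * removal, while fdir_rule_list supports iteration for the duplicate and
 * profile-conflict checks and the wholesale flush above.
 */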
/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies del rules
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	input->dest_vsi = vsi->idx;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	if (conf->parser_ena) {
		memcpy(pkt, conf->pkt_buf, conf->pkt_len);
	} else {
		ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
		if (ret) {
			dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
				vf->vf_id, input->flow_type);
			goto err_free_pkt;
		}
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}

/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 */
static void ice_vf_fdir_timer(struct timer_list *t)
{
	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct ice_vf *vf;
	struct ice_pf *pf;

	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
	vf = container_of(fdir, struct ice_vf, fdir);
	ctx_done = &fdir->ctx_done;
	pf = vf->pf;
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
			union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_pf *pf = ctrl_vsi->back;
	struct ice_vf *vf = ctrl_vsi->vf;
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir_ctx *ctx_irq;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct device *dev;
	int ret;

	if (WARN_ON(!vf))
		return;

	fdir = &vf->fdir;
	ctx_done = &fdir->ctx_done;
	ctx_irq = &fdir->ctx_irq;
	dev = ice_pf_to_dev(pf);
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
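		/* No request is pending here; ice_vf_fdir_timer() may
		 * already have claimed the context on timeout.
		 */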
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_IRQ;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	ret = timer_delete(&ctx_irq->rx_tmr);
	if (!ret)
		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
 * @vf: pointer to the VF info
 */
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
	u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u16 vsi_num;

	pf = vf->pf;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
		return;
	}

	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);

	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
		fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
		fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
		fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
		break;
	case ICE_MAC_E810:
	default:
		fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
		fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
		fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
		fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
	}

	dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
		vf->vf_id, fd_size_g, fd_size_b);
	dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
		vf->vf_id, fd_cnt_g, fd_cnt_b);
}

/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
		      enum virtchnl_fdir_prgm_status *status)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u32 stat_err, error, prog_id;
	int ret;

	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
	if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
	    ICE_FXD_FLTR_WB_QW1_DD_YES) {
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
		ret = -EINVAL;
		goto err_exit;
	}

	prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows add, but ctx is not add\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows del, but ctx is not del\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
			dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		} else {
			dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		}
		ret = -EINVAL;
		goto err_exit;
	}

	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		ret = -EINVAL;
		goto err_exit;
	}

	*status = VIRTCHNL_FDIR_SUCCESS;

	return 0;

err_exit:
	ice_vf_fdir_dump_info(vf);
	return ret;
}

static int ice_fdir_is_tunnel(enum ice_fdir_tunnel_type ttype)
{
	return (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER ||
		ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER ||
		ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER ||
		ttype == ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER ||
		ttype == ICE_FDIR_TUNNEL_TYPE_ECPRI ||
		ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER);
}
/**
 * ice_vc_add_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for the flow director add command. On success, do the post
 * process and send back a success msg by virtchnl. Otherwise, revert the
 * context and send back a failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
	vf->fdir.fdir_fltr_cnt_total++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for the flow director del command. On success, do the post
 * process and send back a success msg by virtchnl. Otherwise, revert the
 * context and send back a failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_del *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
	vf->fdir.fdir_fltr_cnt_total--;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1780 "add" : "del"); 1781 devm_kfree(dev, conf); 1782 return ret; 1783 1784 err_exit: 1785 if (resp) 1786 resp->status = status; 1787 if (success) 1788 devm_kfree(dev, conf); 1789 1790 ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret, 1791 (u8 *)resp, len); 1792 kfree(resp); 1793 return ret; 1794 } 1795 1796 /** 1797 * ice_flush_fdir_ctx 1798 * @pf: pointer to the PF structure 1799 * 1800 * Flush all the pending event on ctx_done list and process them. 1801 */ 1802 void ice_flush_fdir_ctx(struct ice_pf *pf) 1803 { 1804 struct ice_vf *vf; 1805 unsigned int bkt; 1806 1807 if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state)) 1808 return; 1809 1810 mutex_lock(&pf->vfs.table_lock); 1811 ice_for_each_vf(pf, bkt, vf) { 1812 struct device *dev = ice_pf_to_dev(pf); 1813 enum virtchnl_fdir_prgm_status status; 1814 struct ice_vf_fdir_ctx *ctx; 1815 unsigned long flags; 1816 int ret; 1817 1818 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) 1819 continue; 1820 1821 if (vf->ctrl_vsi_idx == ICE_NO_VSI) 1822 continue; 1823 1824 ctx = &vf->fdir.ctx_done; 1825 spin_lock_irqsave(&vf->fdir.ctx_lock, flags); 1826 if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) { 1827 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); 1828 continue; 1829 } 1830 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); 1831 1832 WARN_ON(ctx->stat == ICE_FDIR_CTX_READY); 1833 if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) { 1834 status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT; 1835 dev_err(dev, "VF %d: ctrl_vsi irq timeout\n", 1836 vf->vf_id); 1837 goto err_exit; 1838 } 1839 1840 ret = ice_vf_verify_rx_desc(vf, ctx, &status); 1841 if (ret) 1842 goto err_exit; 1843 1844 if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) 1845 ice_vc_add_fdir_fltr_post(vf, ctx, status, true); 1846 else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER) 1847 ice_vc_del_fdir_fltr_post(vf, ctx, status, true); 1848 else 1849 dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id); 1850 1851 spin_lock_irqsave(&vf->fdir.ctx_lock, flags); 1852 ctx->flags &= ~ICE_VF_FDIR_CTX_VALID; 1853 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); 1854 continue; 1855 err_exit: 1856 if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) 1857 ice_vc_add_fdir_fltr_post(vf, ctx, status, false); 1858 else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER) 1859 ice_vc_del_fdir_fltr_post(vf, ctx, status, false); 1860 else 1861 dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id); 1862 1863 spin_lock_irqsave(&vf->fdir.ctx_lock, flags); 1864 ctx->flags &= ~ICE_VF_FDIR_CTX_VALID; 1865 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); 1866 } 1867 mutex_unlock(&pf->vfs.table_lock); 1868 } 1869 1870 /** 1871 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler 1872 * @vf: pointer to the VF structure 1873 * @conf: FDIR configuration for each filter 1874 * @v_opcode: virtual channel operation code 1875 * 1876 * Return: 0 on success, and other on error. 
/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	timer_delete(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}

/**
 * ice_vc_parser_fv_check_diff - check two parsed FDIR profile fv contexts
 * @fv_a: struct of parsed FDIR profile field vector
 * @fv_b: struct of parsed FDIR profile field vector
 *
 * Check if the two parsed FDIR profile field vector contexts are different,
 * including proto_id, offset and mask.
 *
 * Return: true if different, false otherwise.
 */
static bool ice_vc_parser_fv_check_diff(struct ice_parser_fv *fv_a,
					struct ice_parser_fv *fv_b)
{
	return (fv_a->proto_id != fv_b->proto_id ||
		fv_a->offset != fv_b->offset ||
		fv_a->msk != fv_b->msk);
}

/**
 * ice_vc_parser_fv_save - save parsed FDIR profile fv context
 * @fv: struct of parsed FDIR profile field vector
 * @fv_src: parsed FDIR profile field vector context to save
 *
 * Save the parsed FDIR profile field vector context, including proto_id,
 * offset and mask.
 *
 * Return: Void.
 */
static void ice_vc_parser_fv_save(struct ice_parser_fv *fv,
				  struct ice_parser_fv *fv_src)
{
	fv->proto_id = fv_src->proto_id;
	fv->offset = fv_src->offset;
	fv->msk = fv_src->msk;
	fv->spec = 0;
}
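/*
 * Raw (parser-generated) filters share one HW profile per packet type
 * group (PTG): vf->fdir_prof_info[ptg] keeps the saved field vector and
 * an active-rule count.  ice_vc_add_fdir_raw() below only bumps the
 * count when an identical field vector is already active; otherwise it
 * programs the HW profile and (re)saves the field vector with a count
 * of one.  ice_vc_del_fdir_raw() decrements the count and removes the
 * profile flows once the last referencing rule is gone.
 */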
/**
 * ice_vc_add_fdir_raw - add a raw FDIR filter for VF
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @v_ret: the final VIRTCHNL code
 * @stat: pointer to the VIRTCHNL_OP_ADD_FDIR_FILTER reply buffer
 * @len: length of the stat
 *
 * Return: 0 on success or negative errno on failure.
 */
static int
ice_vc_add_fdir_raw(struct ice_vf *vf,
		    struct virtchnl_fdir_fltr_conf *conf,
		    enum virtchnl_status_code *v_ret,
		    struct virtchnl_fdir_add *stat, int len)
{
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_fdir_prof_info *pi;
	struct ice_pf *pf = vf->pf;
	int ret, ptg, id, i;
	struct device *dev;
	struct ice_hw *hw;
	bool fv_found;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	*v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;

	id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
	ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id);
		return -ENODEV;
	}

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n",
			vf->vf_id);
		return -ENODEV;
	}

	fv_found = false;

	/* Check if profile info already exists, then update the counter */
	pi = &vf->fdir_prof_info[ptg];
	if (pi->fdir_active_cnt != 0) {
		for (i = 0; i < ICE_MAX_FV_WORDS; i++)
			if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i],
							&conf->prof->fv[i]))
				break;
		if (i == ICE_MAX_FV_WORDS) {
			fv_found = true;
			pi->fdir_active_cnt++;
		}
	}

	/* HW profile setting is only required for the first time */
	if (!fv_found) {
		ret = ice_flow_set_parser_prof(hw, vf_vsi->idx,
					       ctrl_vsi->idx, conf->prof,
					       ICE_BLK_FD);
		if (ret) {
			*v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
			dev_dbg(dev, "VF %d: insert hw prof failed\n",
				vf->vf_id);
			return ret;
		}
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		*v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n",
			vf->vf_id);
		return ret;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf,
				      VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		dev_dbg(dev, "VF %d: set FDIR context failed\n",
			vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, false);
	if (ret) {
		dev_err(dev, "VF %d: adding FDIR raw flow rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

	/* Save parsed profile fv info of the FDIR rule for the first time */
	if (!fv_found) {
		for (i = 0; i < conf->prof->fv_num; i++)
			ice_vc_parser_fv_save(&pi->prof.fv[i],
					      &conf->prof->fv[i]);
		pi->prof.fv_num = conf->prof->fv_num;
		pi->fdir_active_cnt = 1;
	}

	return 0;

err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	return ret;
}
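/*
 * Ordering in ice_vc_add_fdir_fltr() below is deliberate: the rule is
 * inserted into the VF's rule list and the IRQ context is armed *before*
 * the filter is written to HW, so the completion (or timeout) path can
 * always find the entry to report on or unwind.  The success reply to
 * the VF is therefore deferred to ice_vc_add_fdir_fltr_post(); only
 * validate-only requests and early failures are answered directly here.
 */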
/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

#define ICE_VF_MAX_FDIR_FILTERS	128
	if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
	    vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
			vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	/* For raw FDIR filters created by the parser */
	if (conf->parser_ena) {
		ret = ice_vc_add_fdir_raw(vf, conf, &v_ret, stat, len);
		if (ret)
			goto err_free_conf;
		goto exit;
	}

	is_tun = ice_fdir_is_tunnel(conf->ttype);
	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

exit:
	kfree(stat);
	return ret;

err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vc_del_fdir_raw - delete a raw FDIR filter for VF
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @v_ret: the final VIRTCHNL code
 * @stat: pointer to the VIRTCHNL_OP_DEL_FDIR_FILTER reply buffer
 * @len: length of the stat
 *
 * Return: 0 on success or negative errno on failure.
 */
static int
ice_vc_del_fdir_raw(struct ice_vf *vf,
		    struct virtchnl_fdir_fltr_conf *conf,
		    enum virtchnl_status_code *v_ret,
		    struct virtchnl_fdir_del *stat, int len)
{
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	enum ice_block blk = ICE_BLK_FD;
	struct ice_fdir_prof_info *pi;
	struct ice_pf *pf = vf->pf;
	struct device *dev;
	struct ice_hw *hw;
	unsigned long id;
	u16 vsi_num;
	int ptg;
	int ret;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	*v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;

	id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
	ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];

	ret = ice_vc_fdir_write_fltr(vf, conf, false, false);
	if (ret) {
		dev_err(dev, "VF %u: deleting FDIR raw flow rule failed: %d\n",
			vf->vf_id, ret);
		return ret;
	}

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
		return -ENODEV;
	}

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_err(dev, "Can not get FDIR ctrl_vsi for VF %u\n",
			vf->vf_id);
		return -ENODEV;
	}

	pi = &vf->fdir_prof_info[ptg];
	if (pi->fdir_active_cnt != 0) {
		pi->fdir_active_cnt--;
		/* Remove the profile id flow if no active FDIR rule left */
		if (!pi->fdir_active_cnt) {
			vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi->idx);
			ice_rem_prof_id_flow(hw, blk, vsi_num, id);

			vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
			ice_rem_prof_id_flow(hw, blk, vsi_num, id);
		}
	}

	conf->parser_ena = false;
	return 0;
}
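/*
 * Likewise, ice_vc_del_fdir_fltr() below replies directly only on
 * failure; on success the rule entry and the per-flow counters are left
 * untouched here and are cleaned up in ice_vc_del_fdir_fltr_post() once
 * HW confirms the removal.
 */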
/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum virtchnl_status_code v_ret;
	struct ice_fdir_fltr *input;
	enum ice_fltr_ptype flow;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	/* For raw FDIR filters created by the parser */
	if (conf->parser_ena) {
		ret = ice_vc_del_fdir_raw(vf, conf, &v_ret, stat, len);
		if (ret)
			goto err_del_tmr;
		goto exit;
	}

	is_tun = ice_fdir_is_tunnel(conf->ttype);
	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	/* Remove unused profiles to avoid unexpected behaviors */
	input = &conf->input;
	flow = input->flow_type;
	if (fdir->fdir_fltr_cnt[flow][is_tun] == 1)
		ice_vc_fdir_rem_prof(vf, flow, is_tun);

exit:
	kfree(stat);
	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
	ice_vc_fdir_reset_cnt_all(fdir);
}

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}
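/*
 * Lifecycle note: ice_vf_fdir_init() is expected to run once per VF
 * before any virtchnl FDIR opcode is serviced, and ice_vf_fdir_exit()
 * flushes any remaining rule entries before destroying the rule IDR and
 * releasing all FDIR HW profiles.
 */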