// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2023, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
#include "ice_vf_lib_private.h"

#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

#define ICE_FLOW_PROF_TYPE_S	0
#define ICE_FLOW_PROF_TYPE_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
#define ICE_FLOW_PROF_VSI_S	32
#define ICE_FLOW_PROF_VSI_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)

/* Flow profile ID format:
 * [0:31] - flow type, flow + tun_offs
 * [32:63] - VSI index
 */
#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
	((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
	      (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))

#define GTPU_TEID_OFFSET	4
#define GTPU_EH_QFI_OFFSET	1
#define GTPU_EH_QFI_MASK	0x3F
#define PFCP_S_OFFSET		0
#define PFCP_S_MASK		0x1
#define PFCP_PORT_NR		8805

#define FDIR_INSET_FLAG_ESP_S		0
#define FDIR_INSET_FLAG_ESP_M		BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP		BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC	(0ULL << FDIR_INSET_FLAG_ESP_S)

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};

struct virtchnl_fdir_fltr_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type ttype;
	u64 inset_flag;
	u32 flow_id;
};

struct virtchnl_fdir_inset_map {
	enum virtchnl_proto_hdr_field field;
	enum ice_flow_field fld;
	u64 flag;
	u64 mask;
};

static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
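	/* PFCP rules are matched on the well-known UDP destination port
	 * (PFCP_PORT_NR); the S field only selects node vs. session type
	 * (see ice_vc_fdir_parse_pattern()).
	 */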
	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};

/**
 * ice_vc_fdir_param_check
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check the VSI ID, PF state and VF state for validity
 *
 * Return: 0 on success, and -EINVAL on error.
 */
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EINVAL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		return -EINVAL;

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
		return -EINVAL;

	if (vsi_id != vf->lan_vsi_num)
		return -EINVAL;

	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
		return -EINVAL;

	if (!ice_get_vf_vsi(vf))
		return -EINVAL;

	return 0;
}

/**
 * ice_vf_start_ctrl_vsi
 * @vf: pointer to the VF structure
 *
 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *ctrl_vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		return -EEXIST;

	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
	if (!ctrl_vsi) {
		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
			vf->vf_id);
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "Could not open control VSI for VF %d\n",
			vf->vf_id);
		goto err_vsi_open;
	}

	return 0;

err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[vf->ctrl_vsi_idx] = NULL;
		vf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}

/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Return: 0 on success, and other on error.
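 *
 * The per-VF profile table is allocated lazily on first use.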
 */
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof) {
		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
					       ICE_FLTR_PTYPE_MAX,
					       sizeof(*fdir->fdir_prof),
					       GFP_KERNEL);
		if (!fdir->fdir_prof)
			return -ENOMEM;
	}

	if (!fdir->fdir_prof[flow]) {
		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
						     sizeof(**fdir->fdir_prof),
						     GFP_KERNEL);
		if (!fdir->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 */
static void
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof)
		return;

	if (!fdir->fdir_prof[flow])
		return;

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
	fdir->fdir_prof[flow] = NULL;
}

/**
 * ice_vc_fdir_free_prof_all - free all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum ice_fltr_ptype flow;

	if (!fdir->fdir_prof)
		return;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_vc_fdir_free_prof(vf, flow);

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
	fdir->fdir_prof = NULL;
}

/**
 * ice_vc_fdir_parse_flow_fld
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store the fields into the
 * field type array
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
			   struct virtchnl_fdir_fltr_conf *conf,
			   enum ice_flow_field *fld, int *fld_cnt)
{
	struct virtchnl_proto_hdr hdr;
	u32 i;

	/* Work on a local copy so matched fields can be cleared from the
	 * selector without touching the caller's header.
	 */
	memcpy(&hdr, proto_hdr, sizeof(hdr));

	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
			if (fdir_inset_map[i].mask &&
			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
			     fdir_inset_map[i].flag))
				continue;

			fld[*fld_cnt] = fdir_inset_map[i].fld;
			*fld_cnt += 1;
			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
				return -EINVAL;
			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
						     fdir_inset_map[i].field);
		}

	return 0;
}

/**
 * ice_vc_fdir_set_flow_fld
 * @vf: pointer to the VF structure
 * @fltr: virtchnl add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store it into
 * the flow's packet segment field
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct virtchnl_proto_hdrs *proto;
	int fld_cnt = 0;
	int i;

	proto = &rule->proto_hdrs;
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		int ret;

		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
		if (ret)
			return ret;
	}

	if (fld_cnt == 0) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < fld_cnt; i++)
		ice_flow_set_fld(seg, fld[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}

/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	enum ice_fltr_ptype flow = conf->input.flow_type;
	enum ice_fdir_tunnel_type ttype = conf->ttype;
	struct device *dev = ice_pf_to_dev(vf->pf);

	switch (flow) {
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
					  ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else {
			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
				flow, vf->vf_id);
			return -EINVAL;
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
			flow, vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_fd_hw_prof *vf_prof;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_hw *hw;
	u64 prof_id;
	int i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
		return;

	vf_prof = fdir->fdir_prof[flow];

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
		return;
	}

	if (!fdir->prof_entry_cnt[flow][tun])
		return;

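	/* Tunnel profiles are offset by ICE_FLTR_PTYPE_MAX in the profile
	 * ID so they never collide with the non-tunnel profile for the
	 * same flow type.
	 */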
	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
				   tun ? ICE_FLTR_PTYPE_MAX : 0);

	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
		if (vf_prof->entry_h[i][tun]) {
			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);

			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   vf_prof->entry_h[i][tun]);
			vf_prof->entry_h[i][tun] = 0;
		}

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	devm_kfree(dev, vf_prof->fdir_seg[tun]);
	vf_prof->fdir_seg[tun] = NULL;

	for (i = 0; i < vf_prof->cnt; i++)
		vf_prof->vsi_h[i] = 0;

	fdir->prof_entry_cnt[flow][tun] = 0;
}

/**
 * ice_vc_fdir_rem_prof_all - remove all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		ice_vc_fdir_rem_prof(vf, flow, 0);
		ice_vc_fdir_rem_prof(vf, flow, 1);
	}
}

/**
 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
 * @fdir: pointer to the VF FDIR structure
 */
static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		fdir->fdir_fltr_cnt[flow][0] = 0;
		fdir->fdir_fltr_cnt[flow][1] = 0;
	}
}

/**
 * ice_vc_fdir_has_prof_conflict
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 *
 * Check if @conf has a conflicting profile with existing profiles
 *
 * Return: true on profile conflict, false otherwise.
 */
static bool
ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
			      struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *existing_conf;
		enum ice_fltr_ptype flow_type_a, flow_type_b;
		struct ice_fdir_fltr *a, *b;

		existing_conf = to_fltr_conf_from_desc(desc);
		a = &existing_conf->input;
		b = &conf->input;
		flow_type_a = a->flow_type;
		flow_type_b = b->flow_type;

		/* No need to compare two rules with different tunnel types or
		 * with the same protocol type.
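		 * A rule of IPv4/IPv6 OTHER type conflicts with any
		 * UDP/TCP/SCTP rule of the same IP version, and vice versa.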
		 */
		if (existing_conf->ttype != conf->ttype ||
		    flow_type_a == flow_type_b)
			continue;

		switch (flow_type_a) {
		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
				return true;
			break;
		default:
			break;
		}
	}

	return false;
}

/**
 * ice_vc_fdir_write_flow_prof
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
			    struct ice_flow_seg_info *seg, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *vf_prof;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	u64 prof_id;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi)
		return -EINVAL;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi)
		return -EINVAL;

	vf_prof = fdir->fdir_prof[flow];
	old_seg = vf_prof->fdir_seg[tun];
	if (old_seg) {
		if (!memcmp(old_seg, seg, sizeof(*seg))) {
			dev_dbg(dev, "Duplicated profile for VF %d!\n",
				vf->vf_id);
			return -EEXIST;
		}

		if (fdir->fdir_fltr_cnt[flow][tun]) {
			ret = -EINVAL;
			dev_dbg(dev, "Input set conflicts for VF %d\n",
				vf->vf_id);
			goto err_exit;
		}

		/* remove previously allocated profile */
		ice_vc_fdir_rem_prof(vf, flow, tun);
	}

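	/* Add the flow profile and two flow entries: one for the VF VSI and
	 * one for the VF's control VSI.
	 */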
	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
				   tun ? ICE_FLTR_PTYPE_MAX : 0);

	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
				tun + 1, &prof);
	if (ret) {
		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
			flow, vf->vf_id);
		goto err_exit;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (ret) {
		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_prof;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (ret) {
		dev_dbg(dev,
			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_entry_1;
	}

	vf_prof->fdir_seg[tun] = seg;
	vf_prof->cnt = 0;
	fdir->prof_entry_cnt[flow][tun] = 0;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	return 0;

err_entry_1:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
err_exit:
	return ret;
}

/**
 * ice_vc_fdir_config_input_set
 * @vf: pointer to the VF structure
 * @fltr: virtchnl add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Config the input set type and value for virtual channel add msg buffer
 *
 * Return: 0 on success, and other on error.
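 *
 * A duplicate flow profile (-EEXIST from the profile write) is not treated
 * as an error; the newly built segment is freed and the existing profile
 * is reused.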
 */
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			     struct virtchnl_fdir_fltr_conf *conf, int tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_flow_seg_info *seg;
	enum ice_fltr_ptype flow;
	int ret;

	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
	if (ret) {
		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
			vf->vf_id);
		return ret;
	}

	flow = input->flow_type;
	ret = ice_vc_fdir_alloc_prof(vf, flow);
	if (ret) {
		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
		return ret;
	}

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
	if (ret == -EEXIST) {
		devm_kfree(dev, seg);
	} else if (ret) {
		dev_dbg(dev, "Write flow profile for VF %d failed\n",
			vf->vf_id);
		goto err_exit;
	}

	return 0;

err_exit:
	devm_kfree(dev, seg);
	return ret;
}

/**
 * ice_vc_fdir_parse_pattern
 * @vf: pointer to the VF info
 * @fltr: virtchnl add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store it into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
			proto->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		struct ip_esp_hdr *esph;
		struct ip_auth_hdr *ah;
		struct sctphdr *sctph;
		struct ipv6hdr *ip6h;
		struct udphdr *udph;
		struct tcphdr *tcph;
		struct ethhdr *eth;
		struct iphdr *iph;
		u8 s_field;
		u8 *rawh;

		switch (hdr->type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			eth = (struct ethhdr *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;

			if (hdr->field_selector)
				input->ext_data.ether_type = eth->h_proto;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			iph = (struct iphdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV4;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;

			if (hdr->field_selector) {
				input->ip.v4.src_ip = iph->saddr;
				input->ip.v4.dst_ip = iph->daddr;
				input->ip.v4.tos = iph->tos;
				input->ip.v4.proto = iph->protocol;
			}
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ip6h = (struct ipv6hdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV6;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;

			if (hdr->field_selector) {
				memcpy(input->ip.v6.src_ip,
				       ip6h->saddr.in6_u.u6_addr8,
				       sizeof(ip6h->saddr));
				memcpy(input->ip.v6.dst_ip,
				       ip6h->daddr.in6_u.u6_addr8,
				       sizeof(ip6h->daddr));
				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
						  (ip6h->flow_lbl[0] >> 4);
				input->ip.v6.proto = ip6h->nexthdr;
			}
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			tcph = (struct tcphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = tcph->source;
					input->ip.v4.dst_port = tcph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = tcph->source;
					input->ip.v6.dst_port = tcph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			udph = (struct udphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = udph->source;
					input->ip.v4.dst_port = udph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = udph->source;
					input->ip.v6.dst_port = udph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			sctph = (struct sctphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = sctph->source;
					input->ip.v4.dst_port = sctph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = sctph->source;
					input->ip.v6.dst_port = sctph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;

			if (hdr->field_selector)
				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
			break;
		case VIRTCHNL_PROTO_HDR_ESP:
			esph = (struct ip_esp_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
			    l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
			else
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = esph->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = esph->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_AH:
			ah = (struct ip_auth_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = ah->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = ah->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_PFCP:
			rawh = (u8 *)hdr->buffer;
			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
			}
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
			rawh = (u8 *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

			if (hdr->field_selector)
				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			rawh = (u8 *)hdr->buffer;

			if (hdr->field_selector)
				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			break;
		default:
			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
				hdr->type, vf->vf_id);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_vc_fdir_parse_action
 * @vf: pointer to the VF info
 * @fltr: virtchnl add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's action and store it into conf
 *
 * Return: 0 on success, and other on error.
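 *
 * Exactly one destination action (passthru, drop, queue or queue region)
 * and at most one mark action are accepted.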
 */
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	u32 dest_num = 0;
	u32 mark_num = 0;
	int i;

	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
		dev_dbg(dev, "Invalid action count:0x%x for VF %d\n",
			as->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < as->count; i++) {
		struct virtchnl_filter_action *action = &as->actions[i];

		switch (action->type) {
		case VIRTCHNL_ACTION_PASSTHRU:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case VIRTCHNL_ACTION_DROP:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case VIRTCHNL_ACTION_QUEUE:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			input->q_index = action->act_conf.queue.index;
			break;
		case VIRTCHNL_ACTION_Q_REGION:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
			input->q_index = action->act_conf.queue.index;
			input->q_region = action->act_conf.queue.region;
			break;
		case VIRTCHNL_ACTION_MARK:
			mark_num++;
			input->fltr_id = action->act_conf.mark_id;
			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
			break;
		default:
			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
				action->type, vf->vf_id);
			return -EINVAL;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		dev_dbg(dev, "Invalid destination action for VF %d\n",
			vf->vf_id);
		return -EINVAL;
	}

	if (mark_num >= 2) {
		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF info
 * @fltr: virtchnl add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	int ret;

	if (!ice_vc_validate_pattern(vf, proto))
		return -EINVAL;

	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
	if (ret)
		return ret;

	return ice_vc_fdir_parse_action(vf, fltr, conf);
}

/**
 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the two filter rules match, false otherwise.
 */
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
		       struct virtchnl_fdir_fltr_conf *conf_b)
{
	struct ice_fdir_fltr *a = &conf_a->input;
	struct ice_fdir_fltr *b = &conf_b->input;

	if (conf_a->ttype != conf_b->ttype)
		return false;
	if (a->flow_type != b->flow_type)
		return false;
	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
		return false;
	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
		return false;
	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
		return false;
	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
		return false;
	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
		return false;
	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
		return false;
	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
		return false;
	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
		return false;

	return true;
}

/**
 * ice_vc_fdir_is_dup_fltr
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if a rule with the same conf value already exists
 *
 * Return: true if a duplicate rule exists, false otherwise.
 */
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;
	bool ret;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *node =
				to_fltr_conf_from_desc(desc);

		ret = ice_vc_fdir_comp_rules(node, conf);
		if (ret)
			return true;
	}

	return false;
}

/**
 * ice_vc_fdir_insert_entry
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into list and allocate ID for this filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	/* alloc ID corresponding with conf */
	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
	if (i < 0)
		return -EINVAL;
	*id = i;

	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
	return 0;
}

/**
 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: filter rule's ID
 */
static void
ice_vc_fdir_remove_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
{
	struct ice_fdir_fltr *input = &conf->input;

	idr_remove(&vf->fdir.fdir_rule_idr, id);
	list_del(&input->fltr_node);
}

/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: the FDIR configuration on success, NULL otherwise.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}

/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}

/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies delete rule
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	input->dest_vsi = vsi->idx;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (ret) {
		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
			vf->vf_id, input->flow_type);
		goto err_free_pkt;
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}

/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 */
static void ice_vf_fdir_timer(struct timer_list *t)
{
	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct ice_vf *vf;
	struct ice_pf *pf;

	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
	vf = container_of(fdir, struct ice_vf, fdir);
	ctx_done = &fdir->ctx_done;
	pf = vf->pf;
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
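	/* The service task picks the timed-out context up from ctx_done
	 * (see ice_flush_fdir_ctx()).
	 */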
	ice_service_task_schedule(pf);
}

/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
			union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_pf *pf = ctrl_vsi->back;
	struct ice_vf *vf = ctrl_vsi->vf;
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir_ctx *ctx_irq;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct device *dev;
	int ret;

	if (WARN_ON(!vf))
		return;

	fdir = &vf->fdir;
	ctx_done = &fdir->ctx_done;
	ctx_irq = &fdir->ctx_irq;
	dev = ice_pf_to_dev(pf);
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_IRQ;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	ret = del_timer(&ctx_irq->rx_tmr);
	if (!ret)
		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
 * @vf: pointer to the VF info
 */
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
	u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u16 vsi_num;

	pf = vf->pf;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
		return;
	}

	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);

	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
		fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
		fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
		fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
		break;
	case ICE_MAC_E810:
	default:
		fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
		fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
		fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
		fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
	}

	dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
		vf->vf_id, fd_size_g, fd_size_b);
	dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
		vf->vf_id, fd_cnt_g, fd_cnt_b);
}

/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
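 *
 * Checks the descriptor-done bit, that the programming opcode matches the
 * pending virtchnl opcode, and the failure bits reported by hardware.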
 */
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
		      enum virtchnl_fdir_prgm_status *status)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u32 stat_err, error, prog_id;
	int ret;

	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
	if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
	    ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
		ret = -EINVAL;
		goto err_exit;
	}

	prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
		ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows add, but ctx does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows del, but ctx does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
			dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		} else {
			dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		}
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		ret = -EINVAL;
		goto err_exit;
	}

	*status = VIRTCHNL_FDIR_SUCCESS;

	return 0;

err_exit:
	ice_vf_fdir_dump_info(vf);
	return ret;
}

/**
 * ice_vc_add_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post-process the flow director add command: on success, send a success
 * message back to the VF over virtchnl; otherwise revert the context and
 * report the failure.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post-process the flow director delete command: on success, send a success
 * message back to the VF over virtchnl; otherwise revert the context and
 * report the failure.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_del *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	devm_kfree(dev, conf);
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	if (success)
		devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_flush_fdir_ctx
 * @pf: pointer to the PF structure
 *
 * Flush all pending events on the ctx_done list and process them.
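 * Called from the PF service task once ICE_FD_VF_FLUSH_CTX is set.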
 */
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		struct device *dev = ice_pf_to_dev(pf);
		enum virtchnl_fdir_prgm_status status;
		struct ice_vf_fdir_ctx *ctx;
		unsigned long flags;
		int ret;

		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
			continue;

		ctx = &vf->fdir.ctx_done;
		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
				vf->vf_id);
			goto err_exit;
		}

		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
		if (ret)
			goto err_exit;

		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		continue;
err_exit:
		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, and other on error.
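 * -EBUSY is returned while a previous FDIR request of this VF is still
 * being processed.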
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

	/* Give the programming status descriptor 10 ms to arrive before the
	 * request is treated as timed out.
	 */
	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}

/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
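		/* validate_only requests stop here; no rule is programmed */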
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

exit:
	kfree(stat);
	return ret;

err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
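 *
 * On success the reply to the VF is deferred until the programming status
 * descriptor is processed (see ice_flush_fdir_ctx()).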
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
	ice_vc_fdir_reset_cnt_all(fdir);
}

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}