// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2023, Intel Corporation. */

/* flow director ethtool support for ice */

#include "ice.h"
#include "ice_lib.h"
#include "ice_fdir.h"
#include "ice_flow.h"

static struct in6_addr full_ipv6_addr_mask = {
	.in6_u = {
		.u6_addr8 = {
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		}
	}
};

static struct in6_addr zero_ipv6_addr_mask = {
	.in6_u = {
		.u6_addr8 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		}
	}
};

/* calls to ice_flow_add_prof require the number of segments in the array
 * for segs_cnt. In this code that is one more than the index.
 */
#define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1)

/**
 * ice_fltr_to_ethtool_flow - convert filter type values to ethtool
 * flow type values
 * @flow: filter type to be converted
 *
 * Returns the corresponding ethtool flow type.
 */
static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
{
	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		return TCP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		return UDP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		return SCTP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		return IPV4_USER_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		return TCP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		return UDP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		return SCTP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		return IPV6_USER_FLOW;
	default:
		/* 0 is undefined ethtool flow */
		return 0;
	}
}

/**
 * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
 * @eth: Ethtool flow type to be converted
 *
 * Returns flow enum
 */
static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
{
	switch (eth) {
	case TCP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
	case UDP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	case SCTP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
	case IPV4_USER_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
	case TCP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_TCP;
	case UDP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_UDP;
	case SCTP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
	case IPV6_USER_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
	default:
		return ICE_FLTR_PTYPE_NONF_NONE;
	}
}
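/* The two helpers above are inverses of each other, e.g. TCP_V4_FLOW maps
 * to ICE_FLTR_PTYPE_NONF_IPV4_TCP and back. Callers detect unmapped flow
 * types via the sentinel returns (0 and ICE_FLTR_PTYPE_NONF_NONE,
 * respectively).
 */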
/**
 * ice_is_mask_valid - check mask field set
 * @mask: full mask to check
 * @field: field for which mask should be valid
 *
 * If the mask is fully set return true. If it is not valid for field return
 * false.
 */
static bool ice_is_mask_valid(u64 mask, u64 field)
{
	return (mask & field) == field;
}

/**
 * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data
 * @hw: hardware structure that contains filter list
 * @cmd: ethtool command data structure to receive the filter data
 *
 * Returns 0 on success and -EINVAL on failure
 */
int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct ice_fdir_fltr *rule;
	int ret = 0;
	u16 idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	mutex_lock(&hw->fdir_fltr_lock);

	rule = ice_fdir_find_fltr_by_idx(hw, fsp->location);

	if (!rule || fsp->location != rule->fltr_id) {
		ret = -EINVAL;
		goto release_lock;
	}

	fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type);

	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));

	switch (fsp->flow_type) {
	case IPV4_USER_FLOW:
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header;
		fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos;
		fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip;
		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
		fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip;
		fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
		fsp->m_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header;
		fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos;
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port;
		fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port;
		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip;
		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
		fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port;
		fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port;
		fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip;
		fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
		break;
	case IPV6_USER_FLOW:
		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header;
		fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc;
		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto;
		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header;
		fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc;
		fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port;
		fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port;
		memcpy(fsp->m_u.tcp_ip6_spec.ip6src,
		       rule->mask.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst,
		       rule->mask.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port;
		fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port;
		fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc;
		fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc;
		break;
	default:
		break;
	}

	if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->orig_q_index;

	idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
	if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
		dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
			rule->flow_type);
		ret = -EINVAL;
	}

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
	return ret;
}
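/* ice_get_ethtool_fdir_entry() services the ETHTOOL_GRXCLSRULE request,
 * i.e. queries such as `ethtool -u <dev> rule <loc>` (illustrative
 * invocation).
 */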
/**
 * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
 * @hw: hardware structure containing the filter list
 * @cmd: ethtool command data structure
 * @rule_locs: ethtool array passed in from OS to receive filter IDs
 *
 * Returns 0 as expected for success by ethtool
 */
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
		      u32 *rule_locs)
{
	struct ice_fdir_fltr *f_rule;
	unsigned int cnt = 0;
	int val = 0;

	/* report total rule count */
	cmd->data = ice_get_fdir_cnt_all(hw);

	mutex_lock(&hw->fdir_fltr_lock);

	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
		if (cnt == cmd->rule_cnt) {
			val = -EMSGSIZE;
			goto release_lock;
		}
		rule_locs[cnt] = f_rule->fltr_id;
		cnt++;
	}

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
	if (!val)
		cmd->rule_cnt = cnt;
	return val;
}

/**
 * ice_fdir_remap_entries - update the FDir entries in profile
 * @prof: FDir structure pointer
 * @tun: tunneled or non-tunneled packet
 * @idx: FDir entry index
 */
static void
ice_fdir_remap_entries(struct ice_fd_hw_prof *prof, int tun, int idx)
{
	if (idx != prof->cnt && tun < ICE_FD_HW_SEG_MAX) {
		int i;

		for (i = idx; i < (prof->cnt - 1); i++) {
			u64 old_entry_h;

			old_entry_h = prof->entry_h[i + 1][tun];
			prof->entry_h[i][tun] = old_entry_h;
			prof->vsi_h[i] = prof->vsi_h[i + 1];
		}

		prof->entry_h[i][tun] = 0;
		prof->vsi_h[i] = 0;
	}
}
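/* Worked example for ice_fdir_remap_entries(): with prof->cnt == 4 and
 * idx == 1, the entry handles and VSI handles {A, B, C, D} become
 * {A, C, D, 0}; the caller is expected to decrement prof->cnt afterwards
 * (as ice_fdir_rem_adq_chnl() below does).
 */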
/**
 * ice_fdir_rem_adq_chnl - remove an ADQ channel from HW filter rules
 * @hw: hardware structure containing filter list
 * @vsi_idx: VSI handle
 */
void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
{
	int status, flow;

	if (!hw->fdir_prof)
		return;

	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		struct ice_fd_hw_prof *prof = hw->fdir_prof[flow];
		int tun, i;

		if (!prof || !prof->cnt)
			continue;

		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			u64 prof_id = prof->prof_id[tun];

			for (i = 0; i < prof->cnt; i++) {
				if (prof->vsi_h[i] != vsi_idx)
					continue;

				prof->entry_h[i][tun] = 0;
				prof->vsi_h[i] = 0;
				break;
			}

			/* after clearing FDir entries update the remaining */
			ice_fdir_remap_entries(prof, tun, i);

			/* find flow profile corresponding to prof_id and clear
			 * vsi_idx from bitmap.
			 */
			status = ice_flow_rem_vsi_prof(hw, vsi_idx, prof_id);
			if (status) {
				dev_err(ice_hw_to_dev(hw), "ice_flow_rem_vsi_prof() failed status=%d\n",
					status);
			}
		}
		prof->cnt--;
	}
}

/**
 * ice_fdir_get_hw_prof - return the ice_fd_hw_prof associated with a flow
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow: FDir flow type to release
 */
static struct ice_fd_hw_prof *
ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow)
{
	if (blk == ICE_BLK_FD && hw->fdir_prof)
		return hw->fdir_prof[flow];

	return NULL;
}

/**
 * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow: FDir flow type to release
 */
static void
ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
{
	struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
	int tun;

	if (!prof)
		return;

	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		u64 prof_id = prof->prof_id[tun];
		int j;

		for (j = 0; j < prof->cnt; j++) {
			u16 vsi_num;

			if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
				continue;
			vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
			ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id);
			ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]);
			prof->entry_h[j][tun] = 0;
		}
		ice_flow_rem_prof(hw, blk, prof_id);
	}
}

/**
 * ice_fdir_rem_flow - release the ice_flow structures for a filter type
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow_type: FDir flow type to release
 */
static void
ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
		  enum ice_fltr_ptype flow_type)
{
	int flow = (int)flow_type & ~FLOW_EXT;
	struct ice_fd_hw_prof *prof;
	int tun, i;

	prof = ice_fdir_get_hw_prof(hw, blk, flow);
	if (!prof)
		return;

	ice_fdir_erase_flow_from_hw(hw, blk, flow);
	for (i = 0; i < prof->cnt; i++)
		prof->vsi_h[i] = 0;
	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		if (!prof->fdir_seg[tun])
			continue;
		devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]);
		prof->fdir_seg[tun] = NULL;
	}
	prof->cnt = 0;
}

/**
 * ice_fdir_release_flows - release all flows in use for later replay
 * @hw: pointer to HW instance
 */
void ice_fdir_release_flows(struct ice_hw *hw)
{
	int flow;

	/* release Flow Director HW table entries */
	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);
}
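/* ice_fdir_release_flows() and ice_fdir_replay_flows() are used as a pair
 * across a device reset: release drops the HW table entries but keeps each
 * profile's fdir_seg[] descriptions, which replay then uses to reprogram
 * the profiles and entries.
 */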
/**
 * ice_fdir_replay_flows - replay HW Flow Director filter info
 * @hw: pointer to HW instance
 */
void ice_fdir_replay_flows(struct ice_hw *hw)
{
	int flow;

	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		int tun;

		if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
			continue;
		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			struct ice_flow_prof *hw_prof;
			struct ice_fd_hw_prof *prof;
			int j;

			prof = hw->fdir_prof[flow];
			ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX,
					  prof->fdir_seg[tun], TNL_SEG_CNT(tun),
					  false, &hw_prof);
			for (j = 0; j < prof->cnt; j++) {
				enum ice_flow_priority prio;
				u64 entry_h = 0;
				int err;

				prio = ICE_FLOW_PRIO_NORMAL;
				err = ice_flow_add_entry(hw, ICE_BLK_FD,
							 hw_prof->id,
							 prof->vsi_h[0],
							 prof->vsi_h[j],
							 prio, prof->fdir_seg,
							 &entry_h);
				if (err) {
					dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
						flow);
					continue;
				}
				prof->prof_id[tun] = hw_prof->id;
				prof->entry_h[j][tun] = entry_h;
			}
		}
	}
}

/**
 * ice_parse_rx_flow_user_data - deconstruct user-defined data
 * @fsp: pointer to ethtool Rx flow specification
 * @data: pointer to userdef data structure for storage
 *
 * Returns 0 on success, negative error value on failure
 */
static int
ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
			    struct ice_rx_flow_userdef *data)
{
	u64 value, mask;

	memset(data, 0, sizeof(*data));
	if (!(fsp->flow_type & FLOW_EXT))
		return 0;

	value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
	mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
	if (!mask)
		return 0;

#define ICE_USERDEF_FLEX_WORD_M	GENMASK_ULL(15, 0)
#define ICE_USERDEF_FLEX_OFFS_S	16
#define ICE_USERDEF_FLEX_OFFS_M	GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
#define ICE_USERDEF_FLEX_FLTR_M	GENMASK_ULL(31, 0)

	/* 0x1fe is the maximum value for offsets stored in the internal
	 * filtering tables.
	 */
#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe

	if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
	    value > ICE_USERDEF_FLEX_FLTR_M)
		return -EINVAL;

	data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
	data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >>
			    ICE_USERDEF_FLEX_OFFS_S;
	if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
		return -EINVAL;

	data->flex_fltr = true;

	return 0;
}
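/* Layout of the 64-bit ethtool user-def field as parsed above: bits 15:0
 * carry the flex word to match and bits 31:16 the byte offset to match it
 * at. For example (illustrative value), 0x00C0ABCD selects flex word
 * 0xABCD at offset 0xC0; the mask must cover all 32 bits, and the offset
 * may not exceed ICE_USERDEF_FLEX_MAX_OFFS_VAL (0x1fe).
 */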
/**
 * ice_fdir_num_avail_fltr - return the number of unused flow director filters
 * @hw: pointer to hardware structure
 * @vsi: software VSI structure
 *
 * There are 2 filter pools: guaranteed and best effort (shared). Each VSI can
 * use filters from either pool. The guaranteed pool is divided between VSIs.
 * The best effort filter pool is common to all VSIs and is a device shared
 * resource pool. The number of filters available to this VSI is the sum of
 * the VSI's guaranteed filter pool and the global available best effort
 * filter pool.
 *
 * Returns the number of available flow director filters to this VSI
 */
static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
{
	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	u16 num_guar;
	u16 num_be;

	/* total guaranteed filters assigned to this VSI */
	num_guar = vsi->num_gfltr;

	/* total global best effort filters */
	num_be = hw->func_caps.fd_fltr_best_effort;

	/* Subtract the number of programmed filters from the global values */
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		num_guar -= FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M,
				      rd32(hw, VSIQF_FD_CNT(vsi_num)));
		num_be -= FIELD_GET(E830_GLQF_FD_CNT_FD_BCNT_M,
				    rd32(hw, GLQF_FD_CNT));
		break;
	case ICE_MAC_E810:
	default:
		num_guar -= FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M,
				      rd32(hw, VSIQF_FD_CNT(vsi_num)));
		num_be -= FIELD_GET(E800_GLQF_FD_CNT_FD_BCNT_M,
				    rd32(hw, GLQF_FD_CNT));
	}

	return num_guar + num_be;
}
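/* Illustrative numbers only: a VSI with num_gfltr == 64 of which 10 are
 * programmed, on a device with 2048 best effort filters of which 100 are
 * in use, would report (64 - 10) + (2048 - 100) == 2002 available filters.
 */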
/**
 * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
 * @hw: HW structure containing the FDir flow profile structure(s)
 * @flow: flow type to allocate the flow profile for
 *
 * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
 * on success and negative on error.
 */
static int
ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
{
	if (!hw)
		return -EINVAL;

	if (!hw->fdir_prof) {
		hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw),
					     ICE_FLTR_PTYPE_MAX,
					     sizeof(*hw->fdir_prof),
					     GFP_KERNEL);
		if (!hw->fdir_prof)
			return -ENOMEM;
	}

	if (!hw->fdir_prof[flow]) {
		hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw),
						   sizeof(**hw->fdir_prof),
						   GFP_KERNEL);
		if (!hw->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_fdir_prof_vsi_idx - find or insert a vsi_idx in structure
 * @prof: pointer to flow director HW profile
 * @vsi_idx: vsi_idx to locate
 *
 * Return the index of vsi_idx. If vsi_idx is not found, insert it into
 * the vsi_h table.
 */
static u16
ice_fdir_prof_vsi_idx(struct ice_fd_hw_prof *prof, int vsi_idx)
{
	u16 idx = 0;

	for (idx = 0; idx < prof->cnt; idx++)
		if (prof->vsi_h[idx] == vsi_idx)
			return idx;

	if (idx == prof->cnt)
		prof->vsi_h[prof->cnt++] = vsi_idx;
	return idx;
}
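/* Example for ice_fdir_prof_vsi_idx(): with vsi_h == {3, 7} and cnt == 2,
 * looking up 7 returns index 1, while looking up 9 appends it (vsi_h
 * becomes {3, 7, 9}, cnt becomes 3) and returns index 2.
 */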
/**
 * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
 * @pf: pointer to the PF structure
 * @seg: protocol header description pointer
 * @flow: filter enum
 * @tun: FDir segment to program
 */
static int
ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
			  enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *main_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_hw *hw = &pf->hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	bool del_last;
	int err;
	int idx;

	main_vsi = ice_get_main_vsi(pf);
	if (!main_vsi)
		return -EINVAL;

	ctrl_vsi = ice_get_ctrl_vsi(pf);
	if (!ctrl_vsi)
		return -EINVAL;

	err = ice_fdir_alloc_flow_prof(hw, flow);
	if (err)
		return err;

	hw_prof = hw->fdir_prof[flow];
	old_seg = hw_prof->fdir_seg[tun];
	if (old_seg) {
		/* This flow_type already has a changed input set.
		 * If it matches the requested input set then we are
		 * done. Or, if it's different then it's an error.
		 */
		if (!memcmp(old_seg, seg, sizeof(*seg)))
			return -EEXIST;

		/* if there are FDir filters using this flow,
		 * then return error.
		 */
		if (hw->fdir_fltr_cnt[flow]) {
			dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
			return -EINVAL;
		}

		if (ice_is_arfs_using_perfect_flow(hw, flow)) {
			dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
				flow);
			return -EINVAL;
		}

		/* remove HW filter definition */
		ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
	}

	/* Adding a profile, but there is only one header supported.
	 * That is, the final parameters are one header (segment), no
	 * actions (NULL), and an action count of zero.
	 */
	err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
				TNL_SEG_CNT(tun), false, &prof);
	if (err)
		return err;
	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
				 main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (err)
		goto err_prof;
	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (err)
		goto err_entry;

	hw_prof->fdir_seg[tun] = seg;
	hw_prof->prof_id[tun] = prof->id;
	hw_prof->entry_h[0][tun] = entry1_h;
	hw_prof->entry_h[1][tun] = entry2_h;
	hw_prof->vsi_h[0] = main_vsi->idx;
	hw_prof->vsi_h[1] = ctrl_vsi->idx;
	if (!hw_prof->cnt)
		hw_prof->cnt = 2;

	for (idx = 1; idx < ICE_CHNL_MAX_TC; idx++) {
		u16 vsi_idx;
		u16 vsi_h;

		if (!ice_is_adq_active(pf) || !main_vsi->tc_map_vsi[idx])
			continue;

		entry1_h = 0;
		vsi_h = main_vsi->tc_map_vsi[idx]->idx;
		err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id,
					 main_vsi->idx, vsi_h,
					 ICE_FLOW_PRIO_NORMAL, seg,
					 &entry1_h);
		if (err) {
			dev_err(dev, "Could not add Channel VSI %d to flow group\n",
				idx);
			goto err_unroll;
		}

		vsi_idx = ice_fdir_prof_vsi_idx(hw_prof,
						main_vsi->tc_map_vsi[idx]->idx);
		hw_prof->entry_h[vsi_idx][tun] = entry1_h;
	}

	return 0;

err_unroll:
	entry1_h = 0;
	hw_prof->fdir_seg[tun] = NULL;

	/* The variable del_last will be used to determine when to clean up
	 * the VSI group data. The VSI data is not needed if there are no
	 * segments.
	 */
	del_last = true;
	for (idx = 0; idx < ICE_FD_HW_SEG_MAX; idx++)
		if (hw_prof->fdir_seg[idx]) {
			del_last = false;
			break;
		}

	for (idx = 0; idx < hw_prof->cnt; idx++) {
		u16 vsi_num = ice_get_hw_vsi_num(hw, hw_prof->vsi_h[idx]);

		if (!hw_prof->entry_h[idx][tun])
			continue;
		ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof->id);
		ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
		hw_prof->entry_h[idx][tun] = 0;
		if (del_last)
			hw_prof->vsi_h[idx] = 0;
	}
	if (del_last)
		hw_prof->cnt = 0;
err_entry:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, main_vsi->idx), prof->id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
	dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");

	return err;
}
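/* After a successful ice_fdir_set_hw_fltr_rule() the profile bookkeeping
 * is laid out as: vsi_h[0]/entry_h[0][tun] for the main VSI,
 * vsi_h[1]/entry_h[1][tun] for the control VSI, with any ADQ channel VSIs
 * appended at the indices handed out by ice_fdir_prof_vsi_idx().
 */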
/**
 * ice_set_init_fdir_seg
 * @seg: flow segment for programming
 * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
 * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
 *
 * Set the configuration for perfect filters to the provided flow segment for
 * programming the HW filter. This is to be called only when initializing
 * filters as it assumes no filters exist.
 */
static int
ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
		      enum ice_flow_seg_hdr l3_proto,
		      enum ice_flow_seg_hdr l4_proto)
{
	enum ice_flow_field src_addr, dst_addr, src_port, dst_port;

	if (!seg)
		return -EINVAL;

	if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
		src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
		dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
	} else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
		src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
		dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
	} else {
		return -EINVAL;
	}

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else {
		return -EINVAL;
	}

	ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);

	/* IP source address */
	ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	/* IP destination address */
	ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	/* Layer 4 source port */
	ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	/* Layer 4 destination port */
	ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}
/**
 * ice_create_init_fdir_rule
 * @pf: PF structure
 * @flow: filter enum
 *
 * Return error value or 0 on success.
 */
static int
ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
{
	struct ice_flow_seg_info *seg, *tun_seg;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* if there is already a filter rule for this kind, return -EINVAL */
	if (hw->fdir_prof && hw->fdir_prof[flow] &&
	    hw->fdir_prof[flow]->fdir_seg[0])
		return -EINVAL;

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, seg);
		return -ENOMEM;
	}

	if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
					    ICE_FLOW_SEG_HDR_TCP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
					    ICE_FLOW_SEG_HDR_UDP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
					    ICE_FLOW_SEG_HDR_TCP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
					    ICE_FLOW_SEG_HDR_UDP);
	else
		ret = -EINVAL;
	if (ret)
		goto err_exit;

	/* add filter for outer headers */
	ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN);
	if (ret)
		/* could not write filter, free memory */
		goto err_exit;

	/* make tunneled filter HW entries if possible */
	memcpy(&tun_seg[1], seg, sizeof(*seg));
	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN);
	if (ret)
		/* could not write tunnel filter, but outer header filter
		 * exists
		 */
		devm_kfree(dev, tun_seg);

	set_bit(flow, hw->fdir_perfect_fltr);
	return ret;
err_exit:
	devm_kfree(dev, tun_seg);
	devm_kfree(dev, seg);

	return -EOPNOTSUPP;
}

/**
 * ice_set_fdir_ip4_seg
 * @seg: flow segment for programming
 * @tcp_ip4_spec: mask data from ethtool
 * @l4_proto: Layer 4 protocol to program
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the mask data into the flow segment to be used to program HW
 * table based on provided L4 protocol for IPv4
 */
static int
ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
		     struct ethtool_tcpip4_spec *tcp_ip4_spec,
		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
{
	enum ice_flow_field src_port, dst_port;

	/* make sure we don't have any empty rule */
	if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
	    !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
		return -EINVAL;

	/* filtering on TOS not supported */
	if (tcp_ip4_spec->tos)
		return -EOPNOTSUPP;

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
	} else {
		return -EOPNOTSUPP;
	}

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);

	/* IP source address */
	if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!tcp_ip4_spec->ip4src)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* IP destination address */
	if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!tcp_ip4_spec->ip4dst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 source port */
	if (tcp_ip4_spec->psrc == htons(0xFFFF))
		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip4_spec->psrc)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 destination port */
	if (tcp_ip4_spec->pdst == htons(0xFFFF))
		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip4_spec->pdst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}
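/* The mask handling above means every field is either matched exactly or
 * wildcarded: e.g. an ip4src mask of 255.255.255.0 is rejected with
 * -EOPNOTSUPP, a mask of 0.0.0.0 merely downgrades the rule from a
 * perfect filter, and 255.255.255.255 requests an exact match.
 */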
/**
 * ice_set_fdir_ip4_usr_seg
 * @seg: flow segment for programming
 * @usr_ip4_spec: ethtool userdef packet offset
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the offset data into the flow segment to be used to program HW
 * table for IPv4
 */
static int
ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,
			 struct ethtool_usrip4_spec *usr_ip4_spec,
			 bool *perfect_fltr)
{
	/* first 4 bytes of Layer 4 header */
	if (usr_ip4_spec->l4_4_bytes)
		return -EINVAL;
	if (usr_ip4_spec->tos)
		return -EINVAL;
	if (usr_ip4_spec->ip_ver)
		return -EINVAL;
	/* Filtering on Layer 4 protocol not supported */
	if (usr_ip4_spec->proto)
		return -EOPNOTSUPP;
	/* empty rules are not valid */
	if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
		return -EINVAL;

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);

	/* IP source address */
	if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!usr_ip4_spec->ip4src)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* IP destination address */
	if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!usr_ip4_spec->ip4dst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ice_set_fdir_ip6_seg
 * @seg: flow segment for programming
 * @tcp_ip6_spec: mask data from ethtool
 * @l4_proto: Layer 4 protocol to program
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the mask data into the flow segment to be used to program HW
 * table based on provided L4 protocol for IPv6
 */
static int
ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg,
		     struct ethtool_tcpip6_spec *tcp_ip6_spec,
		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
{
	enum ice_flow_field src_port, dst_port;

	/* make sure we don't have any empty rule */
	if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst)
		return -EINVAL;

	/* filtering on TC not supported */
	if (tcp_ip6_spec->tclass)
		return -EOPNOTSUPP;

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
	} else {
		return -EINVAL;
	}

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto);

	if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 source port */
	if (tcp_ip6_spec->psrc == htons(0xFFFF))
		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip6_spec->psrc)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 destination port */
	if (tcp_ip6_spec->pdst == htons(0xFFFF))
		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip6_spec->pdst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}
/**
 * ice_set_fdir_ip6_usr_seg
 * @seg: flow segment for programming
 * @usr_ip6_spec: ethtool userdef packet offset
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the offset data into the flow segment to be used to program HW
 * table for IPv6
 */
static int
ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
			 struct ethtool_usrip6_spec *usr_ip6_spec,
			 bool *perfect_fltr)
{
	/* filtering on Layer 4 bytes not supported */
	if (usr_ip6_spec->l4_4_bytes)
		return -EOPNOTSUPP;
	/* filtering on TC not supported */
	if (usr_ip6_spec->tclass)
		return -EOPNOTSUPP;
	/* filtering on Layer 4 protocol not supported */
	if (usr_ip6_spec->l4_proto)
		return -EOPNOTSUPP;
	/* empty rules are not valid */
	if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		return -EINVAL;

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);

	if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}
/**
 * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
 * @pf: PF structure
 * @fsp: pointer to ethtool Rx flow specification
 * @user: user defined data from flow specification
 *
 * Returns 0 on success.
 */
static int
ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
		       struct ice_rx_flow_userdef *user)
{
	struct ice_flow_seg_info *seg, *tun_seg;
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_fltr_ptype fltr_idx;
	struct ice_hw *hw = &pf->hw;
	bool perfect_filter;
	int ret;

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, seg);
		return -ENOMEM;
	}

	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_TCP,
					   &perfect_filter);
		break;
	case UDP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_UDP,
					   &perfect_filter);
		break;
	case SCTP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_SCTP,
					   &perfect_filter);
		break;
	case IPV4_USER_FLOW:
		ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec,
					       &perfect_filter);
		break;
	case TCP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_TCP,
					   &perfect_filter);
		break;
	case UDP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_UDP,
					   &perfect_filter);
		break;
	case SCTP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_SCTP,
					   &perfect_filter);
		break;
	case IPV6_USER_FLOW:
		ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
					       &perfect_filter);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		goto err_exit;
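	/* tun_seg is an array of ICE_FD_HW_SEG_MAX segment descriptions:
	 * index 0 is left zeroed, effectively standing for arbitrary outer
	 * (tunnel) headers, while index 1 receives a copy of the inner match
	 * criteria, hence "shifted up one" below.
	 */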
	/* tunnel segments are shifted up one. */
	memcpy(&tun_seg[1], seg, sizeof(*seg));

	if (user && user->flex_fltr) {
		perfect_filter = false;
		ice_flow_add_fld_raw(seg, user->flex_offset,
				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
				     ICE_FLOW_FLD_OFF_INVAL,
				     ICE_FLOW_FLD_OFF_INVAL);
		ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
				     ICE_FLOW_FLD_OFF_INVAL,
				     ICE_FLOW_FLD_OFF_INVAL);
	}

	fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);

	assign_bit(fltr_idx, hw->fdir_perfect_fltr, perfect_filter);

	/* add filter for outer headers */
	ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
					ICE_FD_HW_SEG_NON_TUN);
	if (ret == -EEXIST) {
		/* Rule already exists, free memory and count as success */
		ret = 0;
		goto err_exit;
	} else if (ret) {
		/* could not write filter, free memory */
		goto err_exit;
	}

	/* make tunneled filter HW entries if possible */
	memcpy(&tun_seg[1], seg, sizeof(*seg));
	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,
					ICE_FD_HW_SEG_TUN);
	if (ret == -EEXIST) {
		/* Rule already exists, free memory and count as success */
		devm_kfree(dev, tun_seg);
		ret = 0;
	} else if (ret) {
		/* could not write tunnel filter, but outer filter exists */
		devm_kfree(dev, tun_seg);
	}

	return ret;

err_exit:
	devm_kfree(dev, tun_seg);
	devm_kfree(dev, seg);

	return ret;
}

/**
 * ice_update_per_q_fltr
 * @vsi: ptr to VSI
 * @q_index: queue index
 * @inc: true to increment or false to decrement per queue filter count
 *
 * This function is used to keep track of per queue sideband filters
 */
static void ice_update_per_q_fltr(struct ice_vsi *vsi, u32 q_index, bool inc)
{
	struct ice_rx_ring *rx_ring;

	if (!vsi->num_rxq || q_index >= vsi->num_rxq)
		return;

	rx_ring = vsi->rx_rings[q_index];
	if (!rx_ring || !rx_ring->ch)
		return;

	if (inc)
		atomic_inc(&rx_ring->ch->num_sb_fltr);
	else
		atomic_dec_if_positive(&rx_ring->ch->num_sb_fltr);
}

/**
 * ice_fdir_write_fltr - send a flow director filter to the hardware
 * @pf: PF data structure
 * @input: filter structure
 * @add: true adds filter and false removes filter
 * @is_tun: true programs the tunneled (inner) headers, false the outer headers
 *
 * returns 0 on success and negative value on error
 */
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
		    bool is_tun)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_fltr_desc desc;
	struct ice_vsi *ctrl_vsi;
	u8 *pkt, *frag_pkt;
	bool has_frag;
	int err;

	ctrl_vsi = ice_get_ctrl_vsi(pf);
	if (!ctrl_vsi)
		return -EINVAL;

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;
	frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!frag_pkt) {
		err = -ENOMEM;
		goto err_free;
	}

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (err)
		goto err_free_all;
	err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (err)
		goto err_free_all;

	/* repeat for fragment packet */
	has_frag = ice_fdir_has_frag(input->flow_type);
	if (has_frag) {
		/* does not return error */
		ice_fdir_get_prgm_desc(hw, input, &desc, add);
		err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
						is_tun);
		if (err)
			goto err_frag;
		err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
		if (err)
			goto err_frag;
	} else {
		devm_kfree(dev, frag_pkt);
	}

	return 0;

err_free_all:
	devm_kfree(dev, frag_pkt);
err_free:
	devm_kfree(dev, pkt);
	return err;

err_frag:
	devm_kfree(dev, frag_pkt);
	return err;
}
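/* ice_fdir_write_fltr() above programs a rule by queueing a Flow Director
 * programming descriptor together with a generated dummy packet on the
 * control VSI; fragment-capable flow types are programmed a second time
 * with the fragment variant of the dummy packet.
 */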
/**
 * ice_fdir_write_all_fltr - send a flow director filter to the hardware
 * @pf: PF data structure
 * @input: filter structure
 * @add: true adds filter and false removes filter
 *
 * returns 0 on success and negative value on error
 */
static int
ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
			bool add)
{
	u16 port_num;
	int tun;

	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		bool is_tun = tun == ICE_FD_HW_SEG_TUN;
		int err;

		if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL))
			continue;
		err = ice_fdir_write_fltr(pf, input, add, is_tun);
		if (err)
			return err;
	}
	return 0;
}

/**
 * ice_fdir_replay_fltrs - replay filters from the HW filter list
 * @pf: board private structure
 */
void ice_fdir_replay_fltrs(struct ice_pf *pf)
{
	struct ice_fdir_fltr *f_rule;
	struct ice_hw *hw = &pf->hw;

	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
		int err = ice_fdir_write_all_fltr(pf, f_rule, true);

		if (err)
			dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
				err, f_rule->fltr_id);
	}
}
/**
 * ice_fdir_create_dflt_rules - create default perfect filters
 * @pf: PF data structure
 *
 * Returns 0 for success or error.
 */
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
	int err;

	/* Create perfect TCP and UDP rules in hardware. */
	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);

	return err;
}

/**
 * ice_fdir_del_all_fltrs - Delete all flow director filters
 * @vsi: the VSI being changed
 *
 * This function needs to be called while holding hw->fdir_fltr_lock
 */
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi)
{
	struct ice_fdir_fltr *f_rule, *tmp;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
		ice_fdir_write_all_fltr(pf, f_rule, false);
		ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
		list_del(&f_rule->fltr_node);
		devm_kfree(ice_pf_to_dev(pf), f_rule);
	}
}

/**
 * ice_vsi_manage_fdir - turn on/off flow director
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 */
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_fltr_ptype flow;

	if (ena) {
		set_bit(ICE_FLAG_FD_ENA, pf->flags);
		ice_fdir_create_dflt_rules(pf);
		return;
	}

	mutex_lock(&hw->fdir_fltr_lock);
	if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
		goto release_lock;

	ice_fdir_del_all_fltrs(vsi);

	if (hw->fdir_prof)
		for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
		     flow++)
			if (hw->fdir_prof[flow])
				ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
}

/**
 * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
 * @pf: PF structure
 * @flow_type: FDir flow type to release
 */
static void
ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
{
	struct ice_hw *hw = &pf->hw;
	bool need_perfect = false;

	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		need_perfect = true;

	if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
		return;

	ice_fdir_rem_flow(hw, ICE_BLK_FD, flow_type);
	if (need_perfect)
		ice_create_init_fdir_rule(pf, flow_type);
}
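/* Note on ice_fdir_do_rem_flow(): for the TCP/UDP v4/v6 flow types the
 * default perfect rule is preserved: the flow is only torn down when its
 * input set was changed from the default, and in that case the default
 * rule from ice_fdir_create_dflt_rules() is immediately recreated.
 */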
/**
 * ice_fdir_update_list_entry - add or delete a filter from the filter list
 * @pf: PF structure
 * @input: filter structure
 * @fltr_idx: ethtool index of filter to modify
 *
 * returns 0 on success and negative on errors
 */
static int
ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
			   int fltr_idx)
{
	struct ice_fdir_fltr *old_fltr;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	int err = -ENOENT;

	/* Do not update filters during reset */
	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
	if (old_fltr) {
		err = ice_fdir_write_all_fltr(pf, old_fltr, false);
		if (err)
			return err;
		ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
		/* update sb-filters count, specific to ring->channel */
		ice_update_per_q_fltr(vsi, old_fltr->orig_q_index, false);
		if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
			/* we just deleted the last filter of flow_type so we
			 * should also delete the HW filter info.
			 */
			ice_fdir_do_rem_flow(pf, old_fltr->flow_type);
		list_del(&old_fltr->fltr_node);
		devm_kfree(ice_hw_to_dev(hw), old_fltr);
	}
	if (!input)
		return err;
	ice_fdir_list_add_fltr(hw, input);
	/* update sb-filters count, specific to ring->channel */
	ice_update_per_q_fltr(vsi, input->orig_q_index, true);
	ice_fdir_update_cntrs(hw, input->flow_type, true);
	return 0;
}

/**
 * ice_del_fdir_ethtool - delete Flow Director filter
 * @vsi: pointer to target VSI
 * @cmd: command to add or delete Flow Director filter
 *
 * Returns 0 on success and negative values for failure
 */
int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int val;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EOPNOTSUPP;

	/* Do not delete filters during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
		return -EBUSY;
	}

	if (test_bit(ICE_FD_FLUSH_REQ, pf->state))
		return -EBUSY;

	mutex_lock(&hw->fdir_fltr_lock);
	val = ice_fdir_update_list_entry(pf, NULL, fsp->location);
	mutex_unlock(&hw->fdir_fltr_lock);

	return val;
}

/**
 * ice_update_ring_dest_vsi - update dest ring and dest VSI
 * @vsi: pointer to target VSI
 * @dest_vsi: ptr to dest VSI index
 * @ring: ptr to dest ring
 *
 * This function updates the destination VSI and queue if the user-specified
 * target queue falls in a channel's (aka ADQ) queue region
 */
static void
ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring)
{
	struct ice_channel *ch;

	list_for_each_entry(ch, &vsi->ch_list, list) {
		if (!ch->ch_vsi)
			continue;

		/* make sure to locate corresponding channel based on "queue"
		 * specified
		 */
		if ((*ring < ch->base_q) ||
		    (*ring >= (ch->base_q + ch->num_rxq)))
			continue;

		/* update the dest_vsi based on channel */
		*dest_vsi = ch->ch_vsi->idx;

		/* update the "ring" to be correct based on channel */
		*ring -= ch->base_q;
	}
}
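/* ADQ example for ice_update_ring_dest_vsi(): given a channel with
 * base_q == 32 and num_rxq == 16, a user-specified ring of 36 resolves to
 * that channel's VSI with the ring rewritten to 36 - 32 == 4.
 */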
/**
 * ice_set_fdir_input_set - Set the input set for Flow Director
 * @vsi: pointer to target VSI
 * @fsp: pointer to ethtool Rx flow specification
 * @input: filter structure
 */
static int
ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
		       struct ice_fdir_fltr *input)
{
	u16 dest_vsi, q_index = 0;
	u16 orig_q_index = 0;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int flow_type;
	u8 dest_ctl;

	if (!vsi || !fsp || !input)
		return -EINVAL;

	pf = vsi->back;
	hw = &pf->hw;

	dest_vsi = vsi->idx;
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (vf) {
			dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n");
			return -EINVAL;
		}

		if (ring >= vsi->num_rxq)
			return -EINVAL;

		orig_q_index = ring;
		ice_update_ring_dest_vsi(vsi, &dest_vsi, &ring);
		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
		q_index = ring;
	}

	input->fltr_id = fsp->location;
	input->q_index = q_index;
	flow_type = fsp->flow_type & ~FLOW_EXT;

	/* Record the original queue index as specified by user.
	 * with channel configuration 'q_index' becomes relative
	 * to TC (channel).
	 */
	input->orig_q_index = orig_q_index;
	input->dest_vsi = dest_vsi;
	input->dest_ctl = dest_ctl;
	input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
	input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
	input->flow_type = ice_ethtool_flow_to_fltr(flow_type);

	if (fsp->flow_type & FLOW_EXT) {
		memcpy(input->ext_data.usr_def, fsp->h_ext.data,
		       sizeof(input->ext_data.usr_def));
		input->ext_data.vlan_type = fsp->h_ext.vlan_etype;
		input->ext_data.vlan_tag = fsp->h_ext.vlan_tci;
		memcpy(input->ext_mask.usr_def, fsp->m_ext.data,
		       sizeof(input->ext_mask.usr_def));
		input->ext_mask.vlan_type = fsp->m_ext.vlan_etype;
		input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;
	}

	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
		input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc;
		input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
		input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
		input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
		input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc;
		input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
		input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
		break;
	case IPV4_USER_FLOW:
		input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
		input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
		input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
		input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto;
		input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;
		input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos;
		input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
		input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
		input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
		input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto;
		input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;
		input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
		input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc;
		input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass;
		memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
		input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc;
		input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass;
		break;
	case IPV6_USER_FLOW:
		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
		input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;

		/* if no protocol requested, use IPPROTO_NONE */
		if (!fsp->m_u.usr_ip6_spec.l4_proto)
			input->ip.v6.proto = IPPROTO_NONE;
		else
			input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;

		memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
		input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
		input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
		break;
	default:
		/* not doing un-parsed flow types */
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_add_fdir_ethtool - Add/Remove Flow Director filter
 * @vsi: pointer to target VSI
 * @cmd: command to add or delete Flow Director filter
 *
 * Returns 0 on success and negative values for failure
 */
int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
{
	struct ice_rx_flow_userdef userdata;
	struct ethtool_rx_flow_spec *fsp;
	struct ice_fdir_fltr *input;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int fltrs_needed;
	u32 max_location;
	u16 tunnel_port;
	int ret;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EOPNOTSUPP;

	/* Do not program filters during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n");
		return -EBUSY;
	}

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (ice_parse_rx_flow_user_data(fsp, &userdata))
		return -EINVAL;

	if (fsp->flow_type & FLOW_MAC_EXT)
		return -EINVAL;

	ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata);
	if (ret)
		return ret;

	max_location = ice_get_fdir_cnt_all(hw);
	if (fsp->location >= max_location) {
		dev_err(dev, "Failed to add filter. The number of ntuple filters or provided location exceed max %d.\n",
			max_location);
		return -ENOSPC;
	}

	/* return error if not an update and no available filters */
	fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
	if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
	    ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
		dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
		return -ENOSPC;
	}

	input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	ret = ice_set_fdir_input_set(vsi, fsp, input);
	if (ret)
		goto free_input;

	mutex_lock(&hw->fdir_fltr_lock);
	if (ice_fdir_is_dup_fltr(hw, input)) {
		ret = -EINVAL;
		goto release_lock;
	}

	if (userdata.flex_fltr) {
		input->flex_fltr = true;
		input->flex_word = cpu_to_be16(userdata.flex_word);
		input->flex_offset = userdata.flex_offset;
	}

	input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
	input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;

	/* input struct is added to the HW filter list */
	ret = ice_fdir_update_list_entry(pf, input, fsp->location);
	if (ret)
		goto release_lock;

	ret = ice_fdir_write_all_fltr(pf, input, true);
	if (ret)
		goto remove_sw_rule;

	goto release_lock;

remove_sw_rule:
	ice_fdir_update_cntrs(hw, input->flow_type, false);
	/* update sb-filters count, specific to ring->channel */
	ice_update_per_q_fltr(vsi, input->orig_q_index, false);
	list_del(&input->fltr_node);
release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
free_input:
	if (ret)
		devm_kfree(dev, input);

	return ret;
}