1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 /* Copyright (c) 2024, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 #include "ice_common.h" 33 #include "ice_ddp_common.h" 34 #include "ice_flex_pipe.h" 35 #include "ice_protocol_type.h" 36 #include "ice_flow.h" 37 38 static const struct ice_tunnel_type_scan tnls[] = { 39 { TNL_VXLAN, "TNL_VXLAN_PF" }, 40 { TNL_GENEVE, "TNL_GENEVE_PF" }, 41 { TNL_LAST, "" } 42 }; 43 44 static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = { 45 /* SWITCH */ 46 { 47 ICE_SID_XLT0_SW, 48 ICE_SID_XLT_KEY_BUILDER_SW, 49 ICE_SID_XLT1_SW, 50 ICE_SID_XLT2_SW, 51 ICE_SID_PROFID_TCAM_SW, 52 ICE_SID_PROFID_REDIR_SW, 53 ICE_SID_FLD_VEC_SW, 54 ICE_SID_CDID_KEY_BUILDER_SW, 55 ICE_SID_CDID_REDIR_SW 56 }, 57 58 /* ACL */ 59 { 60 ICE_SID_XLT0_ACL, 61 ICE_SID_XLT_KEY_BUILDER_ACL, 62 ICE_SID_XLT1_ACL, 63 ICE_SID_XLT2_ACL, 64 ICE_SID_PROFID_TCAM_ACL, 65 ICE_SID_PROFID_REDIR_ACL, 66 ICE_SID_FLD_VEC_ACL, 67 ICE_SID_CDID_KEY_BUILDER_ACL, 68 ICE_SID_CDID_REDIR_ACL 69 }, 70 71 /* FD */ 72 { 73 ICE_SID_XLT0_FD, 74 ICE_SID_XLT_KEY_BUILDER_FD, 75 ICE_SID_XLT1_FD, 76 ICE_SID_XLT2_FD, 77 ICE_SID_PROFID_TCAM_FD, 78 ICE_SID_PROFID_REDIR_FD, 79 ICE_SID_FLD_VEC_FD, 80 ICE_SID_CDID_KEY_BUILDER_FD, 81 ICE_SID_CDID_REDIR_FD 82 }, 83 84 /* RSS */ 85 { 86 ICE_SID_XLT0_RSS, 87 ICE_SID_XLT_KEY_BUILDER_RSS, 88 ICE_SID_XLT1_RSS, 89 ICE_SID_XLT2_RSS, 90 ICE_SID_PROFID_TCAM_RSS, 91 ICE_SID_PROFID_REDIR_RSS, 92 ICE_SID_FLD_VEC_RSS, 93 ICE_SID_CDID_KEY_BUILDER_RSS, 94 ICE_SID_CDID_REDIR_RSS 95 }, 96 97 /* PE */ 98 { 99 ICE_SID_XLT0_PE, 100 ICE_SID_XLT_KEY_BUILDER_PE, 101 ICE_SID_XLT1_PE, 102 ICE_SID_XLT2_PE, 103 ICE_SID_PROFID_TCAM_PE, 104 ICE_SID_PROFID_REDIR_PE, 105 ICE_SID_FLD_VEC_PE, 106 ICE_SID_CDID_KEY_BUILDER_PE, 107 ICE_SID_CDID_REDIR_PE 108 } 109 }; 110 111 /** 112 * ice_sect_id - returns section ID 113 * @blk: block type 114 * @sect: section type 115 * 116 * This helper function returns the proper section ID given a block type and a 117 * section type. 118 */ 119 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect) 120 { 121 return ice_sect_lkup[blk][sect]; 122 } 123 124 /** 125 * ice_add_tunnel_hint 126 * @hw: pointer to the HW structure 127 * @label_name: label text 128 * @val: value of the tunnel port boost entry 129 */ 130 void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) 131 { 132 if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { 133 u16 i; 134 135 for (i = 0; tnls[i].type != TNL_LAST; i++) { 136 size_t len = strlen(tnls[i].label_prefix); 137 138 /* Look for matching label start, before continuing */ 139 if (strncmp(label_name, tnls[i].label_prefix, len)) 140 continue; 141 142 /* Make sure this label matches our PF. Note that the PF 143 * character ('0' - '7') will be located where our 144 * prefix string's null terminator is located. 145 */ 146 if ((label_name[len] - '0') == hw->pf_id) { 147 hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; 148 hw->tnl.tbl[hw->tnl.count].valid = false; 149 hw->tnl.tbl[hw->tnl.count].in_use = false; 150 hw->tnl.tbl[hw->tnl.count].marked = false; 151 hw->tnl.tbl[hw->tnl.count].boost_addr = val; 152 hw->tnl.tbl[hw->tnl.count].port = 0; 153 hw->tnl.count++; 154 break; 155 } 156 } 157 } 158 } 159 160 /* Key creation */ 161 162 #define ICE_DC_KEY 0x1 /* don't care */ 163 #define ICE_DC_KEYINV 0x1 164 #define ICE_NM_KEY 0x0 /* never match */ 165 #define ICE_NM_KEYINV 0x0 166 #define ICE_0_KEY 0x1 /* match 0 */ 167 #define ICE_0_KEYINV 0x0 168 #define ICE_1_KEY 0x0 /* match 1 */ 169 #define ICE_1_KEYINV 0x1 170 171 /** 172 * ice_gen_key_word - generate 16-bits of a key/mask word 173 * @val: the value 174 * @valid: valid bits mask (change only the valid bits) 175 * @dont_care: don't care mask 176 * @nvr_mtch: never match mask 177 * @key: pointer to an array of where the resulting key portion 178 * @key_inv: pointer to an array of where the resulting key invert portion 179 * 180 * This function generates 16-bits from a 8-bit value, an 8-bit don't care mask 181 * and an 8-bit never match mask. The 16-bits of output are divided into 8 bits 182 * of key and 8 bits of key invert. 183 * 184 * '0' = b01, always match a 0 bit 185 * '1' = b10, always match a 1 bit 186 * '?' = b11, don't care bit (always matches) 187 * '~' = b00, never match bit 188 * 189 * Input: 190 * val: b0 1 0 1 0 1 191 * dont_care: b0 0 1 1 0 0 192 * never_mtch: b0 0 0 0 1 1 193 * ------------------------------ 194 * Result: key: b01 10 11 11 00 00 195 */ 196 static enum ice_status 197 ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key, 198 u8 *key_inv) 199 { 200 u8 in_key = *key, in_key_inv = *key_inv; 201 u8 i; 202 203 /* 'dont_care' and 'nvr_mtch' masks cannot overlap */ 204 if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch)) 205 return ICE_ERR_CFG; 206 207 *key = 0; 208 *key_inv = 0; 209 210 /* encode the 8 bits into 8-bit key and 8-bit key invert */ 211 for (i = 0; i < 8; i++) { 212 *key >>= 1; 213 *key_inv >>= 1; 214 215 if (!(valid & 0x1)) { /* change only valid bits */ 216 *key |= (in_key & 0x1) << 7; 217 *key_inv |= (in_key_inv & 0x1) << 7; 218 } else if (dont_care & 0x1) { /* don't care bit */ 219 *key |= ICE_DC_KEY << 7; 220 *key_inv |= ICE_DC_KEYINV << 7; 221 } else if (nvr_mtch & 0x1) { /* never match bit */ 222 *key |= ICE_NM_KEY << 7; 223 *key_inv |= ICE_NM_KEYINV << 7; 224 } else if (val & 0x01) { /* exact 1 match */ 225 *key |= ICE_1_KEY << 7; 226 *key_inv |= ICE_1_KEYINV << 7; 227 } else { /* exact 0 match */ 228 *key |= ICE_0_KEY << 7; 229 *key_inv |= ICE_0_KEYINV << 7; 230 } 231 232 dont_care >>= 1; 233 nvr_mtch >>= 1; 234 valid >>= 1; 235 val >>= 1; 236 in_key >>= 1; 237 in_key_inv >>= 1; 238 } 239 240 return ICE_SUCCESS; 241 } 242 243 /** 244 * ice_bits_max_set - determine if the number of bits set is within a maximum 245 * @mask: pointer to the byte array which is the mask 246 * @size: the number of bytes in the mask 247 * @max: the max number of set bits 248 * 249 * This function determines if there are at most 'max' number of bits set in an 250 * array. Returns true if the number for bits set is <= max or will return false 251 * otherwise. 252 */ 253 static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max) 254 { 255 u16 count = 0; 256 u16 i; 257 258 /* check each byte */ 259 for (i = 0; i < size; i++) { 260 /* if 0, go to next byte */ 261 if (!mask[i]) 262 continue; 263 264 /* We know there is at least one set bit in this byte because of 265 * the above check; if we already have found 'max' number of 266 * bits set, then we can return failure now. 267 */ 268 if (count == max) 269 return false; 270 271 /* count the bits in this byte, checking threshold */ 272 count += ice_hweight8(mask[i]); 273 if (count > max) 274 return false; 275 } 276 277 return true; 278 } 279 280 /** 281 * ice_set_key - generate a variable sized key with multiples of 16-bits 282 * @key: pointer to where the key will be stored 283 * @size: the size of the complete key in bytes (must be even) 284 * @val: array of 8-bit values that makes up the value portion of the key 285 * @upd: array of 8-bit masks that determine what key portion to update 286 * @dc: array of 8-bit masks that make up the don't care mask 287 * @nm: array of 8-bit masks that make up the never match mask 288 * @off: the offset of the first byte in the key to update 289 * @len: the number of bytes in the key update 290 * 291 * This function generates a key from a value, a don't care mask and a never 292 * match mask. 293 * upd, dc, and nm are optional parameters, and can be NULL: 294 * upd == NULL --> upd mask is all 1's (update all bits) 295 * dc == NULL --> dc mask is all 0's (no don't care bits) 296 * nm == NULL --> nm mask is all 0's (no never match bits) 297 */ 298 static enum ice_status 299 ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, 300 u16 len) 301 { 302 u16 half_size; 303 u16 i; 304 305 /* size must be a multiple of 2 bytes. */ 306 if (size % 2) 307 return ICE_ERR_CFG; 308 half_size = size / 2; 309 310 if (off + len > half_size) 311 return ICE_ERR_CFG; 312 313 /* Make sure at most one bit is set in the never match mask. Having more 314 * than one never match mask bit set will cause HW to consume excessive 315 * power otherwise; this is a power management efficiency check. 316 */ 317 #define ICE_NVR_MTCH_BITS_MAX 1 318 if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX)) 319 return ICE_ERR_CFG; 320 321 for (i = 0; i < len; i++) 322 if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff, 323 dc ? dc[i] : 0, nm ? nm[i] : 0, 324 key + off + i, key + half_size + off + i)) 325 return ICE_ERR_CFG; 326 327 return ICE_SUCCESS; 328 } 329 330 /** 331 * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage 332 * @hw: pointer to the HW structure 333 * @port: port to search for 334 * @index: optionally returns index 335 * 336 * Returns whether a port is already in use as a tunnel, and optionally its 337 * index 338 */ 339 static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index) 340 { 341 u16 i; 342 343 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 344 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) { 345 if (index) 346 *index = i; 347 return true; 348 } 349 350 return false; 351 } 352 353 /** 354 * ice_tunnel_port_in_use 355 * @hw: pointer to the HW structure 356 * @port: port to search for 357 * @index: optionally returns index 358 * 359 * Returns whether a port is already in use as a tunnel, and optionally its 360 * index 361 */ 362 bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index) 363 { 364 bool res; 365 366 ice_acquire_lock(&hw->tnl_lock); 367 res = ice_tunnel_port_in_use_hlpr(hw, port, index); 368 ice_release_lock(&hw->tnl_lock); 369 370 return res; 371 } 372 373 /** 374 * ice_tunnel_get_type 375 * @hw: pointer to the HW structure 376 * @port: port to search for 377 * @type: returns tunnel index 378 * 379 * For a given port number, will return the type of tunnel. 380 */ 381 bool 382 ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type) 383 { 384 bool res = false; 385 u16 i; 386 387 ice_acquire_lock(&hw->tnl_lock); 388 389 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 390 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) { 391 *type = hw->tnl.tbl[i].type; 392 res = true; 393 break; 394 } 395 396 ice_release_lock(&hw->tnl_lock); 397 398 return res; 399 } 400 401 /** 402 * ice_find_free_tunnel_entry 403 * @hw: pointer to the HW structure 404 * @type: tunnel type 405 * @index: optionally returns index 406 * 407 * Returns whether there is a free tunnel entry, and optionally its index 408 */ 409 static bool 410 ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type, 411 u16 *index) 412 { 413 u16 i; 414 415 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 416 if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use && 417 hw->tnl.tbl[i].type == type) { 418 if (index) 419 *index = i; 420 return true; 421 } 422 423 return false; 424 } 425 426 /** 427 * ice_get_open_tunnel_port - retrieve an open tunnel port 428 * @hw: pointer to the HW structure 429 * @type: tunnel type (TNL_ALL will return any open port) 430 * @port: returns open port 431 */ 432 bool 433 ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, 434 u16 *port) 435 { 436 bool res = false; 437 u16 i; 438 439 ice_acquire_lock(&hw->tnl_lock); 440 441 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 442 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && 443 (type == TNL_ALL || hw->tnl.tbl[i].type == type)) { 444 *port = hw->tnl.tbl[i].port; 445 res = true; 446 break; 447 } 448 449 ice_release_lock(&hw->tnl_lock); 450 451 return res; 452 } 453 454 /** 455 * ice_create_tunnel 456 * @hw: pointer to the HW structure 457 * @type: type of tunnel 458 * @port: port of tunnel to create 459 * 460 * Create a tunnel by updating the parse graph in the parser. We do that by 461 * creating a package buffer with the tunnel info and issuing an update package 462 * command. 463 */ 464 enum ice_status 465 ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port) 466 { 467 struct ice_boost_tcam_section *sect_rx, *sect_tx; 468 enum ice_status status = ICE_ERR_MAX_LIMIT; 469 struct ice_buf_build *bld; 470 u16 index; 471 472 ice_acquire_lock(&hw->tnl_lock); 473 474 if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) { 475 hw->tnl.tbl[index].ref++; 476 status = ICE_SUCCESS; 477 goto ice_create_tunnel_end; 478 } 479 480 if (!ice_find_free_tunnel_entry(hw, type, &index)) { 481 status = ICE_ERR_OUT_OF_RANGE; 482 goto ice_create_tunnel_end; 483 } 484 485 bld = ice_pkg_buf_alloc(hw); 486 if (!bld) { 487 status = ICE_ERR_NO_MEMORY; 488 goto ice_create_tunnel_end; 489 } 490 491 /* allocate 2 sections, one for Rx parser, one for Tx parser */ 492 if (ice_pkg_buf_reserve_section(bld, 2)) 493 goto ice_create_tunnel_err; 494 495 sect_rx = (struct ice_boost_tcam_section *) 496 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, 497 ice_struct_size(sect_rx, tcam, 1)); 498 if (!sect_rx) 499 goto ice_create_tunnel_err; 500 sect_rx->count = CPU_TO_LE16(1); 501 502 sect_tx = (struct ice_boost_tcam_section *) 503 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, 504 ice_struct_size(sect_tx, tcam, 1)); 505 if (!sect_tx) 506 goto ice_create_tunnel_err; 507 sect_tx->count = CPU_TO_LE16(1); 508 509 /* copy original boost entry to update package buffer */ 510 ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry, 511 sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA); 512 513 /* over-write the never-match dest port key bits with the encoded port 514 * bits 515 */ 516 ice_set_key((u8 *)§_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key), 517 (u8 *)&port, NULL, NULL, NULL, 518 (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key), 519 sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key)); 520 521 /* exact copy of entry to Tx section entry */ 522 ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam), 523 ICE_NONDMA_TO_NONDMA); 524 525 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); 526 if (!status) { 527 hw->tnl.tbl[index].port = port; 528 hw->tnl.tbl[index].in_use = true; 529 hw->tnl.tbl[index].ref = 1; 530 } 531 532 ice_create_tunnel_err: 533 ice_pkg_buf_free(hw, bld); 534 535 ice_create_tunnel_end: 536 ice_release_lock(&hw->tnl_lock); 537 538 return status; 539 } 540 541 /** 542 * ice_destroy_tunnel 543 * @hw: pointer to the HW structure 544 * @port: port of tunnel to destroy (ignored if the all parameter is true) 545 * @all: flag that states to destroy all tunnels 546 * 547 * Destroys a tunnel or all tunnels by creating an update package buffer 548 * targeting the specific updates requested and then performing an update 549 * package. 550 */ 551 enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) 552 { 553 struct ice_boost_tcam_section *sect_rx, *sect_tx; 554 enum ice_status status = ICE_ERR_MAX_LIMIT; 555 struct ice_buf_build *bld; 556 u16 count = 0; 557 u16 index; 558 u16 size; 559 u16 i, j; 560 561 ice_acquire_lock(&hw->tnl_lock); 562 563 if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index)) 564 if (hw->tnl.tbl[index].ref > 1) { 565 hw->tnl.tbl[index].ref--; 566 status = ICE_SUCCESS; 567 goto ice_destroy_tunnel_end; 568 } 569 570 /* determine count */ 571 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 572 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && 573 (all || hw->tnl.tbl[i].port == port)) 574 count++; 575 576 if (!count) { 577 status = ICE_ERR_PARAM; 578 goto ice_destroy_tunnel_end; 579 } 580 581 /* size of section - there is at least one entry */ 582 size = ice_struct_size(sect_rx, tcam, count); 583 584 bld = ice_pkg_buf_alloc(hw); 585 if (!bld) { 586 status = ICE_ERR_NO_MEMORY; 587 goto ice_destroy_tunnel_end; 588 } 589 590 /* allocate 2 sections, one for Rx parser, one for Tx parser */ 591 if (ice_pkg_buf_reserve_section(bld, 2)) 592 goto ice_destroy_tunnel_err; 593 594 sect_rx = (struct ice_boost_tcam_section *) 595 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, 596 size); 597 if (!sect_rx) 598 goto ice_destroy_tunnel_err; 599 sect_rx->count = CPU_TO_LE16(count); 600 601 sect_tx = (struct ice_boost_tcam_section *) 602 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, 603 size); 604 if (!sect_tx) 605 goto ice_destroy_tunnel_err; 606 sect_tx->count = CPU_TO_LE16(count); 607 608 /* copy original boost entry to update package buffer, one copy to Rx 609 * section, another copy to the Tx section 610 */ 611 for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 612 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && 613 (all || hw->tnl.tbl[i].port == port)) { 614 ice_memcpy(sect_rx->tcam + j, 615 hw->tnl.tbl[i].boost_entry, 616 sizeof(*sect_rx->tcam), 617 ICE_NONDMA_TO_NONDMA); 618 ice_memcpy(sect_tx->tcam + j, 619 hw->tnl.tbl[i].boost_entry, 620 sizeof(*sect_tx->tcam), 621 ICE_NONDMA_TO_NONDMA); 622 hw->tnl.tbl[i].marked = true; 623 j++; 624 } 625 626 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); 627 if (!status) 628 for (i = 0; i < hw->tnl.count && 629 i < ICE_TUNNEL_MAX_ENTRIES; i++) 630 if (hw->tnl.tbl[i].marked) { 631 hw->tnl.tbl[i].ref = 0; 632 hw->tnl.tbl[i].port = 0; 633 hw->tnl.tbl[i].in_use = false; 634 hw->tnl.tbl[i].marked = false; 635 } 636 637 ice_destroy_tunnel_err: 638 ice_pkg_buf_free(hw, bld); 639 640 ice_destroy_tunnel_end: 641 ice_release_lock(&hw->tnl_lock); 642 643 return status; 644 } 645 646 /** 647 * ice_replay_tunnels 648 * @hw: pointer to the HW structure 649 * 650 * Replays all tunnels 651 */ 652 enum ice_status ice_replay_tunnels(struct ice_hw *hw) 653 { 654 enum ice_status status = ICE_SUCCESS; 655 u16 i; 656 657 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 658 659 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) { 660 enum ice_tunnel_type type = hw->tnl.tbl[i].type; 661 u16 refs = hw->tnl.tbl[i].ref; 662 u16 port = hw->tnl.tbl[i].port; 663 664 if (!hw->tnl.tbl[i].in_use) 665 continue; 666 667 /* Replay tunnels one at a time by destroying them, then 668 * recreating them 669 */ 670 hw->tnl.tbl[i].ref = 1; /* make sure to destroy in one call */ 671 status = ice_destroy_tunnel(hw, port, false); 672 if (status) { 673 ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - destroy tunnel port 0x%x\n", 674 status, port); 675 hw->tnl.tbl[i].ref = refs; 676 break; 677 } 678 679 status = ice_create_tunnel(hw, type, port); 680 if (status) { 681 ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - create tunnel port 0x%x\n", 682 status, port); 683 break; 684 } 685 686 /* reset to original ref count */ 687 hw->tnl.tbl[i].ref = refs; 688 } 689 690 return status; 691 } 692 693 /** 694 * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index 695 * @hw: pointer to the hardware structure 696 * @blk: hardware block 697 * @prof: profile ID 698 * @fv_idx: field vector word index 699 * @prot: variable to receive the protocol ID 700 * @off: variable to receive the protocol offset 701 */ 702 enum ice_status 703 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, 704 u8 *prot, u16 *off) 705 { 706 struct ice_fv_word *fv_ext; 707 708 if (prof >= hw->blk[blk].es.count) 709 return ICE_ERR_PARAM; 710 711 if (fv_idx >= hw->blk[blk].es.fvw) 712 return ICE_ERR_PARAM; 713 714 fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw); 715 716 *prot = fv_ext[fv_idx].prot_id; 717 *off = fv_ext[fv_idx].off; 718 719 return ICE_SUCCESS; 720 } 721 722 /* PTG Management */ 723 724 /** 725 * ice_ptg_update_xlt1 - Updates packet type groups in HW via XLT1 table 726 * @hw: pointer to the hardware structure 727 * @blk: HW block 728 * 729 * This function will update the XLT1 hardware table to reflect the new 730 * packet type group configuration. 731 */ 732 enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk) 733 { 734 struct ice_xlt1_section *sect; 735 struct ice_buf_build *bld; 736 enum ice_status status; 737 u16 index; 738 739 bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1), 740 ice_struct_size(sect, value, 741 ICE_XLT1_CNT), 742 (void **)§); 743 if (!bld) 744 return ICE_ERR_NO_MEMORY; 745 746 sect->count = CPU_TO_LE16(ICE_XLT1_CNT); 747 sect->offset = CPU_TO_LE16(0); 748 for (index = 0; index < ICE_XLT1_CNT; index++) 749 sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg; 750 751 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); 752 753 ice_pkg_buf_free(hw, bld); 754 755 return status; 756 } 757 758 /** 759 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype) 760 * @hw: pointer to the hardware structure 761 * @blk: HW block 762 * @ptype: the ptype to search for 763 * @ptg: pointer to variable that receives the PTG 764 * 765 * This function will search the PTGs for a particular ptype, returning the 766 * PTG ID that contains it through the PTG parameter, with the value of 767 * ICE_DEFAULT_PTG (0) meaning it is part the default PTG. 768 */ 769 static enum ice_status 770 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) 771 { 772 if (ptype >= ICE_XLT1_CNT || !ptg) 773 return ICE_ERR_PARAM; 774 775 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; 776 return ICE_SUCCESS; 777 } 778 779 /** 780 * ice_ptg_alloc_val - Allocates a new packet type group ID by value 781 * @hw: pointer to the hardware structure 782 * @blk: HW block 783 * @ptg: the PTG to allocate 784 * 785 * This function allocates a given packet type group ID specified by the PTG 786 * parameter. 787 */ 788 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg) 789 { 790 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true; 791 } 792 793 /** 794 * ice_ptg_free - Frees a packet type group 795 * @hw: pointer to the hardware structure 796 * @blk: HW block 797 * @ptg: the PTG ID to free 798 * 799 * This function frees a packet type group, and returns all the current ptypes 800 * within it to the default PTG. 801 */ 802 void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg) 803 { 804 struct ice_ptg_ptype *p, *temp; 805 806 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false; 807 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; 808 while (p) { 809 p->ptg = ICE_DEFAULT_PTG; 810 temp = p->next_ptype; 811 p->next_ptype = NULL; 812 p = temp; 813 } 814 815 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL; 816 } 817 818 /** 819 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group 820 * @hw: pointer to the hardware structure 821 * @blk: HW block 822 * @ptype: the ptype to remove 823 * @ptg: the PTG to remove the ptype from 824 * 825 * This function will remove the ptype from the specific PTG, and move it to 826 * the default PTG (ICE_DEFAULT_PTG). 827 */ 828 static enum ice_status 829 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) 830 { 831 struct ice_ptg_ptype **ch; 832 struct ice_ptg_ptype *p; 833 834 if (ptype > ICE_XLT1_CNT - 1) 835 return ICE_ERR_PARAM; 836 837 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) 838 return ICE_ERR_DOES_NOT_EXIST; 839 840 /* Should not happen if .in_use is set, bad config */ 841 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) 842 return ICE_ERR_CFG; 843 844 /* find the ptype within this PTG, and bypass the link over it */ 845 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; 846 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; 847 while (p) { 848 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) { 849 *ch = p->next_ptype; 850 break; 851 } 852 853 ch = &p->next_ptype; 854 p = p->next_ptype; 855 } 856 857 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG; 858 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL; 859 860 return ICE_SUCCESS; 861 } 862 863 /** 864 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group 865 * @hw: pointer to the hardware structure 866 * @blk: HW block 867 * @ptype: the ptype to add or move 868 * @ptg: the PTG to add or move the ptype to 869 * 870 * This function will either add or move a ptype to a particular PTG depending 871 * on if the ptype is already part of another group. Note that using a 872 * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the 873 * default PTG. 874 */ 875 static enum ice_status 876 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) 877 { 878 enum ice_status status; 879 u8 original_ptg; 880 881 if (ptype > ICE_XLT1_CNT - 1) 882 return ICE_ERR_PARAM; 883 884 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) 885 return ICE_ERR_DOES_NOT_EXIST; 886 887 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); 888 if (status) 889 return status; 890 891 /* Is ptype already in the correct PTG? */ 892 if (original_ptg == ptg) 893 return ICE_SUCCESS; 894 895 /* Remove from original PTG and move back to the default PTG */ 896 if (original_ptg != ICE_DEFAULT_PTG) 897 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg); 898 899 /* Moving to default PTG? Then we're done with this request */ 900 if (ptg == ICE_DEFAULT_PTG) 901 return ICE_SUCCESS; 902 903 /* Add ptype to PTG at beginning of list */ 904 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = 905 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; 906 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = 907 &hw->blk[blk].xlt1.ptypes[ptype]; 908 909 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg; 910 hw->blk[blk].xlt1.t[ptype] = ptg; 911 912 return ICE_SUCCESS; 913 } 914 915 /* Block / table size info */ 916 struct ice_blk_size_details { 917 u16 xlt1; /* # XLT1 entries */ 918 u16 xlt2; /* # XLT2 entries */ 919 u16 prof_tcam; /* # profile ID TCAM entries */ 920 u16 prof_id; /* # profile IDs */ 921 u8 prof_cdid_bits; /* # CDID one-hot bits used in key */ 922 u16 prof_redir; /* # profile redirection entries */ 923 u16 es; /* # extraction sequence entries */ 924 u16 fvw; /* # field vector words */ 925 u8 overwrite; /* overwrite existing entries allowed */ 926 u8 reverse; /* reverse FV order */ 927 }; 928 929 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = { 930 /** 931 * Table Definitions 932 * XLT1 - Number of entries in XLT1 table 933 * XLT2 - Number of entries in XLT2 table 934 * TCAM - Number of entries Profile ID TCAM table 935 * CDID - Control Domain ID of the hardware block 936 * PRED - Number of entries in the Profile Redirection Table 937 * FV - Number of entries in the Field Vector 938 * FVW - Width (in WORDs) of the Field Vector 939 * OVR - Overwrite existing table entries 940 * REV - Reverse FV 941 */ 942 /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */ 943 /* Overwrite , Reverse FV */ 944 /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48, 945 false, false }, 946 /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32, 947 false, false }, 948 /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, 949 false, true }, 950 /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, 951 true, true }, 952 /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24, 953 false, false }, 954 }; 955 956 enum ice_sid_all { 957 ICE_SID_XLT1_OFF = 0, 958 ICE_SID_XLT2_OFF, 959 ICE_SID_PR_OFF, 960 ICE_SID_PR_REDIR_OFF, 961 ICE_SID_ES_OFF, 962 ICE_SID_OFF_COUNT, 963 }; 964 965 /* Characteristic handling */ 966 967 /** 968 * ice_match_prop_lst - determine if properties of two lists match 969 * @list1: first properties list 970 * @list2: second properties list 971 * 972 * Count, cookies and the order must match in order to be considered equivalent. 973 */ 974 static bool 975 ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2) 976 { 977 struct ice_vsig_prof *tmp1; 978 struct ice_vsig_prof *tmp2; 979 u16 chk_count = 0; 980 u16 count = 0; 981 982 /* compare counts */ 983 LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) 984 count++; 985 LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) 986 chk_count++; 987 if (!count || count != chk_count) 988 return false; 989 990 tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list); 991 tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list); 992 993 /* profile cookies must compare, and in the exact same order to take 994 * into account priority 995 */ 996 while (count--) { 997 if (tmp2->profile_cookie != tmp1->profile_cookie) 998 return false; 999 1000 tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list); 1001 tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list); 1002 } 1003 1004 return true; 1005 } 1006 1007 /* VSIG Management */ 1008 1009 /** 1010 * ice_vsig_update_xlt2_sect - update one section of XLT2 table 1011 * @hw: pointer to the hardware structure 1012 * @blk: HW block 1013 * @vsi: HW VSI number to program 1014 * @vsig: VSIG for the VSI 1015 * 1016 * This function will update the XLT2 hardware table with the input VSI 1017 * group configuration. 1018 */ 1019 static enum ice_status 1020 ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi, 1021 u16 vsig) 1022 { 1023 struct ice_xlt2_section *sect; 1024 struct ice_buf_build *bld; 1025 enum ice_status status; 1026 1027 bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2), 1028 ice_struct_size(sect, value, 1), 1029 (void **)§); 1030 if (!bld) 1031 return ICE_ERR_NO_MEMORY; 1032 1033 sect->count = CPU_TO_LE16(1); 1034 sect->offset = CPU_TO_LE16(vsi); 1035 sect->value[0] = CPU_TO_LE16(vsig); 1036 1037 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); 1038 1039 ice_pkg_buf_free(hw, bld); 1040 1041 return status; 1042 } 1043 1044 /** 1045 * ice_vsig_update_xlt2 - update XLT2 table with VSIG configuration 1046 * @hw: pointer to the hardware structure 1047 * @blk: HW block 1048 * 1049 * This function will update the XLT2 hardware table with the input VSI 1050 * group configuration of used vsis. 1051 */ 1052 enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk) 1053 { 1054 u16 vsi; 1055 1056 for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) { 1057 /* update only vsis that have been changed */ 1058 if (hw->blk[blk].xlt2.vsis[vsi].changed) { 1059 enum ice_status status; 1060 u16 vsig; 1061 1062 vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; 1063 status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig); 1064 if (status) 1065 return status; 1066 1067 hw->blk[blk].xlt2.vsis[vsi].changed = 0; 1068 } 1069 } 1070 1071 return ICE_SUCCESS; 1072 } 1073 1074 /** 1075 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI 1076 * @hw: pointer to the hardware structure 1077 * @blk: HW block 1078 * @vsi: VSI of interest 1079 * @vsig: pointer to receive the VSI group 1080 * 1081 * This function will lookup the VSI entry in the XLT2 list and return 1082 * the VSI group its associated with. 1083 */ 1084 enum ice_status 1085 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) 1086 { 1087 if (!vsig || vsi >= ICE_MAX_VSI) 1088 return ICE_ERR_PARAM; 1089 1090 /* As long as there's a default or valid VSIG associated with the input 1091 * VSI, the functions returns a success. Any handling of VSIG will be 1092 * done by the following add, update or remove functions. 1093 */ 1094 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; 1095 1096 return ICE_SUCCESS; 1097 } 1098 1099 /** 1100 * ice_vsig_alloc_val - allocate a new VSIG by value 1101 * @hw: pointer to the hardware structure 1102 * @blk: HW block 1103 * @vsig: the VSIG to allocate 1104 * 1105 * This function will allocate a given VSIG specified by the VSIG parameter. 1106 */ 1107 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig) 1108 { 1109 u16 idx = vsig & ICE_VSIG_IDX_M; 1110 1111 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) { 1112 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); 1113 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true; 1114 } 1115 1116 return ICE_VSIG_VALUE(idx, hw->pf_id); 1117 } 1118 1119 /** 1120 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG 1121 * @hw: pointer to the hardware structure 1122 * @blk: HW block 1123 * 1124 * This function will iterate through the VSIG list and mark the first 1125 * unused entry for the new VSIG entry as used and return that value. 1126 */ 1127 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk) 1128 { 1129 u16 i; 1130 1131 for (i = 1; i < ICE_MAX_VSIGS; i++) 1132 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use) 1133 return ice_vsig_alloc_val(hw, blk, i); 1134 1135 return ICE_DEFAULT_VSIG; 1136 } 1137 1138 /** 1139 * ice_find_dup_props_vsig - find VSI group with a specified set of properties 1140 * @hw: pointer to the hardware structure 1141 * @blk: HW block 1142 * @chs: characteristic list 1143 * @vsig: returns the VSIG with the matching profiles, if found 1144 * 1145 * Each VSIG is associated with a characteristic set; i.e. all VSIs under 1146 * a group have the same characteristic set. To check if there exists a VSIG 1147 * which has the same characteristics as the input characteristics; this 1148 * function will iterate through the XLT2 list and return the VSIG that has a 1149 * matching configuration. In order to make sure that priorities are accounted 1150 * for, the list must match exactly, including the order in which the 1151 * characteristics are listed. 1152 */ 1153 static enum ice_status 1154 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, 1155 struct LIST_HEAD_TYPE *chs, u16 *vsig) 1156 { 1157 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2; 1158 u16 i; 1159 1160 for (i = 0; i < xlt2->count; i++) 1161 if (xlt2->vsig_tbl[i].in_use && 1162 ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) { 1163 *vsig = ICE_VSIG_VALUE(i, hw->pf_id); 1164 return ICE_SUCCESS; 1165 } 1166 1167 return ICE_ERR_DOES_NOT_EXIST; 1168 } 1169 1170 /** 1171 * ice_vsig_free - free VSI group 1172 * @hw: pointer to the hardware structure 1173 * @blk: HW block 1174 * @vsig: VSIG to remove 1175 * 1176 * The function will remove all VSIs associated with the input VSIG and move 1177 * them to the DEFAULT_VSIG and mark the VSIG available. 1178 */ 1179 static enum ice_status 1180 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) 1181 { 1182 struct ice_vsig_prof *dtmp, *del; 1183 struct ice_vsig_vsi *vsi_cur; 1184 u16 idx; 1185 1186 idx = vsig & ICE_VSIG_IDX_M; 1187 if (idx >= ICE_MAX_VSIGS) 1188 return ICE_ERR_PARAM; 1189 1190 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) 1191 return ICE_ERR_DOES_NOT_EXIST; 1192 1193 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false; 1194 1195 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 1196 /* If the VSIG has at least 1 VSI then iterate through the 1197 * list and remove the VSIs before deleting the group. 1198 */ 1199 if (vsi_cur) { 1200 /* remove all vsis associated with this VSIG XLT2 entry */ 1201 do { 1202 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; 1203 1204 vsi_cur->vsig = ICE_DEFAULT_VSIG; 1205 vsi_cur->changed = 1; 1206 vsi_cur->next_vsi = NULL; 1207 vsi_cur = tmp; 1208 } while (vsi_cur); 1209 1210 /* NULL terminate head of VSI list */ 1211 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL; 1212 } 1213 1214 /* free characteristic list */ 1215 LIST_FOR_EACH_ENTRY_SAFE(del, dtmp, 1216 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 1217 ice_vsig_prof, list) { 1218 LIST_DEL(&del->list); 1219 ice_free(hw, del); 1220 } 1221 1222 /* if VSIG characteristic list was cleared for reset 1223 * re-initialize the list head 1224 */ 1225 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); 1226 1227 return ICE_SUCCESS; 1228 } 1229 1230 /** 1231 * ice_vsig_remove_vsi - remove VSI from VSIG 1232 * @hw: pointer to the hardware structure 1233 * @blk: HW block 1234 * @vsi: VSI to remove 1235 * @vsig: VSI group to remove from 1236 * 1237 * The function will remove the input VSI from its VSI group and move it 1238 * to the DEFAULT_VSIG. 1239 */ 1240 static enum ice_status 1241 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) 1242 { 1243 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; 1244 u16 idx; 1245 1246 idx = vsig & ICE_VSIG_IDX_M; 1247 1248 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) 1249 return ICE_ERR_PARAM; 1250 1251 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) 1252 return ICE_ERR_DOES_NOT_EXIST; 1253 1254 /* entry already in default VSIG, don't have to remove */ 1255 if (idx == ICE_DEFAULT_VSIG) 1256 return ICE_SUCCESS; 1257 1258 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 1259 if (!(*vsi_head)) 1260 return ICE_ERR_CFG; 1261 1262 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; 1263 vsi_cur = (*vsi_head); 1264 1265 /* iterate the VSI list, skip over the entry to be removed */ 1266 while (vsi_cur) { 1267 if (vsi_tgt == vsi_cur) { 1268 (*vsi_head) = vsi_cur->next_vsi; 1269 break; 1270 } 1271 vsi_head = &vsi_cur->next_vsi; 1272 vsi_cur = vsi_cur->next_vsi; 1273 } 1274 1275 /* verify if VSI was removed from group list */ 1276 if (!vsi_cur) 1277 return ICE_ERR_DOES_NOT_EXIST; 1278 1279 vsi_cur->vsig = ICE_DEFAULT_VSIG; 1280 vsi_cur->changed = 1; 1281 vsi_cur->next_vsi = NULL; 1282 1283 return ICE_SUCCESS; 1284 } 1285 1286 /** 1287 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group 1288 * @hw: pointer to the hardware structure 1289 * @blk: HW block 1290 * @vsi: VSI to move 1291 * @vsig: destination VSI group 1292 * 1293 * This function will move or add the input VSI to the target VSIG. 1294 * The function will find the original VSIG the VSI belongs to and 1295 * move the entry to the DEFAULT_VSIG, update the original VSIG and 1296 * then move entry to the new VSIG. 1297 */ 1298 static enum ice_status 1299 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) 1300 { 1301 struct ice_vsig_vsi *tmp; 1302 enum ice_status status; 1303 u16 orig_vsig, idx; 1304 1305 idx = vsig & ICE_VSIG_IDX_M; 1306 1307 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) 1308 return ICE_ERR_PARAM; 1309 1310 /* if VSIG not in use and VSIG is not default type this VSIG 1311 * doesn't exist. 1312 */ 1313 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && 1314 vsig != ICE_DEFAULT_VSIG) 1315 return ICE_ERR_DOES_NOT_EXIST; 1316 1317 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); 1318 if (status) 1319 return status; 1320 1321 /* no update required if vsigs match */ 1322 if (orig_vsig == vsig) 1323 return ICE_SUCCESS; 1324 1325 if (orig_vsig != ICE_DEFAULT_VSIG) { 1326 /* remove entry from orig_vsig and add to default VSIG */ 1327 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig); 1328 if (status) 1329 return status; 1330 } 1331 1332 if (idx == ICE_DEFAULT_VSIG) 1333 return ICE_SUCCESS; 1334 1335 /* Create VSI entry and add VSIG and prop_mask values */ 1336 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig; 1337 hw->blk[blk].xlt2.vsis[vsi].changed = 1; 1338 1339 /* Add new entry to the head of the VSIG list */ 1340 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 1341 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = 1342 &hw->blk[blk].xlt2.vsis[vsi]; 1343 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp; 1344 hw->blk[blk].xlt2.t[vsi] = vsig; 1345 1346 return ICE_SUCCESS; 1347 } 1348 1349 /** 1350 * ice_find_prof_id - find profile ID for a given field vector 1351 * @hw: pointer to the hardware structure 1352 * @blk: HW block 1353 * @fv: field vector to search for 1354 * @prof_id: receives the profile ID 1355 */ 1356 static enum ice_status 1357 ice_find_prof_id(struct ice_hw *hw, enum ice_block blk, 1358 struct ice_fv_word *fv, u8 *prof_id) 1359 { 1360 struct ice_es *es = &hw->blk[blk].es; 1361 u16 off; 1362 u8 i; 1363 1364 for (i = 0; i < (u8)es->count; i++) { 1365 off = i * es->fvw; 1366 1367 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv))) 1368 continue; 1369 1370 *prof_id = i; 1371 return ICE_SUCCESS; 1372 } 1373 1374 return ICE_ERR_DOES_NOT_EXIST; 1375 } 1376 1377 /** 1378 * ice_prof_id_rsrc_type - get profile ID resource type for a block type 1379 * @blk: the block type 1380 * @rsrc_type: pointer to variable to receive the resource type 1381 */ 1382 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type) 1383 { 1384 switch (blk) { 1385 case ICE_BLK_RSS: 1386 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID; 1387 break; 1388 case ICE_BLK_PE: 1389 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID; 1390 break; 1391 default: 1392 return false; 1393 } 1394 return true; 1395 } 1396 1397 /** 1398 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type 1399 * @blk: the block type 1400 * @rsrc_type: pointer to variable to receive the resource type 1401 */ 1402 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) 1403 { 1404 switch (blk) { 1405 case ICE_BLK_RSS: 1406 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM; 1407 break; 1408 case ICE_BLK_PE: 1409 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM; 1410 break; 1411 default: 1412 return false; 1413 } 1414 return true; 1415 } 1416 1417 /** 1418 * ice_alloc_tcam_ent - allocate hardware TCAM entry 1419 * @hw: pointer to the HW struct 1420 * @blk: the block to allocate the TCAM for 1421 * @btm: true to allocate from bottom of table, false to allocate from top 1422 * @tcam_idx: pointer to variable to receive the TCAM entry 1423 * 1424 * This function allocates a new entry in a Profile ID TCAM for a specific 1425 * block. 1426 */ 1427 static enum ice_status 1428 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, 1429 u16 *tcam_idx) 1430 { 1431 u16 res_type; 1432 1433 if (!ice_tcam_ent_rsrc_type(blk, &res_type)) 1434 return ICE_ERR_PARAM; 1435 1436 return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx); 1437 } 1438 1439 /** 1440 * ice_free_tcam_ent - free hardware TCAM entry 1441 * @hw: pointer to the HW struct 1442 * @blk: the block from which to free the TCAM entry 1443 * @tcam_idx: the TCAM entry to free 1444 * 1445 * This function frees an entry in a Profile ID TCAM for a specific block. 1446 */ 1447 static enum ice_status 1448 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) 1449 { 1450 u16 res_type; 1451 1452 if (!ice_tcam_ent_rsrc_type(blk, &res_type)) 1453 return ICE_ERR_PARAM; 1454 1455 return ice_free_hw_res(hw, res_type, 1, &tcam_idx); 1456 } 1457 1458 /** 1459 * ice_alloc_prof_id - allocate profile ID 1460 * @hw: pointer to the HW struct 1461 * @blk: the block to allocate the profile ID for 1462 * @prof_id: pointer to variable to receive the profile ID 1463 * 1464 * This function allocates a new profile ID, which also corresponds to a Field 1465 * Vector (Extraction Sequence) entry. 1466 */ 1467 static enum ice_status 1468 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) 1469 { 1470 enum ice_status status; 1471 u16 res_type; 1472 u16 get_prof; 1473 1474 if (!ice_prof_id_rsrc_type(blk, &res_type)) 1475 return ICE_ERR_PARAM; 1476 1477 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof); 1478 if (!status) 1479 *prof_id = (u8)get_prof; 1480 1481 return status; 1482 } 1483 1484 /** 1485 * ice_free_prof_id - free profile ID 1486 * @hw: pointer to the HW struct 1487 * @blk: the block from which to free the profile ID 1488 * @prof_id: the profile ID to free 1489 * 1490 * This function frees a profile ID, which also corresponds to a Field Vector. 1491 */ 1492 static enum ice_status 1493 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) 1494 { 1495 u16 tmp_prof_id = (u16)prof_id; 1496 u16 res_type; 1497 1498 if (!ice_prof_id_rsrc_type(blk, &res_type)) 1499 return ICE_ERR_PARAM; 1500 1501 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id); 1502 } 1503 1504 /** 1505 * ice_prof_inc_ref - increment reference count for profile 1506 * @hw: pointer to the HW struct 1507 * @blk: the block from which to free the profile ID 1508 * @prof_id: the profile ID for which to increment the reference count 1509 */ 1510 static enum ice_status 1511 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) 1512 { 1513 if (prof_id > hw->blk[blk].es.count) 1514 return ICE_ERR_PARAM; 1515 1516 hw->blk[blk].es.ref_count[prof_id]++; 1517 1518 return ICE_SUCCESS; 1519 } 1520 1521 /** 1522 * ice_write_es - write an extraction sequence to hardware 1523 * @hw: pointer to the HW struct 1524 * @blk: the block in which to write the extraction sequence 1525 * @prof_id: the profile ID to write 1526 * @fv: pointer to the extraction sequence to write - NULL to clear extraction 1527 */ 1528 static void 1529 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, 1530 struct ice_fv_word *fv) 1531 { 1532 u16 off; 1533 1534 off = prof_id * hw->blk[blk].es.fvw; 1535 if (!fv) { 1536 ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw * 1537 sizeof(*fv), ICE_NONDMA_MEM); 1538 hw->blk[blk].es.written[prof_id] = false; 1539 } else { 1540 ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw * 1541 sizeof(*fv), ICE_NONDMA_TO_NONDMA); 1542 } 1543 } 1544 1545 /** 1546 * ice_prof_dec_ref - decrement reference count for profile 1547 * @hw: pointer to the HW struct 1548 * @blk: the block from which to free the profile ID 1549 * @prof_id: the profile ID for which to decrement the reference count 1550 */ 1551 static enum ice_status 1552 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) 1553 { 1554 if (prof_id > hw->blk[blk].es.count) 1555 return ICE_ERR_PARAM; 1556 1557 if (hw->blk[blk].es.ref_count[prof_id] > 0) { 1558 if (!--hw->blk[blk].es.ref_count[prof_id]) { 1559 ice_write_es(hw, blk, prof_id, NULL); 1560 return ice_free_prof_id(hw, blk, prof_id); 1561 } 1562 } 1563 1564 return ICE_SUCCESS; 1565 } 1566 1567 /* Block / table section IDs */ 1568 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = { 1569 /* SWITCH */ 1570 { ICE_SID_XLT1_SW, 1571 ICE_SID_XLT2_SW, 1572 ICE_SID_PROFID_TCAM_SW, 1573 ICE_SID_PROFID_REDIR_SW, 1574 ICE_SID_FLD_VEC_SW 1575 }, 1576 1577 /* ACL */ 1578 { ICE_SID_XLT1_ACL, 1579 ICE_SID_XLT2_ACL, 1580 ICE_SID_PROFID_TCAM_ACL, 1581 ICE_SID_PROFID_REDIR_ACL, 1582 ICE_SID_FLD_VEC_ACL 1583 }, 1584 1585 /* FD */ 1586 { ICE_SID_XLT1_FD, 1587 ICE_SID_XLT2_FD, 1588 ICE_SID_PROFID_TCAM_FD, 1589 ICE_SID_PROFID_REDIR_FD, 1590 ICE_SID_FLD_VEC_FD 1591 }, 1592 1593 /* RSS */ 1594 { ICE_SID_XLT1_RSS, 1595 ICE_SID_XLT2_RSS, 1596 ICE_SID_PROFID_TCAM_RSS, 1597 ICE_SID_PROFID_REDIR_RSS, 1598 ICE_SID_FLD_VEC_RSS 1599 }, 1600 1601 /* PE */ 1602 { ICE_SID_XLT1_PE, 1603 ICE_SID_XLT2_PE, 1604 ICE_SID_PROFID_TCAM_PE, 1605 ICE_SID_PROFID_REDIR_PE, 1606 ICE_SID_FLD_VEC_PE 1607 } 1608 }; 1609 1610 /** 1611 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables 1612 * @hw: pointer to the hardware structure 1613 * @blk: the HW block to initialize 1614 */ 1615 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk) 1616 { 1617 u16 pt; 1618 1619 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) { 1620 u8 ptg; 1621 1622 ptg = hw->blk[blk].xlt1.t[pt]; 1623 if (ptg != ICE_DEFAULT_PTG) { 1624 ice_ptg_alloc_val(hw, blk, ptg); 1625 ice_ptg_add_mv_ptype(hw, blk, pt, ptg); 1626 } 1627 } 1628 } 1629 1630 /** 1631 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables 1632 * @hw: pointer to the hardware structure 1633 * @blk: the HW block to initialize 1634 */ 1635 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk) 1636 { 1637 u16 vsi; 1638 1639 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) { 1640 u16 vsig; 1641 1642 vsig = hw->blk[blk].xlt2.t[vsi]; 1643 if (vsig) { 1644 ice_vsig_alloc_val(hw, blk, vsig); 1645 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); 1646 /* no changes at this time, since this has been 1647 * initialized from the original package 1648 */ 1649 hw->blk[blk].xlt2.vsis[vsi].changed = 0; 1650 } 1651 } 1652 } 1653 1654 /** 1655 * ice_init_sw_db - init software database from HW tables 1656 * @hw: pointer to the hardware structure 1657 */ 1658 static void ice_init_sw_db(struct ice_hw *hw) 1659 { 1660 u16 i; 1661 1662 for (i = 0; i < ICE_BLK_COUNT; i++) { 1663 ice_init_sw_xlt1_db(hw, (enum ice_block)i); 1664 ice_init_sw_xlt2_db(hw, (enum ice_block)i); 1665 } 1666 } 1667 1668 /** 1669 * ice_fill_tbl - Reads content of a single table type into database 1670 * @hw: pointer to the hardware structure 1671 * @block_id: Block ID of the table to copy 1672 * @sid: Section ID of the table to copy 1673 * 1674 * Will attempt to read the entire content of a given table of a single block 1675 * into the driver database. We assume that the buffer will always 1676 * be as large or larger than the data contained in the package. If 1677 * this condition is not met, there is most likely an error in the package 1678 * contents. 1679 */ 1680 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid) 1681 { 1682 u32 dst_len, sect_len, offset = 0; 1683 struct ice_prof_redir_section *pr; 1684 struct ice_prof_id_section *pid; 1685 struct ice_xlt1_section *xlt1; 1686 struct ice_xlt2_section *xlt2; 1687 struct ice_sw_fv_section *es; 1688 struct ice_pkg_enum state; 1689 u8 *src, *dst; 1690 void *sect; 1691 1692 /* if the HW segment pointer is null then the first iteration of 1693 * ice_pkg_enum_section() will fail. In this case the HW tables will 1694 * not be filled and return success. 1695 */ 1696 if (!hw->seg) { 1697 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n"); 1698 return; 1699 } 1700 1701 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); 1702 1703 sect = ice_pkg_enum_section(hw->seg, &state, sid); 1704 1705 while (sect) { 1706 switch (sid) { 1707 case ICE_SID_XLT1_SW: 1708 case ICE_SID_XLT1_FD: 1709 case ICE_SID_XLT1_RSS: 1710 case ICE_SID_XLT1_ACL: 1711 case ICE_SID_XLT1_PE: 1712 xlt1 = (struct ice_xlt1_section *)sect; 1713 src = xlt1->value; 1714 sect_len = LE16_TO_CPU(xlt1->count) * 1715 sizeof(*hw->blk[block_id].xlt1.t); 1716 dst = hw->blk[block_id].xlt1.t; 1717 dst_len = hw->blk[block_id].xlt1.count * 1718 sizeof(*hw->blk[block_id].xlt1.t); 1719 break; 1720 case ICE_SID_XLT2_SW: 1721 case ICE_SID_XLT2_FD: 1722 case ICE_SID_XLT2_RSS: 1723 case ICE_SID_XLT2_ACL: 1724 case ICE_SID_XLT2_PE: 1725 xlt2 = (struct ice_xlt2_section *)sect; 1726 src = (_FORCE_ u8 *)xlt2->value; 1727 sect_len = LE16_TO_CPU(xlt2->count) * 1728 sizeof(*hw->blk[block_id].xlt2.t); 1729 dst = (u8 *)hw->blk[block_id].xlt2.t; 1730 dst_len = hw->blk[block_id].xlt2.count * 1731 sizeof(*hw->blk[block_id].xlt2.t); 1732 break; 1733 case ICE_SID_PROFID_TCAM_SW: 1734 case ICE_SID_PROFID_TCAM_FD: 1735 case ICE_SID_PROFID_TCAM_RSS: 1736 case ICE_SID_PROFID_TCAM_ACL: 1737 case ICE_SID_PROFID_TCAM_PE: 1738 pid = (struct ice_prof_id_section *)sect; 1739 src = (u8 *)pid->entry; 1740 sect_len = LE16_TO_CPU(pid->count) * 1741 sizeof(*hw->blk[block_id].prof.t); 1742 dst = (u8 *)hw->blk[block_id].prof.t; 1743 dst_len = hw->blk[block_id].prof.count * 1744 sizeof(*hw->blk[block_id].prof.t); 1745 break; 1746 case ICE_SID_PROFID_REDIR_SW: 1747 case ICE_SID_PROFID_REDIR_FD: 1748 case ICE_SID_PROFID_REDIR_RSS: 1749 case ICE_SID_PROFID_REDIR_ACL: 1750 case ICE_SID_PROFID_REDIR_PE: 1751 pr = (struct ice_prof_redir_section *)sect; 1752 src = pr->redir_value; 1753 sect_len = LE16_TO_CPU(pr->count) * 1754 sizeof(*hw->blk[block_id].prof_redir.t); 1755 dst = hw->blk[block_id].prof_redir.t; 1756 dst_len = hw->blk[block_id].prof_redir.count * 1757 sizeof(*hw->blk[block_id].prof_redir.t); 1758 break; 1759 case ICE_SID_FLD_VEC_SW: 1760 case ICE_SID_FLD_VEC_FD: 1761 case ICE_SID_FLD_VEC_RSS: 1762 case ICE_SID_FLD_VEC_ACL: 1763 case ICE_SID_FLD_VEC_PE: 1764 es = (struct ice_sw_fv_section *)sect; 1765 src = (u8 *)es->fv; 1766 sect_len = (u32)(LE16_TO_CPU(es->count) * 1767 hw->blk[block_id].es.fvw) * 1768 sizeof(*hw->blk[block_id].es.t); 1769 dst = (u8 *)hw->blk[block_id].es.t; 1770 dst_len = (u32)(hw->blk[block_id].es.count * 1771 hw->blk[block_id].es.fvw) * 1772 sizeof(*hw->blk[block_id].es.t); 1773 break; 1774 default: 1775 return; 1776 } 1777 1778 /* if the section offset exceeds destination length, terminate 1779 * table fill. 1780 */ 1781 if (offset > dst_len) 1782 return; 1783 1784 /* if the sum of section size and offset exceed destination size 1785 * then we are out of bounds of the HW table size for that PF. 1786 * Changing section length to fill the remaining table space 1787 * of that PF. 1788 */ 1789 if ((offset + sect_len) > dst_len) 1790 sect_len = dst_len - offset; 1791 1792 ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA); 1793 offset += sect_len; 1794 sect = ice_pkg_enum_section(NULL, &state, sid); 1795 } 1796 } 1797 1798 /** 1799 * ice_init_flow_profs - init flow profile locks and list heads 1800 * @hw: pointer to the hardware structure 1801 * @blk_idx: HW block index 1802 */ 1803 static 1804 void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx) 1805 { 1806 ice_init_lock(&hw->fl_profs_locks[blk_idx]); 1807 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); 1808 } 1809 1810 /** 1811 * ice_init_hw_tbls - init hardware table memory 1812 * @hw: pointer to the hardware structure 1813 */ 1814 enum ice_status ice_init_hw_tbls(struct ice_hw *hw) 1815 { 1816 u8 i; 1817 1818 ice_init_lock(&hw->rss_locks); 1819 INIT_LIST_HEAD(&hw->rss_list_head); 1820 for (i = 0; i < ICE_BLK_COUNT; i++) { 1821 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; 1822 struct ice_prof_tcam *prof = &hw->blk[i].prof; 1823 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; 1824 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; 1825 struct ice_es *es = &hw->blk[i].es; 1826 u16 j; 1827 1828 if (hw->blk[i].is_list_init) 1829 continue; 1830 1831 ice_init_flow_profs(hw, i); 1832 ice_init_lock(&es->prof_map_lock); 1833 INIT_LIST_HEAD(&es->prof_map); 1834 hw->blk[i].is_list_init = true; 1835 1836 hw->blk[i].overwrite = blk_sizes[i].overwrite; 1837 es->reverse = blk_sizes[i].reverse; 1838 1839 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; 1840 xlt1->count = blk_sizes[i].xlt1; 1841 1842 xlt1->ptypes = (struct ice_ptg_ptype *) 1843 ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes)); 1844 1845 if (!xlt1->ptypes) 1846 goto err; 1847 1848 xlt1->ptg_tbl = (struct ice_ptg_entry *) 1849 ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl)); 1850 1851 if (!xlt1->ptg_tbl) 1852 goto err; 1853 1854 xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t)); 1855 if (!xlt1->t) 1856 goto err; 1857 1858 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; 1859 xlt2->count = blk_sizes[i].xlt2; 1860 1861 xlt2->vsis = (struct ice_vsig_vsi *) 1862 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis)); 1863 1864 if (!xlt2->vsis) 1865 goto err; 1866 1867 xlt2->vsig_tbl = (struct ice_vsig_entry *) 1868 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl)); 1869 if (!xlt2->vsig_tbl) 1870 goto err; 1871 1872 for (j = 0; j < xlt2->count; j++) 1873 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); 1874 1875 xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t)); 1876 if (!xlt2->t) 1877 goto err; 1878 1879 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; 1880 prof->count = blk_sizes[i].prof_tcam; 1881 prof->max_prof_id = blk_sizes[i].prof_id; 1882 prof->cdid_bits = blk_sizes[i].prof_cdid_bits; 1883 prof->t = (struct ice_prof_tcam_entry *) 1884 ice_calloc(hw, prof->count, sizeof(*prof->t)); 1885 1886 if (!prof->t) 1887 goto err; 1888 1889 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; 1890 prof_redir->count = blk_sizes[i].prof_redir; 1891 prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count, 1892 sizeof(*prof_redir->t)); 1893 1894 if (!prof_redir->t) 1895 goto err; 1896 1897 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; 1898 es->count = blk_sizes[i].es; 1899 es->fvw = blk_sizes[i].fvw; 1900 es->t = (struct ice_fv_word *) 1901 ice_calloc(hw, (u32)(es->count * es->fvw), 1902 sizeof(*es->t)); 1903 if (!es->t) 1904 goto err; 1905 1906 es->ref_count = (u16 *) 1907 ice_calloc(hw, es->count, sizeof(*es->ref_count)); 1908 1909 if (!es->ref_count) 1910 goto err; 1911 1912 es->written = (u8 *) 1913 ice_calloc(hw, es->count, sizeof(*es->written)); 1914 1915 if (!es->written) 1916 goto err; 1917 1918 } 1919 return ICE_SUCCESS; 1920 1921 err: 1922 ice_free_hw_tbls(hw); 1923 return ICE_ERR_NO_MEMORY; 1924 } 1925 1926 /** 1927 * ice_fill_blk_tbls - Read package context for tables 1928 * @hw: pointer to the hardware structure 1929 * 1930 * Reads the current package contents and populates the driver 1931 * database with the data iteratively for all advanced feature 1932 * blocks. Assume that the HW tables have been allocated. 1933 */ 1934 void ice_fill_blk_tbls(struct ice_hw *hw) 1935 { 1936 u8 i; 1937 1938 for (i = 0; i < ICE_BLK_COUNT; i++) { 1939 enum ice_block blk_id = (enum ice_block)i; 1940 1941 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid); 1942 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid); 1943 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid); 1944 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid); 1945 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid); 1946 } 1947 1948 ice_init_sw_db(hw); 1949 } 1950 1951 /** 1952 * ice_free_prof_map - free profile map 1953 * @hw: pointer to the hardware structure 1954 * @blk_idx: HW block index 1955 */ 1956 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx) 1957 { 1958 struct ice_es *es = &hw->blk[blk_idx].es; 1959 struct ice_prof_map *del, *tmp; 1960 1961 ice_acquire_lock(&es->prof_map_lock); 1962 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map, 1963 ice_prof_map, list) { 1964 LIST_DEL(&del->list); 1965 ice_free(hw, del); 1966 } 1967 INIT_LIST_HEAD(&es->prof_map); 1968 ice_release_lock(&es->prof_map_lock); 1969 } 1970 1971 /** 1972 * ice_free_flow_profs - free flow profile entries 1973 * @hw: pointer to the hardware structure 1974 * @blk_idx: HW block index 1975 */ 1976 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx) 1977 { 1978 struct ice_flow_prof *p, *tmp; 1979 1980 ice_acquire_lock(&hw->fl_profs_locks[blk_idx]); 1981 LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx], 1982 ice_flow_prof, l_entry) { 1983 LIST_DEL(&p->l_entry); 1984 1985 ice_free(hw, p); 1986 } 1987 ice_release_lock(&hw->fl_profs_locks[blk_idx]); 1988 1989 /* if driver is in reset and tables are being cleared 1990 * re-initialize the flow profile list heads 1991 */ 1992 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); 1993 } 1994 1995 /** 1996 * ice_free_vsig_tbl - free complete VSIG table entries 1997 * @hw: pointer to the hardware structure 1998 * @blk: the HW block on which to free the VSIG table entries 1999 */ 2000 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk) 2001 { 2002 u16 i; 2003 2004 if (!hw->blk[blk].xlt2.vsig_tbl) 2005 return; 2006 2007 for (i = 1; i < ICE_MAX_VSIGS; i++) 2008 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) 2009 ice_vsig_free(hw, blk, i); 2010 } 2011 2012 /** 2013 * ice_free_hw_tbls - free hardware table memory 2014 * @hw: pointer to the hardware structure 2015 */ 2016 void ice_free_hw_tbls(struct ice_hw *hw) 2017 { 2018 struct ice_rss_cfg *r, *rt; 2019 u8 i; 2020 2021 for (i = 0; i < ICE_BLK_COUNT; i++) { 2022 if (hw->blk[i].is_list_init) { 2023 struct ice_es *es = &hw->blk[i].es; 2024 2025 ice_free_prof_map(hw, i); 2026 ice_destroy_lock(&es->prof_map_lock); 2027 2028 ice_free_flow_profs(hw, i); 2029 ice_destroy_lock(&hw->fl_profs_locks[i]); 2030 2031 hw->blk[i].is_list_init = false; 2032 } 2033 ice_free_vsig_tbl(hw, (enum ice_block)i); 2034 ice_free(hw, hw->blk[i].xlt1.ptypes); 2035 ice_free(hw, hw->blk[i].xlt1.ptg_tbl); 2036 ice_free(hw, hw->blk[i].xlt1.t); 2037 ice_free(hw, hw->blk[i].xlt2.t); 2038 ice_free(hw, hw->blk[i].xlt2.vsig_tbl); 2039 ice_free(hw, hw->blk[i].xlt2.vsis); 2040 ice_free(hw, hw->blk[i].prof.t); 2041 ice_free(hw, hw->blk[i].prof_redir.t); 2042 ice_free(hw, hw->blk[i].es.t); 2043 ice_free(hw, hw->blk[i].es.ref_count); 2044 ice_free(hw, hw->blk[i].es.written); 2045 } 2046 2047 LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head, 2048 ice_rss_cfg, l_entry) { 2049 LIST_DEL(&r->l_entry); 2050 ice_free(hw, r); 2051 } 2052 ice_destroy_lock(&hw->rss_locks); 2053 ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM); 2054 } 2055 2056 /** 2057 * ice_clear_hw_tbls - clear HW tables and flow profiles 2058 * @hw: pointer to the hardware structure 2059 */ 2060 void ice_clear_hw_tbls(struct ice_hw *hw) 2061 { 2062 u8 i; 2063 2064 for (i = 0; i < ICE_BLK_COUNT; i++) { 2065 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; 2066 struct ice_prof_tcam *prof = &hw->blk[i].prof; 2067 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; 2068 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; 2069 struct ice_es *es = &hw->blk[i].es; 2070 2071 if (hw->blk[i].is_list_init) { 2072 ice_free_prof_map(hw, i); 2073 ice_free_flow_profs(hw, i); 2074 } 2075 2076 ice_free_vsig_tbl(hw, (enum ice_block)i); 2077 2078 if (xlt1->ptypes) 2079 ice_memset(xlt1->ptypes, 0, 2080 xlt1->count * sizeof(*xlt1->ptypes), 2081 ICE_NONDMA_MEM); 2082 2083 if (xlt1->ptg_tbl) 2084 ice_memset(xlt1->ptg_tbl, 0, 2085 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl), 2086 ICE_NONDMA_MEM); 2087 2088 if (xlt1->t) 2089 ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t), 2090 ICE_NONDMA_MEM); 2091 2092 if (xlt2->vsis) 2093 ice_memset(xlt2->vsis, 0, 2094 xlt2->count * sizeof(*xlt2->vsis), 2095 ICE_NONDMA_MEM); 2096 2097 if (xlt2->vsig_tbl) 2098 ice_memset(xlt2->vsig_tbl, 0, 2099 xlt2->count * sizeof(*xlt2->vsig_tbl), 2100 ICE_NONDMA_MEM); 2101 2102 if (xlt2->t) 2103 ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t), 2104 ICE_NONDMA_MEM); 2105 2106 if (prof->t) 2107 ice_memset(prof->t, 0, prof->count * sizeof(*prof->t), 2108 ICE_NONDMA_MEM); 2109 2110 if (prof_redir->t) 2111 ice_memset(prof_redir->t, 0, 2112 prof_redir->count * sizeof(*prof_redir->t), 2113 ICE_NONDMA_MEM); 2114 2115 if (es->t) 2116 ice_memset(es->t, 0, 2117 es->count * sizeof(*es->t) * es->fvw, 2118 ICE_NONDMA_MEM); 2119 2120 if (es->ref_count) 2121 ice_memset(es->ref_count, 0, 2122 es->count * sizeof(*es->ref_count), 2123 ICE_NONDMA_MEM); 2124 2125 if (es->written) 2126 ice_memset(es->written, 0, 2127 es->count * sizeof(*es->written), 2128 ICE_NONDMA_MEM); 2129 2130 } 2131 } 2132 2133 /** 2134 * ice_prof_gen_key - generate profile ID key 2135 * @hw: pointer to the HW struct 2136 * @blk: the block in which to write profile ID to 2137 * @ptg: packet type group (PTG) portion of key 2138 * @vsig: VSIG portion of key 2139 * @cdid: CDID portion of key 2140 * @flags: flag portion of key 2141 * @vl_msk: valid mask 2142 * @dc_msk: don't care mask 2143 * @nm_msk: never match mask 2144 * @key: output of profile ID key 2145 */ 2146 static enum ice_status 2147 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, 2148 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], 2149 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ], 2150 u8 key[ICE_TCAM_KEY_SZ]) 2151 { 2152 struct ice_prof_id_key inkey; 2153 2154 inkey.xlt1 = ptg; 2155 inkey.xlt2_cdid = CPU_TO_LE16(vsig); 2156 inkey.flags = CPU_TO_LE16(flags); 2157 2158 switch (hw->blk[blk].prof.cdid_bits) { 2159 case 0: 2160 break; 2161 case 2: 2162 #define ICE_CD_2_M 0xC000U 2163 #define ICE_CD_2_S 14 2164 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M); 2165 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S); 2166 break; 2167 case 4: 2168 #define ICE_CD_4_M 0xF000U 2169 #define ICE_CD_4_S 12 2170 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M); 2171 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S); 2172 break; 2173 case 8: 2174 #define ICE_CD_8_M 0xFF00U 2175 #define ICE_CD_8_S 16 2176 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M); 2177 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S); 2178 break; 2179 default: 2180 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n"); 2181 break; 2182 } 2183 2184 return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk, 2185 nm_msk, 0, ICE_TCAM_KEY_SZ / 2); 2186 } 2187 2188 /** 2189 * ice_tcam_write_entry - write TCAM entry 2190 * @hw: pointer to the HW struct 2191 * @blk: the block in which to write profile ID to 2192 * @idx: the entry index to write to 2193 * @prof_id: profile ID 2194 * @ptg: packet type group (PTG) portion of key 2195 * @vsig: VSIG portion of key 2196 * @cdid: CDID portion of key 2197 * @flags: flag portion of key 2198 * @vl_msk: valid mask 2199 * @dc_msk: don't care mask 2200 * @nm_msk: never match mask 2201 */ 2202 static enum ice_status 2203 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, 2204 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags, 2205 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], 2206 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], 2207 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ]) 2208 { 2209 struct ice_prof_tcam_entry; 2210 enum ice_status status; 2211 2212 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk, 2213 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key); 2214 if (!status) { 2215 hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx); 2216 hw->blk[blk].prof.t[idx].prof_id = prof_id; 2217 } 2218 2219 return status; 2220 } 2221 2222 /** 2223 * ice_vsig_get_ref - returns number of VSIs belong to a VSIG 2224 * @hw: pointer to the hardware structure 2225 * @blk: HW block 2226 * @vsig: VSIG to query 2227 * @refs: pointer to variable to receive the reference count 2228 */ 2229 static enum ice_status 2230 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) 2231 { 2232 u16 idx = vsig & ICE_VSIG_IDX_M; 2233 struct ice_vsig_vsi *ptr; 2234 2235 *refs = 0; 2236 2237 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) 2238 return ICE_ERR_DOES_NOT_EXIST; 2239 2240 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 2241 while (ptr) { 2242 (*refs)++; 2243 ptr = ptr->next_vsi; 2244 } 2245 2246 return ICE_SUCCESS; 2247 } 2248 2249 /** 2250 * ice_has_prof_vsig - check to see if VSIG has a specific profile 2251 * @hw: pointer to the hardware structure 2252 * @blk: HW block 2253 * @vsig: VSIG to check against 2254 * @hdl: profile handle 2255 */ 2256 static bool 2257 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) 2258 { 2259 u16 idx = vsig & ICE_VSIG_IDX_M; 2260 struct ice_vsig_prof *ent; 2261 2262 LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 2263 ice_vsig_prof, list) 2264 if (ent->profile_cookie == hdl) 2265 return true; 2266 2267 ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n", 2268 vsig); 2269 return false; 2270 } 2271 2272 /** 2273 * ice_prof_bld_es - build profile ID extraction sequence changes 2274 * @hw: pointer to the HW struct 2275 * @blk: hardware block 2276 * @bld: the update package buffer build to add to 2277 * @chgs: the list of changes to make in hardware 2278 */ 2279 static enum ice_status 2280 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, 2281 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs) 2282 { 2283 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word); 2284 struct ice_chs_chg *tmp; 2285 2286 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) 2287 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) { 2288 u16 off = tmp->prof_id * hw->blk[blk].es.fvw; 2289 struct ice_pkg_es *p; 2290 u32 id; 2291 2292 id = ice_sect_id(blk, ICE_VEC_TBL); 2293 p = (struct ice_pkg_es *) 2294 ice_pkg_buf_alloc_section(bld, id, 2295 ice_struct_size(p, es, 2296 1) + 2297 vec_size - 2298 sizeof(p->es[0])); 2299 2300 if (!p) 2301 return ICE_ERR_MAX_LIMIT; 2302 2303 p->count = CPU_TO_LE16(1); 2304 p->offset = CPU_TO_LE16(tmp->prof_id); 2305 2306 ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size, 2307 ICE_NONDMA_TO_NONDMA); 2308 } 2309 2310 return ICE_SUCCESS; 2311 } 2312 2313 /** 2314 * ice_prof_bld_tcam - build profile ID TCAM changes 2315 * @hw: pointer to the HW struct 2316 * @blk: hardware block 2317 * @bld: the update package buffer build to add to 2318 * @chgs: the list of changes to make in hardware 2319 */ 2320 static enum ice_status 2321 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, 2322 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs) 2323 { 2324 struct ice_chs_chg *tmp; 2325 2326 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) 2327 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) { 2328 struct ice_prof_id_section *p; 2329 u32 id; 2330 2331 id = ice_sect_id(blk, ICE_PROF_TCAM); 2332 p = (struct ice_prof_id_section *) 2333 ice_pkg_buf_alloc_section(bld, id, 2334 ice_struct_size(p, 2335 entry, 2336 1)); 2337 2338 if (!p) 2339 return ICE_ERR_MAX_LIMIT; 2340 2341 p->count = CPU_TO_LE16(1); 2342 p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx); 2343 p->entry[0].prof_id = tmp->prof_id; 2344 2345 ice_memcpy(p->entry[0].key, 2346 &hw->blk[blk].prof.t[tmp->tcam_idx].key, 2347 sizeof(hw->blk[blk].prof.t->key), 2348 ICE_NONDMA_TO_NONDMA); 2349 } 2350 2351 return ICE_SUCCESS; 2352 } 2353 2354 /** 2355 * ice_prof_bld_xlt1 - build XLT1 changes 2356 * @blk: hardware block 2357 * @bld: the update package buffer build to add to 2358 * @chgs: the list of changes to make in hardware 2359 */ 2360 static enum ice_status 2361 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, 2362 struct LIST_HEAD_TYPE *chgs) 2363 { 2364 struct ice_chs_chg *tmp; 2365 2366 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) 2367 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) { 2368 struct ice_xlt1_section *p; 2369 u32 id; 2370 2371 id = ice_sect_id(blk, ICE_XLT1); 2372 p = (struct ice_xlt1_section *) 2373 ice_pkg_buf_alloc_section(bld, id, 2374 ice_struct_size(p, 2375 value, 2376 1)); 2377 2378 if (!p) 2379 return ICE_ERR_MAX_LIMIT; 2380 2381 p->count = CPU_TO_LE16(1); 2382 p->offset = CPU_TO_LE16(tmp->ptype); 2383 p->value[0] = tmp->ptg; 2384 } 2385 2386 return ICE_SUCCESS; 2387 } 2388 2389 /** 2390 * ice_prof_bld_xlt2 - build XLT2 changes 2391 * @blk: hardware block 2392 * @bld: the update package buffer build to add to 2393 * @chgs: the list of changes to make in hardware 2394 */ 2395 static enum ice_status 2396 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, 2397 struct LIST_HEAD_TYPE *chgs) 2398 { 2399 struct ice_chs_chg *tmp; 2400 2401 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { 2402 struct ice_xlt2_section *p; 2403 u32 id; 2404 2405 switch (tmp->type) { 2406 case ICE_VSIG_ADD: 2407 case ICE_VSI_MOVE: 2408 case ICE_VSIG_REM: 2409 id = ice_sect_id(blk, ICE_XLT2); 2410 p = (struct ice_xlt2_section *) 2411 ice_pkg_buf_alloc_section(bld, id, 2412 ice_struct_size(p, 2413 value, 2414 1)); 2415 2416 if (!p) 2417 return ICE_ERR_MAX_LIMIT; 2418 2419 p->count = CPU_TO_LE16(1); 2420 p->offset = CPU_TO_LE16(tmp->vsi); 2421 p->value[0] = CPU_TO_LE16(tmp->vsig); 2422 break; 2423 default: 2424 break; 2425 } 2426 } 2427 2428 return ICE_SUCCESS; 2429 } 2430 2431 /** 2432 * ice_upd_prof_hw - update hardware using the change list 2433 * @hw: pointer to the HW struct 2434 * @blk: hardware block 2435 * @chgs: the list of changes to make in hardware 2436 */ 2437 static enum ice_status 2438 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, 2439 struct LIST_HEAD_TYPE *chgs) 2440 { 2441 struct ice_buf_build *b; 2442 struct ice_chs_chg *tmp; 2443 enum ice_status status; 2444 u16 pkg_sects; 2445 u16 xlt1 = 0; 2446 u16 xlt2 = 0; 2447 u16 tcam = 0; 2448 u16 es = 0; 2449 u16 sects; 2450 2451 /* count number of sections we need */ 2452 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { 2453 switch (tmp->type) { 2454 case ICE_PTG_ES_ADD: 2455 if (tmp->add_ptg) 2456 xlt1++; 2457 if (tmp->add_prof) 2458 es++; 2459 break; 2460 case ICE_TCAM_ADD: 2461 tcam++; 2462 break; 2463 case ICE_VSIG_ADD: 2464 case ICE_VSI_MOVE: 2465 case ICE_VSIG_REM: 2466 xlt2++; 2467 break; 2468 default: 2469 break; 2470 } 2471 } 2472 sects = xlt1 + xlt2 + tcam + es; 2473 2474 if (!sects) 2475 return ICE_SUCCESS; 2476 2477 /* Build update package buffer */ 2478 b = ice_pkg_buf_alloc(hw); 2479 if (!b) 2480 return ICE_ERR_NO_MEMORY; 2481 2482 status = ice_pkg_buf_reserve_section(b, sects); 2483 if (status) 2484 goto error_tmp; 2485 2486 /* Preserve order of table update: ES, TCAM, PTG, VSIG */ 2487 if (es) { 2488 status = ice_prof_bld_es(hw, blk, b, chgs); 2489 if (status) 2490 goto error_tmp; 2491 } 2492 2493 if (tcam) { 2494 status = ice_prof_bld_tcam(hw, blk, b, chgs); 2495 if (status) 2496 goto error_tmp; 2497 } 2498 2499 if (xlt1) { 2500 status = ice_prof_bld_xlt1(blk, b, chgs); 2501 if (status) 2502 goto error_tmp; 2503 } 2504 2505 if (xlt2) { 2506 status = ice_prof_bld_xlt2(blk, b, chgs); 2507 if (status) 2508 goto error_tmp; 2509 } 2510 2511 /* After package buffer build check if the section count in buffer is 2512 * non-zero and matches the number of sections detected for package 2513 * update. 2514 */ 2515 pkg_sects = ice_pkg_buf_get_active_sections(b); 2516 if (!pkg_sects || pkg_sects != sects) { 2517 status = ICE_ERR_INVAL_SIZE; 2518 goto error_tmp; 2519 } 2520 2521 /* update package */ 2522 status = ice_update_pkg(hw, ice_pkg_buf(b), 1); 2523 if (status == ICE_ERR_AQ_ERROR) 2524 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n"); 2525 2526 error_tmp: 2527 ice_pkg_buf_free(hw, b); 2528 return status; 2529 } 2530 2531 /** 2532 * ice_add_prof - add profile 2533 * @hw: pointer to the HW struct 2534 * @blk: hardware block 2535 * @id: profile tracking ID 2536 * @ptypes: bitmap indicating ptypes (ICE_FLOW_PTYPE_MAX bits) 2537 * @es: extraction sequence (length of array is determined by the block) 2538 * 2539 * This function registers a profile, which matches a set of PTGs with a 2540 * particular extraction sequence. While the hardware profile is allocated 2541 * it will not be written until the first call to ice_add_flow that specifies 2542 * the ID value used here. 2543 */ 2544 enum ice_status 2545 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, 2546 ice_bitmap_t *ptypes, struct ice_fv_word *es) 2547 { 2548 ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT); 2549 struct ice_prof_map *prof; 2550 enum ice_status status; 2551 u8 prof_id; 2552 u16 ptype; 2553 2554 ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT); 2555 2556 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); 2557 2558 /* search for existing profile */ 2559 status = ice_find_prof_id(hw, blk, es, &prof_id); 2560 if (status) { 2561 /* allocate profile ID */ 2562 status = ice_alloc_prof_id(hw, blk, &prof_id); 2563 if (status) 2564 goto err_ice_add_prof; 2565 2566 /* and write new es */ 2567 ice_write_es(hw, blk, prof_id, es); 2568 } 2569 2570 ice_prof_inc_ref(hw, blk, prof_id); 2571 2572 /* add profile info */ 2573 2574 prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof)); 2575 if (!prof) 2576 goto err_ice_add_prof; 2577 2578 prof->profile_cookie = id; 2579 prof->prof_id = prof_id; 2580 prof->ptg_cnt = 0; 2581 prof->context = 0; 2582 2583 /* build list of ptgs */ 2584 ice_for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) { 2585 u8 ptg; 2586 2587 /* The package should place all ptypes in a non-zero 2588 * PTG, so the following call should never fail. 2589 */ 2590 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg)) 2591 continue; 2592 2593 /* If PTG is already added, skip and continue */ 2594 if (ice_is_bit_set(ptgs_used, ptg)) 2595 continue; 2596 2597 ice_set_bit(ptg, ptgs_used); 2598 prof->ptg[prof->ptg_cnt] = ptg; 2599 2600 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) 2601 break; 2602 } 2603 2604 LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map); 2605 status = ICE_SUCCESS; 2606 2607 err_ice_add_prof: 2608 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 2609 return status; 2610 } 2611 2612 /** 2613 * ice_search_prof_id - Search for a profile tracking ID 2614 * @hw: pointer to the HW struct 2615 * @blk: hardware block 2616 * @id: profile tracking ID 2617 * 2618 * This will search for a profile tracking ID which was previously added. 2619 * The profile map lock should be held before calling this function. 2620 */ 2621 struct ice_prof_map * 2622 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) 2623 { 2624 struct ice_prof_map *entry = NULL; 2625 struct ice_prof_map *map; 2626 2627 LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list) 2628 if (map->profile_cookie == id) { 2629 entry = map; 2630 break; 2631 } 2632 2633 return entry; 2634 } 2635 2636 /** 2637 * ice_set_prof_context - Set context for a given profile 2638 * @hw: pointer to the HW struct 2639 * @blk: hardware block 2640 * @id: profile tracking ID 2641 * @cntxt: context 2642 */ 2643 enum ice_status 2644 ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt) 2645 { 2646 enum ice_status status = ICE_ERR_DOES_NOT_EXIST; 2647 struct ice_prof_map *entry; 2648 2649 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); 2650 entry = ice_search_prof_id(hw, blk, id); 2651 if (entry) { 2652 entry->context = cntxt; 2653 status = ICE_SUCCESS; 2654 } 2655 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 2656 return status; 2657 } 2658 2659 /** 2660 * ice_get_prof_context - Get context for a given profile 2661 * @hw: pointer to the HW struct 2662 * @blk: hardware block 2663 * @id: profile tracking ID 2664 * @cntxt: pointer to variable to receive the context 2665 */ 2666 enum ice_status 2667 ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt) 2668 { 2669 enum ice_status status = ICE_ERR_DOES_NOT_EXIST; 2670 struct ice_prof_map *entry; 2671 2672 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); 2673 entry = ice_search_prof_id(hw, blk, id); 2674 if (entry) { 2675 *cntxt = entry->context; 2676 status = ICE_SUCCESS; 2677 } 2678 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 2679 return status; 2680 } 2681 2682 /** 2683 * ice_vsig_prof_id_count - count profiles in a VSIG 2684 * @hw: pointer to the HW struct 2685 * @blk: hardware block 2686 * @vsig: VSIG to remove the profile from 2687 */ 2688 static u16 2689 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig) 2690 { 2691 u16 idx = vsig & ICE_VSIG_IDX_M, count = 0; 2692 struct ice_vsig_prof *p; 2693 2694 LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 2695 ice_vsig_prof, list) 2696 count++; 2697 2698 return count; 2699 } 2700 2701 /** 2702 * ice_rel_tcam_idx - release a TCAM index 2703 * @hw: pointer to the HW struct 2704 * @blk: hardware block 2705 * @idx: the index to release 2706 */ 2707 static enum ice_status 2708 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) 2709 { 2710 /* Masks to invoke a never match entry */ 2711 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 2712 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF }; 2713 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; 2714 enum ice_status status; 2715 2716 /* write the TCAM entry */ 2717 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk, 2718 dc_msk, nm_msk); 2719 if (status) 2720 return status; 2721 2722 /* release the TCAM entry */ 2723 status = ice_free_tcam_ent(hw, blk, idx); 2724 2725 return status; 2726 } 2727 2728 /** 2729 * ice_rem_prof_id - remove one profile from a VSIG 2730 * @hw: pointer to the HW struct 2731 * @blk: hardware block 2732 * @prof: pointer to profile structure to remove 2733 */ 2734 static enum ice_status 2735 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, 2736 struct ice_vsig_prof *prof) 2737 { 2738 enum ice_status status; 2739 u16 i; 2740 2741 for (i = 0; i < prof->tcam_count; i++) 2742 if (prof->tcam[i].in_use) { 2743 prof->tcam[i].in_use = false; 2744 status = ice_rel_tcam_idx(hw, blk, 2745 prof->tcam[i].tcam_idx); 2746 if (status) 2747 return ICE_ERR_HW_TABLE; 2748 } 2749 2750 return ICE_SUCCESS; 2751 } 2752 2753 /** 2754 * ice_rem_vsig - remove VSIG 2755 * @hw: pointer to the HW struct 2756 * @blk: hardware block 2757 * @vsig: the VSIG to remove 2758 * @chg: the change list 2759 */ 2760 static enum ice_status 2761 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, 2762 struct LIST_HEAD_TYPE *chg) 2763 { 2764 u16 idx = vsig & ICE_VSIG_IDX_M; 2765 struct ice_vsig_vsi *vsi_cur; 2766 struct ice_vsig_prof *d, *t; 2767 2768 /* remove TCAM entries */ 2769 LIST_FOR_EACH_ENTRY_SAFE(d, t, 2770 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 2771 ice_vsig_prof, list) { 2772 enum ice_status status; 2773 2774 status = ice_rem_prof_id(hw, blk, d); 2775 if (status) 2776 return status; 2777 2778 LIST_DEL(&d->list); 2779 ice_free(hw, d); 2780 } 2781 2782 /* Move all VSIS associated with this VSIG to the default VSIG */ 2783 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 2784 /* If the VSIG has at least 1 VSI then iterate through the list 2785 * and remove the VSIs before deleting the group. 2786 */ 2787 if (vsi_cur) 2788 do { 2789 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; 2790 struct ice_chs_chg *p; 2791 2792 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); 2793 if (!p) 2794 return ICE_ERR_NO_MEMORY; 2795 2796 p->type = ICE_VSIG_REM; 2797 p->orig_vsig = vsig; 2798 p->vsig = ICE_DEFAULT_VSIG; 2799 p->vsi = (u16)(vsi_cur - hw->blk[blk].xlt2.vsis); 2800 2801 LIST_ADD(&p->list_entry, chg); 2802 2803 vsi_cur = tmp; 2804 } while (vsi_cur); 2805 2806 return ice_vsig_free(hw, blk, vsig); 2807 } 2808 2809 /** 2810 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG 2811 * @hw: pointer to the HW struct 2812 * @blk: hardware block 2813 * @vsig: VSIG to remove the profile from 2814 * @hdl: profile handle indicating which profile to remove 2815 * @chg: list to receive a record of changes 2816 */ 2817 static enum ice_status 2818 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, 2819 struct LIST_HEAD_TYPE *chg) 2820 { 2821 u16 idx = vsig & ICE_VSIG_IDX_M; 2822 struct ice_vsig_prof *p, *t; 2823 2824 LIST_FOR_EACH_ENTRY_SAFE(p, t, 2825 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 2826 ice_vsig_prof, list) 2827 if (p->profile_cookie == hdl) { 2828 enum ice_status status; 2829 2830 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1) 2831 /* this is the last profile, remove the VSIG */ 2832 return ice_rem_vsig(hw, blk, vsig, chg); 2833 2834 status = ice_rem_prof_id(hw, blk, p); 2835 if (!status) { 2836 LIST_DEL(&p->list); 2837 ice_free(hw, p); 2838 } 2839 return status; 2840 } 2841 2842 return ICE_ERR_DOES_NOT_EXIST; 2843 } 2844 2845 /** 2846 * ice_rem_flow_all - remove all flows with a particular profile 2847 * @hw: pointer to the HW struct 2848 * @blk: hardware block 2849 * @id: profile tracking ID 2850 */ 2851 static enum ice_status 2852 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) 2853 { 2854 struct ice_chs_chg *del, *tmp; 2855 enum ice_status status; 2856 struct LIST_HEAD_TYPE chg; 2857 u16 i; 2858 2859 INIT_LIST_HEAD(&chg); 2860 2861 for (i = 1; i < ICE_MAX_VSIGS; i++) 2862 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) { 2863 if (ice_has_prof_vsig(hw, blk, i, id)) { 2864 status = ice_rem_prof_id_vsig(hw, blk, i, id, 2865 &chg); 2866 if (status) 2867 goto err_ice_rem_flow_all; 2868 } 2869 } 2870 2871 status = ice_upd_prof_hw(hw, blk, &chg); 2872 2873 err_ice_rem_flow_all: 2874 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { 2875 LIST_DEL(&del->list_entry); 2876 ice_free(hw, del); 2877 } 2878 2879 return status; 2880 } 2881 2882 /** 2883 * ice_rem_prof - remove profile 2884 * @hw: pointer to the HW struct 2885 * @blk: hardware block 2886 * @id: profile tracking ID 2887 * 2888 * This will remove the profile specified by the ID parameter, which was 2889 * previously created through ice_add_prof. If any existing entries 2890 * are associated with this profile, they will be removed as well. 2891 */ 2892 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) 2893 { 2894 struct ice_prof_map *pmap; 2895 enum ice_status status; 2896 2897 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); 2898 2899 pmap = ice_search_prof_id(hw, blk, id); 2900 if (!pmap) { 2901 status = ICE_ERR_DOES_NOT_EXIST; 2902 goto err_ice_rem_prof; 2903 } 2904 2905 /* remove all flows with this profile */ 2906 status = ice_rem_flow_all(hw, blk, pmap->profile_cookie); 2907 if (status) 2908 goto err_ice_rem_prof; 2909 2910 /* dereference profile, and possibly remove */ 2911 ice_prof_dec_ref(hw, blk, pmap->prof_id); 2912 2913 LIST_DEL(&pmap->list); 2914 ice_free(hw, pmap); 2915 2916 err_ice_rem_prof: 2917 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 2918 return status; 2919 } 2920 2921 /** 2922 * ice_get_prof - get profile 2923 * @hw: pointer to the HW struct 2924 * @blk: hardware block 2925 * @hdl: profile handle 2926 * @chg: change list 2927 */ 2928 static enum ice_status 2929 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, 2930 struct LIST_HEAD_TYPE *chg) 2931 { 2932 enum ice_status status = ICE_SUCCESS; 2933 struct ice_prof_map *map; 2934 struct ice_chs_chg *p; 2935 u16 i; 2936 2937 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); 2938 /* Get the details on the profile specified by the handle ID */ 2939 map = ice_search_prof_id(hw, blk, hdl); 2940 if (!map) { 2941 status = ICE_ERR_DOES_NOT_EXIST; 2942 goto err_ice_get_prof; 2943 } 2944 2945 for (i = 0; i < map->ptg_cnt; i++) 2946 if (!hw->blk[blk].es.written[map->prof_id]) { 2947 /* add ES to change list */ 2948 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); 2949 if (!p) { 2950 status = ICE_ERR_NO_MEMORY; 2951 goto err_ice_get_prof; 2952 } 2953 2954 p->type = ICE_PTG_ES_ADD; 2955 p->ptype = 0; 2956 p->ptg = map->ptg[i]; 2957 p->add_ptg = 0; 2958 2959 p->add_prof = 1; 2960 p->prof_id = map->prof_id; 2961 2962 hw->blk[blk].es.written[map->prof_id] = true; 2963 2964 LIST_ADD(&p->list_entry, chg); 2965 } 2966 2967 err_ice_get_prof: 2968 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 2969 /* let caller clean up the change list */ 2970 return status; 2971 } 2972 2973 /** 2974 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG 2975 * @hw: pointer to the HW struct 2976 * @blk: hardware block 2977 * @vsig: VSIG from which to copy the list 2978 * @lst: output list 2979 * 2980 * This routine makes a copy of the list of profiles in the specified VSIG. 2981 */ 2982 static enum ice_status 2983 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, 2984 struct LIST_HEAD_TYPE *lst) 2985 { 2986 struct ice_vsig_prof *ent1, *ent2; 2987 u16 idx = vsig & ICE_VSIG_IDX_M; 2988 2989 LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 2990 ice_vsig_prof, list) { 2991 struct ice_vsig_prof *p; 2992 2993 /* copy to the input list */ 2994 p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p), 2995 ICE_NONDMA_TO_NONDMA); 2996 if (!p) 2997 goto err_ice_get_profs_vsig; 2998 2999 LIST_ADD_TAIL(&p->list, lst); 3000 } 3001 3002 return ICE_SUCCESS; 3003 3004 err_ice_get_profs_vsig: 3005 LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) { 3006 LIST_DEL(&ent1->list); 3007 ice_free(hw, ent1); 3008 } 3009 3010 return ICE_ERR_NO_MEMORY; 3011 } 3012 3013 /** 3014 * ice_add_prof_to_lst - add profile entry to a list 3015 * @hw: pointer to the HW struct 3016 * @blk: hardware block 3017 * @lst: the list to be added to 3018 * @hdl: profile handle of entry to add 3019 */ 3020 static enum ice_status 3021 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, 3022 struct LIST_HEAD_TYPE *lst, u64 hdl) 3023 { 3024 enum ice_status status = ICE_SUCCESS; 3025 struct ice_prof_map *map; 3026 struct ice_vsig_prof *p; 3027 u16 i; 3028 3029 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); 3030 map = ice_search_prof_id(hw, blk, hdl); 3031 if (!map) { 3032 status = ICE_ERR_DOES_NOT_EXIST; 3033 goto err_ice_add_prof_to_lst; 3034 } 3035 3036 p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p)); 3037 if (!p) { 3038 status = ICE_ERR_NO_MEMORY; 3039 goto err_ice_add_prof_to_lst; 3040 } 3041 3042 p->profile_cookie = map->profile_cookie; 3043 p->prof_id = map->prof_id; 3044 p->tcam_count = map->ptg_cnt; 3045 3046 for (i = 0; i < map->ptg_cnt; i++) { 3047 p->tcam[i].prof_id = map->prof_id; 3048 p->tcam[i].tcam_idx = ICE_INVALID_TCAM; 3049 p->tcam[i].ptg = map->ptg[i]; 3050 } 3051 3052 LIST_ADD(&p->list, lst); 3053 3054 err_ice_add_prof_to_lst: 3055 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 3056 return status; 3057 } 3058 3059 /** 3060 * ice_move_vsi - move VSI to another VSIG 3061 * @hw: pointer to the HW struct 3062 * @blk: hardware block 3063 * @vsi: the VSI to move 3064 * @vsig: the VSIG to move the VSI to 3065 * @chg: the change list 3066 */ 3067 static enum ice_status 3068 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, 3069 struct LIST_HEAD_TYPE *chg) 3070 { 3071 enum ice_status status; 3072 struct ice_chs_chg *p; 3073 u16 orig_vsig; 3074 3075 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); 3076 if (!p) 3077 return ICE_ERR_NO_MEMORY; 3078 3079 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); 3080 if (!status) 3081 status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); 3082 3083 if (status) { 3084 ice_free(hw, p); 3085 return status; 3086 } 3087 3088 p->type = ICE_VSI_MOVE; 3089 p->vsi = vsi; 3090 p->orig_vsig = orig_vsig; 3091 p->vsig = vsig; 3092 3093 LIST_ADD(&p->list_entry, chg); 3094 3095 return ICE_SUCCESS; 3096 } 3097 3098 /** 3099 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list 3100 * @hw: pointer to the HW struct 3101 * @idx: the index of the TCAM entry to remove 3102 * @chg: the list of change structures to search 3103 */ 3104 static void 3105 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg) 3106 { 3107 struct ice_chs_chg *pos, *tmp; 3108 3109 LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry) 3110 if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) { 3111 LIST_DEL(&tmp->list_entry); 3112 ice_free(hw, tmp); 3113 } 3114 } 3115 3116 /** 3117 * ice_prof_tcam_ena_dis - add enable or disable TCAM change 3118 * @hw: pointer to the HW struct 3119 * @blk: hardware block 3120 * @enable: true to enable, false to disable 3121 * @vsig: the VSIG of the TCAM entry 3122 * @tcam: pointer the TCAM info structure of the TCAM to disable 3123 * @chg: the change list 3124 * 3125 * This function appends an enable or disable TCAM entry in the change log 3126 */ 3127 static enum ice_status 3128 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, 3129 u16 vsig, struct ice_tcam_inf *tcam, 3130 struct LIST_HEAD_TYPE *chg) 3131 { 3132 enum ice_status status; 3133 struct ice_chs_chg *p; 3134 3135 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3136 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; 3137 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; 3138 3139 /* if disabling, free the TCAM */ 3140 if (!enable) { 3141 status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx); 3142 3143 /* if we have already created a change for this TCAM entry, then 3144 * we need to remove that entry, in order to prevent writing to 3145 * a TCAM entry we no longer will have ownership of. 3146 */ 3147 ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg); 3148 tcam->tcam_idx = 0; 3149 tcam->in_use = 0; 3150 return status; 3151 } 3152 3153 /* for re-enabling, reallocate a TCAM */ 3154 status = ice_alloc_tcam_ent(hw, blk, true, &tcam->tcam_idx); 3155 if (status) 3156 return status; 3157 3158 /* add TCAM to change list */ 3159 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); 3160 if (!p) 3161 return ICE_ERR_NO_MEMORY; 3162 3163 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, 3164 tcam->ptg, vsig, 0, 0, vl_msk, dc_msk, 3165 nm_msk); 3166 if (status) 3167 goto err_ice_prof_tcam_ena_dis; 3168 3169 tcam->in_use = 1; 3170 3171 p->type = ICE_TCAM_ADD; 3172 p->add_tcam_idx = true; 3173 p->prof_id = tcam->prof_id; 3174 p->ptg = tcam->ptg; 3175 p->vsig = 0; 3176 p->tcam_idx = tcam->tcam_idx; 3177 3178 /* log change */ 3179 LIST_ADD(&p->list_entry, chg); 3180 3181 return ICE_SUCCESS; 3182 3183 err_ice_prof_tcam_ena_dis: 3184 ice_free(hw, p); 3185 return status; 3186 } 3187 3188 /** 3189 * ice_adj_prof_priorities - adjust profile based on priorities 3190 * @hw: pointer to the HW struct 3191 * @blk: hardware block 3192 * @vsig: the VSIG for which to adjust profile priorities 3193 * @chg: the change list 3194 */ 3195 static enum ice_status 3196 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, 3197 struct LIST_HEAD_TYPE *chg) 3198 { 3199 ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT); 3200 enum ice_status status = ICE_SUCCESS; 3201 struct ice_vsig_prof *t; 3202 u16 idx; 3203 3204 ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT); 3205 idx = vsig & ICE_VSIG_IDX_M; 3206 3207 /* Priority is based on the order in which the profiles are added. The 3208 * newest added profile has highest priority and the oldest added 3209 * profile has the lowest priority. Since the profile property list for 3210 * a VSIG is sorted from newest to oldest, this code traverses the list 3211 * in order and enables the first of each PTG that it finds (that is not 3212 * already enabled); it also disables any duplicate PTGs that it finds 3213 * in the older profiles (that are currently enabled). 3214 */ 3215 3216 LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 3217 ice_vsig_prof, list) { 3218 u16 i; 3219 3220 for (i = 0; i < t->tcam_count; i++) { 3221 bool used; 3222 3223 /* Scan the priorities from newest to oldest. 3224 * Make sure that the newest profiles take priority. 3225 */ 3226 used = ice_is_bit_set(ptgs_used, t->tcam[i].ptg); 3227 3228 if (used && t->tcam[i].in_use) { 3229 /* need to mark this PTG as never match, as it 3230 * was already in use and therefore duplicate 3231 * (and lower priority) 3232 */ 3233 status = ice_prof_tcam_ena_dis(hw, blk, false, 3234 vsig, 3235 &t->tcam[i], 3236 chg); 3237 if (status) 3238 return status; 3239 } else if (!used && !t->tcam[i].in_use) { 3240 /* need to enable this PTG, as it in not in use 3241 * and not enabled (highest priority) 3242 */ 3243 status = ice_prof_tcam_ena_dis(hw, blk, true, 3244 vsig, 3245 &t->tcam[i], 3246 chg); 3247 if (status) 3248 return status; 3249 } 3250 3251 /* keep track of used ptgs */ 3252 ice_set_bit(t->tcam[i].ptg, ptgs_used); 3253 } 3254 } 3255 3256 return status; 3257 } 3258 3259 /** 3260 * ice_add_prof_id_vsig - add profile to VSIG 3261 * @hw: pointer to the HW struct 3262 * @blk: hardware block 3263 * @vsig: the VSIG to which this profile is to be added 3264 * @hdl: the profile handle indicating the profile to add 3265 * @rev: true to add entries to the end of the list 3266 * @chg: the change list 3267 */ 3268 static enum ice_status 3269 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, 3270 bool rev, struct LIST_HEAD_TYPE *chg) 3271 { 3272 /* Masks that ignore flags */ 3273 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3274 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; 3275 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; 3276 enum ice_status status = ICE_SUCCESS; 3277 struct ice_prof_map *map; 3278 struct ice_vsig_prof *t; 3279 struct ice_chs_chg *p; 3280 u16 vsig_idx, i; 3281 3282 /* Error, if this VSIG already has this profile */ 3283 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) 3284 return ICE_ERR_ALREADY_EXISTS; 3285 3286 /* new VSIG profile structure */ 3287 t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t)); 3288 if (!t) 3289 return ICE_ERR_NO_MEMORY; 3290 3291 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); 3292 /* Get the details on the profile specified by the handle ID */ 3293 map = ice_search_prof_id(hw, blk, hdl); 3294 if (!map) { 3295 status = ICE_ERR_DOES_NOT_EXIST; 3296 goto err_ice_add_prof_id_vsig; 3297 } 3298 3299 t->profile_cookie = map->profile_cookie; 3300 t->prof_id = map->prof_id; 3301 t->tcam_count = map->ptg_cnt; 3302 3303 /* create TCAM entries */ 3304 for (i = 0; i < map->ptg_cnt; i++) { 3305 u16 tcam_idx; 3306 3307 /* add TCAM to change list */ 3308 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); 3309 if (!p) { 3310 status = ICE_ERR_NO_MEMORY; 3311 goto err_ice_add_prof_id_vsig; 3312 } 3313 3314 /* allocate the TCAM entry index */ 3315 status = ice_alloc_tcam_ent(hw, blk, true, &tcam_idx); 3316 if (status) { 3317 ice_free(hw, p); 3318 goto err_ice_add_prof_id_vsig; 3319 } 3320 3321 t->tcam[i].ptg = map->ptg[i]; 3322 t->tcam[i].prof_id = map->prof_id; 3323 t->tcam[i].tcam_idx = tcam_idx; 3324 t->tcam[i].in_use = true; 3325 3326 p->type = ICE_TCAM_ADD; 3327 p->add_tcam_idx = true; 3328 p->prof_id = t->tcam[i].prof_id; 3329 p->ptg = t->tcam[i].ptg; 3330 p->vsig = vsig; 3331 p->tcam_idx = t->tcam[i].tcam_idx; 3332 3333 /* write the TCAM entry */ 3334 status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx, 3335 t->tcam[i].prof_id, 3336 t->tcam[i].ptg, vsig, 0, 0, 3337 vl_msk, dc_msk, nm_msk); 3338 if (status) { 3339 ice_free(hw, p); 3340 goto err_ice_add_prof_id_vsig; 3341 } 3342 3343 /* log change */ 3344 LIST_ADD(&p->list_entry, chg); 3345 } 3346 3347 /* add profile to VSIG */ 3348 vsig_idx = vsig & ICE_VSIG_IDX_M; 3349 if (rev) 3350 LIST_ADD_TAIL(&t->list, 3351 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); 3352 else 3353 LIST_ADD(&t->list, 3354 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); 3355 3356 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 3357 return status; 3358 3359 err_ice_add_prof_id_vsig: 3360 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 3361 /* let caller clean up the change list */ 3362 ice_free(hw, t); 3363 return status; 3364 } 3365 3366 /** 3367 * ice_create_prof_id_vsig - add a new VSIG with a single profile 3368 * @hw: pointer to the HW struct 3369 * @blk: hardware block 3370 * @vsi: the initial VSI that will be in VSIG 3371 * @hdl: the profile handle of the profile that will be added to the VSIG 3372 * @chg: the change list 3373 */ 3374 static enum ice_status 3375 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl, 3376 struct LIST_HEAD_TYPE *chg) 3377 { 3378 enum ice_status status; 3379 struct ice_chs_chg *p; 3380 u16 new_vsig; 3381 3382 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); 3383 if (!p) 3384 return ICE_ERR_NO_MEMORY; 3385 3386 new_vsig = ice_vsig_alloc(hw, blk); 3387 if (!new_vsig) { 3388 status = ICE_ERR_HW_TABLE; 3389 goto err_ice_create_prof_id_vsig; 3390 } 3391 3392 status = ice_move_vsi(hw, blk, vsi, new_vsig, chg); 3393 if (status) 3394 goto err_ice_create_prof_id_vsig; 3395 3396 status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg); 3397 if (status) 3398 goto err_ice_create_prof_id_vsig; 3399 3400 p->type = ICE_VSIG_ADD; 3401 p->vsi = vsi; 3402 p->orig_vsig = ICE_DEFAULT_VSIG; 3403 p->vsig = new_vsig; 3404 3405 LIST_ADD(&p->list_entry, chg); 3406 3407 return ICE_SUCCESS; 3408 3409 err_ice_create_prof_id_vsig: 3410 /* let caller clean up the change list */ 3411 ice_free(hw, p); 3412 return status; 3413 } 3414 3415 /** 3416 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles 3417 * @hw: pointer to the HW struct 3418 * @blk: hardware block 3419 * @vsi: the initial VSI that will be in VSIG 3420 * @lst: the list of profile that will be added to the VSIG 3421 * @new_vsig: return of new VSIG 3422 * @chg: the change list 3423 */ 3424 static enum ice_status 3425 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi, 3426 struct LIST_HEAD_TYPE *lst, u16 *new_vsig, 3427 struct LIST_HEAD_TYPE *chg) 3428 { 3429 struct ice_vsig_prof *t; 3430 enum ice_status status; 3431 u16 vsig; 3432 3433 vsig = ice_vsig_alloc(hw, blk); 3434 if (!vsig) 3435 return ICE_ERR_HW_TABLE; 3436 3437 status = ice_move_vsi(hw, blk, vsi, vsig, chg); 3438 if (status) 3439 return status; 3440 3441 LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) { 3442 /* Reverse the order here since we are copying the list */ 3443 status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie, 3444 true, chg); 3445 if (status) 3446 return status; 3447 } 3448 3449 *new_vsig = vsig; 3450 3451 return ICE_SUCCESS; 3452 } 3453 3454 /** 3455 * ice_find_prof_vsig - find a VSIG with a specific profile handle 3456 * @hw: pointer to the HW struct 3457 * @blk: hardware block 3458 * @hdl: the profile handle of the profile to search for 3459 * @vsig: returns the VSIG with the matching profile 3460 */ 3461 static bool 3462 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig) 3463 { 3464 struct ice_vsig_prof *t; 3465 enum ice_status status; 3466 struct LIST_HEAD_TYPE lst; 3467 3468 INIT_LIST_HEAD(&lst); 3469 3470 t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t)); 3471 if (!t) 3472 return false; 3473 3474 t->profile_cookie = hdl; 3475 LIST_ADD(&t->list, &lst); 3476 3477 status = ice_find_dup_props_vsig(hw, blk, &lst, vsig); 3478 3479 LIST_DEL(&t->list); 3480 ice_free(hw, t); 3481 3482 return status == ICE_SUCCESS; 3483 } 3484 3485 /** 3486 * ice_add_vsi_flow - add VSI flow 3487 * @hw: pointer to the HW struct 3488 * @blk: hardware block 3489 * @vsi: input VSI 3490 * @vsig: target VSIG to include the input VSI 3491 * 3492 * Calling this function will add the VSI to a given VSIG and 3493 * update the HW tables accordingly. This call can be used to 3494 * add multiple VSIs to a VSIG if we know beforehand that those 3495 * VSIs have the same characteristics of the VSIG. This will 3496 * save time in generating a new VSIG and TCAMs till a match is 3497 * found and subsequent rollback when a matching VSIG is found. 3498 */ 3499 enum ice_status 3500 ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) 3501 { 3502 struct ice_chs_chg *tmp, *del; 3503 struct LIST_HEAD_TYPE chg; 3504 enum ice_status status; 3505 3506 /* if target VSIG is default the move is invalid */ 3507 if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG) 3508 return ICE_ERR_PARAM; 3509 3510 INIT_LIST_HEAD(&chg); 3511 3512 /* move VSI to the VSIG that matches */ 3513 status = ice_move_vsi(hw, blk, vsi, vsig, &chg); 3514 /* update hardware if success */ 3515 if (!status) 3516 status = ice_upd_prof_hw(hw, blk, &chg); 3517 3518 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { 3519 LIST_DEL(&del->list_entry); 3520 ice_free(hw, del); 3521 } 3522 3523 return status; 3524 } 3525 3526 /** 3527 * ice_add_prof_id_flow - add profile flow 3528 * @hw: pointer to the HW struct 3529 * @blk: hardware block 3530 * @vsi: the VSI to enable with the profile specified by ID 3531 * @hdl: profile handle 3532 * 3533 * Calling this function will update the hardware tables to enable the 3534 * profile indicated by the ID parameter for the VSIs specified in the VSI 3535 * array. Once successfully called, the flow will be enabled. 3536 */ 3537 enum ice_status 3538 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) 3539 { 3540 struct ice_vsig_prof *tmp1, *del1; 3541 struct ice_chs_chg *tmp, *del; 3542 struct LIST_HEAD_TYPE union_lst; 3543 enum ice_status status; 3544 struct LIST_HEAD_TYPE chg; 3545 u16 vsig; 3546 3547 INIT_LIST_HEAD(&union_lst); 3548 INIT_LIST_HEAD(&chg); 3549 3550 /* Get profile */ 3551 status = ice_get_prof(hw, blk, hdl, &chg); 3552 if (status) 3553 return status; 3554 3555 /* determine if VSI is already part of a VSIG */ 3556 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig); 3557 if (!status && vsig) { 3558 bool only_vsi; 3559 u16 or_vsig; 3560 u16 ref; 3561 3562 /* found in VSIG */ 3563 or_vsig = vsig; 3564 3565 /* make sure that there is no overlap/conflict between the new 3566 * characteristics and the existing ones; we don't support that 3567 * scenario 3568 */ 3569 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) { 3570 status = ICE_ERR_ALREADY_EXISTS; 3571 goto err_ice_add_prof_id_flow; 3572 } 3573 3574 /* last VSI in the VSIG? */ 3575 status = ice_vsig_get_ref(hw, blk, vsig, &ref); 3576 if (status) 3577 goto err_ice_add_prof_id_flow; 3578 only_vsi = (ref == 1); 3579 3580 /* create a union of the current profiles and the one being 3581 * added 3582 */ 3583 status = ice_get_profs_vsig(hw, blk, vsig, &union_lst); 3584 if (status) 3585 goto err_ice_add_prof_id_flow; 3586 3587 status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl); 3588 if (status) 3589 goto err_ice_add_prof_id_flow; 3590 3591 /* search for an existing VSIG with an exact charc match */ 3592 status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig); 3593 if (!status) { 3594 /* move VSI to the VSIG that matches */ 3595 status = ice_move_vsi(hw, blk, vsi, vsig, &chg); 3596 if (status) 3597 goto err_ice_add_prof_id_flow; 3598 3599 /* VSI has been moved out of or_vsig. If the or_vsig had 3600 * only that VSI it is now empty and can be removed. 3601 */ 3602 if (only_vsi) { 3603 status = ice_rem_vsig(hw, blk, or_vsig, &chg); 3604 if (status) 3605 goto err_ice_add_prof_id_flow; 3606 } 3607 } else if (only_vsi) { 3608 /* If the original VSIG only contains one VSI, then it 3609 * will be the requesting VSI. In this case the VSI is 3610 * not sharing entries and we can simply add the new 3611 * profile to the VSIG. 3612 */ 3613 status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false, 3614 &chg); 3615 if (status) 3616 goto err_ice_add_prof_id_flow; 3617 3618 /* Adjust priorities */ 3619 status = ice_adj_prof_priorities(hw, blk, vsig, &chg); 3620 if (status) 3621 goto err_ice_add_prof_id_flow; 3622 } else { 3623 /* No match, so we need a new VSIG */ 3624 status = ice_create_vsig_from_lst(hw, blk, vsi, 3625 &union_lst, &vsig, 3626 &chg); 3627 if (status) 3628 goto err_ice_add_prof_id_flow; 3629 3630 /* Adjust priorities */ 3631 status = ice_adj_prof_priorities(hw, blk, vsig, &chg); 3632 if (status) 3633 goto err_ice_add_prof_id_flow; 3634 } 3635 } else { 3636 /* need to find or add a VSIG */ 3637 /* search for an existing VSIG with an exact charc match */ 3638 if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) { 3639 /* found an exact match */ 3640 /* add or move VSI to the VSIG that matches */ 3641 status = ice_move_vsi(hw, blk, vsi, vsig, &chg); 3642 if (status) 3643 goto err_ice_add_prof_id_flow; 3644 } else { 3645 /* we did not find an exact match */ 3646 /* we need to add a VSIG */ 3647 status = ice_create_prof_id_vsig(hw, blk, vsi, hdl, 3648 &chg); 3649 if (status) 3650 goto err_ice_add_prof_id_flow; 3651 } 3652 } 3653 3654 /* update hardware */ 3655 if (!status) 3656 status = ice_upd_prof_hw(hw, blk, &chg); 3657 3658 err_ice_add_prof_id_flow: 3659 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { 3660 LIST_DEL(&del->list_entry); 3661 ice_free(hw, del); 3662 } 3663 3664 LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) { 3665 LIST_DEL(&del1->list); 3666 ice_free(hw, del1); 3667 } 3668 3669 return status; 3670 } 3671 3672 /** 3673 * ice_add_flow - add flow 3674 * @hw: pointer to the HW struct 3675 * @blk: hardware block 3676 * @vsi: array of VSIs to enable with the profile specified by ID 3677 * @count: number of elements in the VSI array 3678 * @id: profile tracking ID 3679 * 3680 * Calling this function will update the hardware tables to enable the 3681 * profile indicated by the ID parameter for the VSIs specified in the VSI 3682 * array. Once successfully called, the flow will be enabled. 3683 */ 3684 enum ice_status 3685 ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, 3686 u64 id) 3687 { 3688 u16 i; 3689 3690 for (i = 0; i < count; i++) { 3691 enum ice_status status; 3692 3693 status = ice_add_prof_id_flow(hw, blk, vsi[i], id); 3694 if (status) 3695 return status; 3696 } 3697 3698 return ICE_SUCCESS; 3699 } 3700 3701 /** 3702 * ice_rem_prof_from_list - remove a profile from list 3703 * @hw: pointer to the HW struct 3704 * @lst: list to remove the profile from 3705 * @hdl: the profile handle indicating the profile to remove 3706 */ 3707 static enum ice_status 3708 ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl) 3709 { 3710 struct ice_vsig_prof *ent, *tmp; 3711 3712 LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list) 3713 if (ent->profile_cookie == hdl) { 3714 LIST_DEL(&ent->list); 3715 ice_free(hw, ent); 3716 return ICE_SUCCESS; 3717 } 3718 3719 return ICE_ERR_DOES_NOT_EXIST; 3720 } 3721 3722 /** 3723 * ice_rem_prof_id_flow - remove flow 3724 * @hw: pointer to the HW struct 3725 * @blk: hardware block 3726 * @vsi: the VSI from which to remove the profile specified by ID 3727 * @hdl: profile tracking handle 3728 * 3729 * Calling this function will update the hardware tables to remove the 3730 * profile indicated by the ID parameter for the VSIs specified in the VSI 3731 * array. Once successfully called, the flow will be disabled. 3732 */ 3733 enum ice_status 3734 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) 3735 { 3736 struct ice_vsig_prof *tmp1, *del1; 3737 struct ice_chs_chg *tmp, *del; 3738 struct LIST_HEAD_TYPE chg, copy; 3739 enum ice_status status; 3740 u16 vsig; 3741 3742 INIT_LIST_HEAD(©); 3743 INIT_LIST_HEAD(&chg); 3744 3745 /* determine if VSI is already part of a VSIG */ 3746 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig); 3747 if (!status && vsig) { 3748 bool last_profile; 3749 bool only_vsi; 3750 u16 ref; 3751 3752 /* found in VSIG */ 3753 last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1; 3754 status = ice_vsig_get_ref(hw, blk, vsig, &ref); 3755 if (status) 3756 goto err_ice_rem_prof_id_flow; 3757 only_vsi = (ref == 1); 3758 3759 if (only_vsi) { 3760 /* If the original VSIG only contains one reference, 3761 * which will be the requesting VSI, then the VSI is not 3762 * sharing entries and we can simply remove the specific 3763 * characteristics from the VSIG. 3764 */ 3765 3766 if (last_profile) { 3767 /* If there are no profiles left for this VSIG, 3768 * then simply remove the VSIG. 3769 */ 3770 status = ice_rem_vsig(hw, blk, vsig, &chg); 3771 if (status) 3772 goto err_ice_rem_prof_id_flow; 3773 } else { 3774 status = ice_rem_prof_id_vsig(hw, blk, vsig, 3775 hdl, &chg); 3776 if (status) 3777 goto err_ice_rem_prof_id_flow; 3778 3779 /* Adjust priorities */ 3780 status = ice_adj_prof_priorities(hw, blk, vsig, 3781 &chg); 3782 if (status) 3783 goto err_ice_rem_prof_id_flow; 3784 } 3785 3786 } else { 3787 /* Make a copy of the VSIG's list of Profiles */ 3788 status = ice_get_profs_vsig(hw, blk, vsig, ©); 3789 if (status) 3790 goto err_ice_rem_prof_id_flow; 3791 3792 /* Remove specified profile entry from the list */ 3793 status = ice_rem_prof_from_list(hw, ©, hdl); 3794 if (status) 3795 goto err_ice_rem_prof_id_flow; 3796 3797 if (LIST_EMPTY(©)) { 3798 status = ice_move_vsi(hw, blk, vsi, 3799 ICE_DEFAULT_VSIG, &chg); 3800 if (status) 3801 goto err_ice_rem_prof_id_flow; 3802 3803 } else if (!ice_find_dup_props_vsig(hw, blk, ©, 3804 &vsig)) { 3805 /* found an exact match */ 3806 /* add or move VSI to the VSIG that matches */ 3807 /* Search for a VSIG with a matching profile 3808 * list 3809 */ 3810 3811 /* Found match, move VSI to the matching VSIG */ 3812 status = ice_move_vsi(hw, blk, vsi, vsig, &chg); 3813 if (status) 3814 goto err_ice_rem_prof_id_flow; 3815 } else { 3816 /* since no existing VSIG supports this 3817 * characteristic pattern, we need to create a 3818 * new VSIG and TCAM entries 3819 */ 3820 status = ice_create_vsig_from_lst(hw, blk, vsi, 3821 ©, &vsig, 3822 &chg); 3823 if (status) 3824 goto err_ice_rem_prof_id_flow; 3825 3826 /* Adjust priorities */ 3827 status = ice_adj_prof_priorities(hw, blk, vsig, 3828 &chg); 3829 if (status) 3830 goto err_ice_rem_prof_id_flow; 3831 } 3832 } 3833 } else { 3834 status = ICE_ERR_DOES_NOT_EXIST; 3835 } 3836 3837 /* update hardware tables */ 3838 if (!status) 3839 status = ice_upd_prof_hw(hw, blk, &chg); 3840 3841 err_ice_rem_prof_id_flow: 3842 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { 3843 LIST_DEL(&del->list_entry); 3844 ice_free(hw, del); 3845 } 3846 3847 LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, ©, ice_vsig_prof, list) { 3848 LIST_DEL(&del1->list); 3849 ice_free(hw, del1); 3850 } 3851 3852 return status; 3853 } 3854 3855 /** 3856 * ice_rem_flow - remove flow 3857 * @hw: pointer to the HW struct 3858 * @blk: hardware block 3859 * @vsi: array of VSIs from which to remove the profile specified by ID 3860 * @count: number of elements in the VSI array 3861 * @id: profile tracking ID 3862 * 3863 * The function will remove flows from the specified VSIs that were enabled 3864 * using ice_add_flow. The ID value will indicated which profile will be 3865 * removed. Once successfully called, the flow will be disabled. 3866 */ 3867 enum ice_status 3868 ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, 3869 u64 id) 3870 { 3871 u16 i; 3872 3873 for (i = 0; i < count; i++) { 3874 enum ice_status status; 3875 3876 status = ice_rem_prof_id_flow(hw, blk, vsi[i], id); 3877 if (status) 3878 return status; 3879 } 3880 3881 return ICE_SUCCESS; 3882 } 3883