/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_protocol_type.h"
#include "ice_flow.h"

/* To support tunneling entries by PF, the package will append the PF number to
 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
 */
static const struct ice_tunnel_type_scan tnls[] = {
	{ TNL_VXLAN, "TNL_VXLAN_PF" },
	{ TNL_GENEVE, "TNL_GENEVE_PF" },
	{ TNL_LAST, "" }
};

static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
	/* SWITCH */
	{
		ICE_SID_XLT0_SW,
		ICE_SID_XLT_KEY_BUILDER_SW,
		ICE_SID_XLT1_SW,
		ICE_SID_XLT2_SW,
		ICE_SID_PROFID_TCAM_SW,
		ICE_SID_PROFID_REDIR_SW,
		ICE_SID_FLD_VEC_SW,
		ICE_SID_CDID_KEY_BUILDER_SW,
		ICE_SID_CDID_REDIR_SW
	},

	/* ACL */
	{
		ICE_SID_XLT0_ACL,
		ICE_SID_XLT_KEY_BUILDER_ACL,
		ICE_SID_XLT1_ACL,
		ICE_SID_XLT2_ACL,
		ICE_SID_PROFID_TCAM_ACL,
		ICE_SID_PROFID_REDIR_ACL,
		ICE_SID_FLD_VEC_ACL,
		ICE_SID_CDID_KEY_BUILDER_ACL,
		ICE_SID_CDID_REDIR_ACL
	},

	/* FD */
	{
		ICE_SID_XLT0_FD,
		ICE_SID_XLT_KEY_BUILDER_FD,
		ICE_SID_XLT1_FD,
		ICE_SID_XLT2_FD,
		ICE_SID_PROFID_TCAM_FD,
		ICE_SID_PROFID_REDIR_FD,
		ICE_SID_FLD_VEC_FD,
		ICE_SID_CDID_KEY_BUILDER_FD,
		ICE_SID_CDID_REDIR_FD
	},

	/* RSS */
	{
		ICE_SID_XLT0_RSS,
		ICE_SID_XLT_KEY_BUILDER_RSS,
		ICE_SID_XLT1_RSS,
		ICE_SID_XLT2_RSS,
		ICE_SID_PROFID_TCAM_RSS,
		ICE_SID_PROFID_REDIR_RSS,
		ICE_SID_FLD_VEC_RSS,
		ICE_SID_CDID_KEY_BUILDER_RSS,
		ICE_SID_CDID_REDIR_RSS
	},

	/* PE */
	{
		ICE_SID_XLT0_PE,
		ICE_SID_XLT_KEY_BUILDER_PE,
		ICE_SID_XLT1_PE,
		ICE_SID_XLT2_PE,
		ICE_SID_PROFID_TCAM_PE,
		ICE_SID_PROFID_REDIR_PE,
		ICE_SID_FLD_VEC_PE,
		ICE_SID_CDID_KEY_BUILDER_PE,
		ICE_SID_CDID_REDIR_PE
	}
};

/**
 * ice_sect_id - returns section ID
 * @blk: block type
 * @sect: section type
 *
 * This helper function returns the proper section ID given a block type and a
 * section type.
 */
static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
{
	return ice_sect_lkup[blk][sect];
}

/**
 * ice_pkg_val_buf
 * @buf: pointer to the ice buffer
 *
 * This helper function validates a buffer's header.
 */
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
	struct ice_buf_hdr *hdr;
	u16 section_count;
	u16 data_end;

	hdr = (struct ice_buf_hdr *)buf->buf;
	/* verify data */
	section_count = LE16_TO_CPU(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
		return NULL;

	data_end = LE16_TO_CPU(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
		return NULL;

	return hdr;
}

/**
 * ice_find_buf_table
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table within the ice segment.
 */
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
	struct ice_nvm_table *nvms;

	nvms = (struct ice_nvm_table *)
		(ice_seg->device_table +
		 LE32_TO_CPU(ice_seg->device_table_count));

	return (_FORCE_ struct ice_buf_table *)
		(nvms->vers + LE32_TO_CPU(nvms->table_count));
}

/**
 * ice_pkg_enum_buf
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 * ice_seg is set to NULL, which continues the enumeration. When the function
 * returns a NULL pointer, then the end of the buffers has been reached, or an
 * unexpected value has been detected (for example an invalid section count or
 * an invalid buffer end value).
 */
static struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (ice_seg) {
		state->buf_table = ice_find_buf_table(ice_seg);
		if (!state->buf_table)
			return NULL;

		state->buf_idx = 0;
		return ice_pkg_val_buf(state->buf_table->buf_array);
	}

	if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +
				       state->buf_idx);
	else
		return NULL;
}

/**
 * ice_pkg_advance_sect
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This helper function will advance the section within the ice segment,
 * also advancing the buffer if needed.
 */
static bool
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (!ice_seg && !state->buf)
		return false;

	if (!ice_seg && state->buf)
		if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
			return true;

	state->buf = ice_pkg_enum_buf(ice_seg, state);
	if (!state->buf)
		return false;

	/* start of new buffer, reset section index */
	state->sect_idx = 0;
	return true;
}

/**
 * ice_pkg_enum_section
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL, which continues the
 * enumeration. When the function returns a NULL pointer, then the end of the
 * matching sections has been reached.
 */
static void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		     u32 sect_type)
{
	u16 offset, size;

	if (ice_seg)
		state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))
		return NULL;

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       CPU_TO_LE32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))
			return NULL;

	/* validate section */
	offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
		return NULL;

	size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
		return NULL;

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)
		return NULL;

	state->sect_type =
		LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect = ((u8 *)state->buf) +
		LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);

	return state->sect;
}
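
/* Minimal usage sketch (illustrative only, not part of the driver flow): a
 * caller walks every section of one type by passing the segment on the first
 * call and NULL on each continuation call, e.g.:
 *
 *	struct ice_pkg_enum state;
 *	void *sect;
 *
 *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 *	sect = ice_pkg_enum_section(ice_seg, &state, ICE_SID_FLD_VEC_SW);
 *	while (sect) {
 *		// process the section
 *		sect = ice_pkg_enum_section(NULL, &state, 0);
 *	}
 *
 * The sect_type argument is ignored on continuation calls; the type saved in
 * the state by the first call is used instead.
 */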

/**
 * ice_pkg_enum_entry
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that handles access to the entries in the section type
 *
 * This function will enumerate all the entries of a particular section type in
 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL, which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the entries has
 * been reached.
 *
 * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location of entries in each
 * section.
 *
 * The offset parameter is optional, but should be used for sections that
 * contain an offset for each section table. For such cases, the section handler
 * function must return the appropriate offset + index to give the absolute
 * offset for each entry. For example, if a section's header indicates a base
 * offset of 10 and the index for the entry is 2, then the section handler
 * function should set the offset to 10 + 2 = 12.
 */
static void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		   u32 sect_type, u32 *offset,
		   void *(*handler)(u32 sect_type, void *section,
				    u32 index, u32 *offset))
{
	void *entry;

	if (ice_seg) {
		if (!handler)
			return NULL;

		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
			return NULL;

		state->entry_idx = 0;
		state->handler = handler;
	} else {
		state->entry_idx++;
	}

	if (!state->handler)
		return NULL;

	/* get entry */
	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
			       offset);
	if (!entry) {
		/* end of a section, look for another section of this type */
		if (!ice_pkg_enum_section(NULL, state, 0))
			return NULL;

		state->entry_idx = 0;
		entry = state->handler(state->sect_type, state->sect,
				       state->entry_idx, offset);
	}

	return entry;
}
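
/* Usage sketch for ice_pkg_enum_entry(), mirroring the real callers below
 * (e.g. ice_find_boost_entry()). The handler interprets one section and hands
 * back entry 'index', or NULL when that section is exhausted:
 *
 *	struct ice_pkg_enum state;
 *	struct ice_boost_tcam_entry *e;
 *
 *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 *	e = (struct ice_boost_tcam_entry *)
 *		ice_pkg_enum_entry(ice_seg, &state,
 *				   ICE_SID_RXPARSER_BOOST_TCAM, NULL,
 *				   ice_boost_tcam_handler);
 *	while (e) {
 *		// inspect e->addr, etc.
 *		e = (struct ice_boost_tcam_entry *)
 *			ice_pkg_enum_entry(NULL, &state, 0, NULL,
 *					   ice_boost_tcam_handler);
 *	}
 *
 * On continuation calls the sect_type and handler arguments are ignored; the
 * values captured in the state on the first call are used.
 */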

/**
 * ice_boost_tcam_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the boost TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual boost TCAM entries.
 */
static void *
ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
{
	struct ice_boost_tcam_section *boost;

	if (!section)
		return NULL;

	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
		return NULL;

	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	boost = (struct ice_boost_tcam_section *)section;
	if (index >= LE16_TO_CPU(boost->count))
		return NULL;

	return boost->tcam + index;
}

/**
 * ice_find_boost_entry
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @addr: Boost TCAM address of entry to search for
 * @entry: returns pointer to the entry
 *
 * Finds a particular Boost TCAM entry and returns a pointer to that entry
 * if it is found. The ice_seg parameter must not be NULL since the first call
 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
 */
static enum ice_status
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
		     struct ice_boost_tcam_entry **entry)
{
	struct ice_boost_tcam_entry *tcam;
	struct ice_pkg_enum state;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!ice_seg)
		return ICE_ERR_PARAM;

	do {
		tcam = (struct ice_boost_tcam_entry *)
			ice_pkg_enum_entry(ice_seg, &state,
					   ICE_SID_RXPARSER_BOOST_TCAM, NULL,
					   ice_boost_tcam_handler);
		if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
			*entry = tcam;
			return ICE_SUCCESS;
		}

		ice_seg = NULL;
	} while (tcam);

	*entry = NULL;
	return ICE_ERR_CFG;
}

/**
 * ice_label_enum_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the label entry to be returned
 * @offset: pointer to receive absolute offset, always zero for label sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual label entries.
 */
static void *
ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
		       u32 *offset)
{
	struct ice_label_section *labels;

	if (!section)
		return NULL;

	if (index > ICE_MAX_LABELS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	labels = (struct ice_label_section *)section;
	if (index >= LE16_TO_CPU(labels->count))
		return NULL;

	return labels->label + index;
}

/**
 * ice_enum_labels
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @type: the section type that will contain the label (0 on subsequent calls)
 * @state: ice_pkg_enum structure that will hold the state of the enumeration
 * @value: pointer to a value that will return the label's value if found
 *
 * Enumerates a list of labels in the package. The caller will call
 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
 * the end of the list has been reached.
 */
static char *
ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
		u16 *value)
{
	struct ice_label *label;

	/* Check for valid label section on first call */
	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
		return NULL;

	label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
						       NULL,
						       ice_label_enum_handler);
	if (!label)
		return NULL;

	*value = LE16_TO_CPU(label->value);
	return label->name;
}

/**
 * ice_init_pkg_hints
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the segment of the package scan (non-NULL)
 *
 * This function will scan the package and save off relevant information
 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
 * since the first call to ice_enum_labels requires a pointer to an actual
 * ice_seg structure.
 */
static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;
	int i;

	ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!ice_seg)
		return;

	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
				     &val);

	while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
		for (i = 0; tnls[i].type != TNL_LAST; i++) {
			size_t len = strlen(tnls[i].label_prefix);

			/* Look for matching label start, before continuing */
			if (strncmp(label_name, tnls[i].label_prefix, len))
				continue;

			/* Make sure this label matches our PF. Note that the
			 * PF character ('0' - '7') will be located where our
			 * prefix string's null terminator is located.
			 */
			if ((label_name[len] - '0') == hw->pf_id) {
				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
				hw->tnl.tbl[hw->tnl.count].valid = false;
				hw->tnl.tbl[hw->tnl.count].in_use = false;
				hw->tnl.tbl[hw->tnl.count].marked = false;
				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
				hw->tnl.tbl[hw->tnl.count].port = 0;
				hw->tnl.count++;
				break;
			}
		}

		label_name = ice_enum_labels(NULL, 0, &state, &val);
	}

	/* Cache the appropriate boost TCAM entry pointers */
	for (i = 0; i < hw->tnl.count; i++) {
		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
				     &hw->tnl.tbl[i].boost_entry);
		if (hw->tnl.tbl[i].boost_entry)
			hw->tnl.tbl[i].valid = true;
	}
}

/* Key creation */

#define ICE_DC_KEY	0x1	/* don't care */
#define ICE_DC_KEYINV	0x1
#define ICE_NM_KEY	0x0	/* never match */
#define ICE_NM_KEYINV	0x0
#define ICE_0_KEY	0x1	/* match 0 */
#define ICE_0_KEYINV	0x0
#define ICE_1_KEY	0x0	/* match 1 */
#define ICE_1_KEYINV	0x1

/**
 * ice_gen_key_word - generate 16-bits of a key/mask word
 * @val: the value
 * @valid: valid bits mask (change only the valid bits)
 * @dont_care: don't care mask
 * @nvr_mtch: never match mask
 * @key: pointer to an array where the resulting key portion will be stored
 * @key_inv: pointer to an array where the resulting key invert portion will
 *	     be stored
 *
 * This function generates 16-bits from an 8-bit value, an 8-bit don't care
 * mask and an 8-bit never match mask. The 16-bits of output are divided into
 * 8 bits of key and 8 bits of key invert.
 *
 * '0' = b01, always match a 0 bit
 * '1' = b10, always match a 1 bit
 * '?' = b11, don't care bit (always matches)
 * '~' = b00, never match bit
 *
 * Input:
 *          val:         b0  1  0  1  0  1
 *          dont_care:   b0  0  1  1  0  0
 *          never_mtch:  b0  0  0  0  1  1
 *          ------------------------------
 * Result:  key:        b01 10 11 11 00 00
 */
static enum ice_status
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
		 u8 *key_inv)
{
	u8 in_key = *key, in_key_inv = *key_inv;
	u8 i;

	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
		return ICE_ERR_CFG;

	*key = 0;
	*key_inv = 0;

	/* encode the 8 bits into 8-bit key and 8-bit key invert */
	for (i = 0; i < 8; i++) {
		*key >>= 1;
		*key_inv >>= 1;

		if (!(valid & 0x1)) { /* change only valid bits */
			*key |= (in_key & 0x1) << 7;
			*key_inv |= (in_key_inv & 0x1) << 7;
		} else if (dont_care & 0x1) { /* don't care bit */
			*key |= ICE_DC_KEY << 7;
			*key_inv |= ICE_DC_KEYINV << 7;
		} else if (nvr_mtch & 0x1) { /* never match bit */
			*key |= ICE_NM_KEY << 7;
			*key_inv |= ICE_NM_KEYINV << 7;
		} else if (val & 0x01) { /* exact 1 match */
			*key |= ICE_1_KEY << 7;
			*key_inv |= ICE_1_KEYINV << 7;
		} else { /* exact 0 match */
			*key |= ICE_0_KEY << 7;
			*key_inv |= ICE_0_KEYINV << 7;
		}

		dont_care >>= 1;
		nvr_mtch >>= 1;
		valid >>= 1;
		val >>= 1;
		in_key >>= 1;
		in_key_inv >>= 1;
	}

	return ICE_SUCCESS;
}

/**
 * ice_bits_max_set - determine if the number of bits set is within a maximum
 * @mask: pointer to the byte array which is the mask
 * @size: the number of bytes in the mask
 * @max: the max number of set bits
 *
 * This function determines if there are at most 'max' number of bits set in an
 * array. Returns true if the number of bits set is <= max and false otherwise.
 */
static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
{
	u16 count = 0;
	u16 i;

	/* check each byte */
	for (i = 0; i < size; i++) {
		/* if 0, go to next byte */
		if (!mask[i])
			continue;

		/* We know there is at least one set bit in this byte because of
		 * the above check; if we already have found 'max' number of
		 * bits set, then we can return failure now.
		 */
		if (count == max)
			return false;

		/* count the bits in this byte, checking threshold */
		count += ice_hweight8(mask[i]);
		if (count > max)
			return false;
	}

	return true;
}

/**
 * ice_set_key - generate a variable sized key with multiples of 16-bits
 * @key: pointer to where the key will be stored
 * @size: the size of the complete key in bytes (must be even)
 * @val: array of 8-bit values that makes up the value portion of the key
 * @upd: array of 8-bit masks that determine what key portion to update
 * @dc: array of 8-bit masks that make up the don't care mask
 * @nm: array of 8-bit masks that make up the never match mask
 * @off: the offset of the first byte in the key to update
 * @len: the number of bytes in the key update
 *
 * This function generates a key from a value, a don't care mask and a never
 * match mask.
 * upd, dc, and nm are optional parameters, and can be NULL:
 *	upd == NULL --> upd mask is all 1's (update all bits)
 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 *	nm == NULL --> nm mask is all 0's (no never match bits)
 */
static enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
	    u16 len)
{
	u16 half_size;
	u16 i;

	/* size must be a multiple of 2 bytes. */
	if (size % 2)
		return ICE_ERR_CFG;
	half_size = size / 2;

	if (off + len > half_size)
		return ICE_ERR_CFG;

	/* Make sure at most one bit is set in the never match mask. Having more
	 * than one never match mask bit set will cause HW to consume excessive
	 * power otherwise; this is a power management efficiency check.
	 */
#define ICE_NVR_MTCH_BITS_MAX	1
	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
		return ICE_ERR_CFG;

	for (i = 0; i < len; i++)
		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
				     dc ? dc[i] : 0, nm ? nm[i] : 0,
				     key + off + i, key + half_size + off + i))
			return ICE_ERR_CFG;

	return ICE_SUCCESS;
}
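
/* Worked sketch for ice_set_key() with hypothetical values: update the first
 * two bytes of a 20-byte key (10 bytes of key plus 10 bytes of key invert),
 * matching 0x12 exactly in byte 0 and treating all of byte 1 as don't care:
 *
 *	u8 key[20] = { 0 };
 *	u8 val[2] = { 0x12, 0x00 };
 *	u8 dc[2] = { 0x00, 0xff };
 *
 *	if (ice_set_key(key, sizeof(key), val, NULL, dc, NULL, 0, 2))
 *		// handle ICE_ERR_CFG
 *
 * With upd == NULL every bit is updated; bytes 0-9 receive the key portion
 * and bytes 10-19 the key invert portion.
 */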

/**
 * ice_acquire_global_cfg_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the global config lock for reading
 * or writing of the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * ICE_SUCCESS        - Means the caller has acquired the global config lock
 *                      and can perform writing of the package.
 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
 *                      package or has found that no update was necessary; in
 *                      this case, the caller can just skip performing any
 *                      update of the package.
 */
static enum ice_status
ice_acquire_global_cfg_lock(struct ice_hw *hw,
			    enum ice_aq_res_access_type access)
{
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);

	if (status == ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");

	return status;
}
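
/* Caller pattern sketch: a write-access caller must treat ICE_ERR_AQ_NO_WORK
 * as "another driver already downloaded the package" rather than a failure:
 *
 *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 *	if (status == ICE_ERR_AQ_NO_WORK)
 *		// package already present; skip the download
 *	else if (status)
 *		// genuine error
 *	else {
 *		// download the buffers, then:
 *		ice_release_global_cfg_lock(hw);
 *	}
 *
 * ice_dwnld_cfg_bufs() below follows this pattern.
 */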

/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock.
 */
static void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}

/**
 * ice_acquire_change_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the change lock.
 */
static enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
			       ICE_CHANGE_LOCK_TIMEOUT);
}

/**
 * ice_release_change_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the change lock using the proper Admin Command.
 */
static void ice_release_change_lock(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}

/**
 * ice_aq_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package (0x0C40)
 */
static enum ice_status
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = LE32_TO_CPU(resp->error_offset);
		if (error_info)
			*error_info = LE32_TO_CPU(resp->error_info);
	}

	return status;
}

/**
 * ice_aq_upload_section
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer which will receive the section
 * @buf_size: the size of the package buffer
 * @cd: pointer to command details structure or NULL
 *
 * Upload Section (0x0C41)
 */
enum ice_status
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		      u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
}

/**
 * ice_aq_update_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Update Package (0x0C42)
 */
static enum ice_status
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
		  bool last_buf, u32 *error_offset, u32 *error_info,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = LE32_TO_CPU(resp->error_offset);
		if (error_info)
			*error_info = LE32_TO_CPU(resp->error_info);
	}

	return status;
}

/**
 * ice_find_seg_in_pkg
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */
static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    struct ice_pkg_hdr *pkg_hdr)
{
	u32 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
		  pkg_hdr->pkg_format_ver.update,
		  pkg_hdr->pkg_format_ver.draft);

	/* Search all package segments for the requested segment type */
	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
		struct ice_generic_seg_hdr *seg;

		seg = (struct ice_generic_seg_hdr *)
			((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));

		if (LE32_TO_CPU(seg->seg_type) == seg_type)
			return seg;
	}

	return NULL;
}

/**
 * ice_update_pkg
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains change lock and updates package.
 */
enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	u32 offset, info, i;

	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		return status;

	for (i = 0; i < count; i++) {
		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
		bool last = ((i + 1) == count);

		status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
					   last, &offset, &info, NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}
	}

	ice_release_change_lock(hw);

	return status;
}

/**
 * ice_dwnld_cfg_bufs
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains global config lock and downloads the package configuration buffers
 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
 * found indicates that the rest of the buffers are all metadata buffers.
 */
static enum ice_status
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	struct ice_buf_hdr *bh;
	u32 offset, info, i;

	if (!bufs || !count)
		return ICE_ERR_PARAM;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)bufs;
	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return ICE_SUCCESS;

	/* reset pkg_dwnld_status in case this function is called in the
	 * reset/rebuild flow
	 */
	hw->pkg_dwnld_status = ICE_AQ_RC_OK;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status) {
		if (status == ICE_ERR_AQ_NO_WORK)
			hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
		else
			hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		return status;
	}

	for (i = 0; i < count; i++) {
		bool last = ((i + 1) == count);

		if (!last) {
			/* check next buffer for metadata flag */
			bh = (struct ice_buf_hdr *)(bufs + i + 1);

			/* A set metadata flag in the next buffer will signal
			 * that the current buffer will be the last buffer
			 * downloaded
			 */
			if (LE16_TO_CPU(bh->section_count))
				if (LE32_TO_CPU(bh->section_entry[0].type) &
				    ICE_METADATA_BUF)
					last = true;
		}

		bh = (struct ice_buf_hdr *)(bufs + i);

		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
					     &offset, &info, NULL);

		/* Save AQ status from download package */
		hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
				  status, offset, info);

			break;
		}

		if (last)
			break;
	}

	ice_release_global_cfg_lock(hw);

	return status;
}

/**
 * ice_aq_get_pkg_info_list
 * @hw: pointer to the hardware structure
 * @pkg_info: the buffer which will receive the information list
 * @buf_size: the size of the pkg_info information buffer
 * @cd: pointer to command details structure or NULL
 *
 * Get Package Info List (0x0C43)
 */
static enum ice_status
ice_aq_get_pkg_info_list(struct ice_hw *hw,
			 struct ice_aqc_get_pkg_info_resp *pkg_info,
			 u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);

	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}

/**
 * ice_download_pkg
 * @hw: pointer to the hardware structure
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package.
 */
static enum ice_status
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_buf_table *ice_buf_tbl;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
		  ice_seg->hdr.seg_format_ver.major,
		  ice_seg->hdr.seg_format_ver.minor,
		  ice_seg->hdr.seg_format_ver.update,
		  ice_seg->hdr.seg_format_ver.draft);

	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
		  LE32_TO_CPU(ice_seg->hdr.seg_type),
		  LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);

	ice_buf_tbl = ice_find_buf_table(ice_seg);

	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
		  LE32_TO_CPU(ice_buf_tbl->buf_count));

	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
				  LE32_TO_CPU(ice_buf_tbl->buf_count));
}

/**
 * ice_init_pkg_info
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Saves off the package details into the HW structure.
 */
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_global_metadata_seg *meta_seg;
	struct ice_generic_seg_hdr *seg_hdr;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	if (!pkg_hdr)
		return ICE_ERR_PARAM;

	meta_seg = (struct ice_global_metadata_seg *)
		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
	if (meta_seg) {
		hw->pkg_ver = meta_seg->pkg_ver;
		ice_memcpy(hw->pkg_name, meta_seg->pkg_name,
			   sizeof(hw->pkg_name), ICE_NONDMA_TO_NONDMA);

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
			  meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
			  meta_seg->pkg_name);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "Did not find metadata segment in driver package\n");
		return ICE_ERR_CFG;
	}

	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
	if (seg_hdr) {
		hw->ice_pkg_ver = seg_hdr->seg_format_ver;
		ice_memcpy(hw->ice_pkg_name, seg_hdr->seg_id,
			   sizeof(hw->ice_pkg_name), ICE_NONDMA_TO_NONDMA);

		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_format_ver.major,
			  seg_hdr->seg_format_ver.minor,
			  seg_hdr->seg_format_ver.update,
			  seg_hdr->seg_format_ver.draft,
			  seg_hdr->seg_id);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
		return ICE_ERR_CFG;
	}

	return ICE_SUCCESS;
}

/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 */
static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
{
	struct ice_aqc_get_pkg_info_resp *pkg_info;
	enum ice_status status;
	u16 size;
	u32 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
	pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
	if (!pkg_info)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
	if (status)
		goto init_pkg_free_alloc;

	for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT	4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			hw->active_track_id =
				LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
			ice_memcpy(hw->active_pkg_name,
				   pkg_info->pkg_info[i].name,
				   sizeof(pkg_info->pkg_info[i].name),
				   ICE_NONDMA_TO_NONDMA);
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
			  i, pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

init_pkg_free_alloc:
	ice_free(hw, pkg_info);

	return status;
}

/**
 * ice_find_label_value
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @name: name of the label to search for
 * @type: the section type that will contain the label
 * @value: pointer to a value that will return the label's value if found
 *
 * Finds a label's value given the label name and the section type to search.
 * The ice_seg parameter must not be NULL since the first call to
 * ice_enum_labels requires a pointer to an actual ice_seg structure.
 */
enum ice_status
ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
		     u16 *value)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!ice_seg)
		return ICE_ERR_PARAM;

	do {
		label_name = ice_enum_labels(ice_seg, type, &state, &val);
		if (label_name && !strcmp(label_name, name)) {
			*value = val;
			return ICE_SUCCESS;
		}

		ice_seg = NULL;
	} while (label_name);

	return ICE_ERR_CFG;
}
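
/* Usage sketch (the label name follows the package convention described at
 * the top of this file): look up the boost TCAM address the package assigned
 * to PF0's VXLAN tunnel label:
 *
 *	u16 val;
 *
 *	if (!ice_find_label_value(hw->seg, "TNL_VXLAN_PF0",
 *				  ICE_SID_LBL_RXPARSER_TMEM, &val))
 *		// val now holds the label's value (a boost TCAM address)
 */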

/**
 * ice_verify_pkg - verify package
 * @pkg: pointer to the package buffer
 * @len: size of the package buffer
 *
 * Verifies various attributes of the package file, including length, format
 * version, and the requirement of at least one segment.
 */
static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
	u32 seg_count;
	u32 i;

	if (len < ice_struct_size(pkg, seg_offset, 1))
		return ICE_ERR_BUF_TOO_SHORT;

	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
		return ICE_ERR_CFG;

	/* pkg must have at least one segment */
	seg_count = LE32_TO_CPU(pkg->seg_count);
	if (seg_count < 1)
		return ICE_ERR_CFG;

	/* make sure segment array fits in package length */
	if (len < ice_struct_size(pkg, seg_offset, seg_count))
		return ICE_ERR_BUF_TOO_SHORT;

	/* all segments must fit within length */
	for (i = 0; i < seg_count; i++) {
		u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
		struct ice_generic_seg_hdr *seg;

		/* segment header must fit */
		if (len < off + sizeof(*seg))
			return ICE_ERR_BUF_TOO_SHORT;

		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);

		/* segment body must fit */
		if (len < off + LE32_TO_CPU(seg->seg_size))
			return ICE_ERR_BUF_TOO_SHORT;
	}

	return ICE_SUCCESS;
}

/**
 * ice_free_seg - free package segment pointer
 * @hw: pointer to the hardware structure
 *
 * Frees the package segment pointer in the proper manner, depending on if the
 * segment was allocated or just the passed in pointer was stored.
 */
void ice_free_seg(struct ice_hw *hw)
{
	if (hw->pkg_copy) {
		ice_free(hw, hw->pkg_copy);
		hw->pkg_copy = NULL;
		hw->pkg_size = 0;
	}
	hw->seg = NULL;
}

/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L	0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H	0x0000FFFF
#define ICE_SW_BLK_IDX		0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
}

/**
 * ice_chk_pkg_version - check package version for compatibility with driver
 * @pkg_ver: pointer to a version structure to check
 *
 * Check to make sure that the package about to be downloaded is compatible with
 * the driver. To be compatible, the major and minor components of the package
 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
 * definitions.
 */
static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
		return ICE_ERR_NOT_SUPPORTED;

	return ICE_SUCCESS;
}

/**
 * ice_chk_pkg_compat
 * @hw: pointer to the hardware structure
 * @ospkg: pointer to the package hdr
 * @seg: pointer to the package segment hdr
 *
 * This function checks the package version compatibility with the driver
 * and NVM.
 */
static enum ice_status
ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
		   struct ice_seg **seg)
{
	struct ice_aqc_get_pkg_info_resp *pkg;
	enum ice_status status;
	u16 size;
	u32 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Check package version compatibility */
	status = ice_chk_pkg_version(&hw->pkg_ver);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
		return status;
	}

	/* find ICE segment in given package */
	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
						     ospkg);
	if (!*seg) {
		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
		return ICE_ERR_CFG;
	}

	/* Check if FW is compatible with the OS package */
	size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
	pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
	if (!pkg)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
	if (status)
		goto fw_ddp_compat_free_alloc;

	for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
		/* loop till we find the NVM package */
		if (!pkg->pkg_info[i].is_in_nvm)
			continue;
		if ((*seg)->hdr.seg_format_ver.major !=
		    pkg->pkg_info[i].ver.major ||
		    (*seg)->hdr.seg_format_ver.minor >
		    pkg->pkg_info[i].ver.minor) {
			status = ICE_ERR_FW_DDP_MISMATCH;
			ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
		}
		/* done processing NVM package so break */
		break;
	}
fw_ddp_compat_free_alloc:
	ice_free(hw, pkg);
	return status;
}

/**
 * ice_sw_fv_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the field vector entry to be returned
 * @offset: ptr to variable that receives the offset in the field vector table
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * This function treats the given section as of type ice_sw_fv_section and
 * enumerates the offset field. "offset" is an index into the field vector
 * table.
 */
static void *
ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
{
	struct ice_sw_fv_section *fv_section =
		(struct ice_sw_fv_section *)section;

	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
		return NULL;
	if (index >= LE16_TO_CPU(fv_section->count))
		return NULL;
	if (offset)
		/* "index" passed in to this function is relative to a given
		 * 4k block. To get to the true index into the field vector
		 * table, we need to add the relative index to the base_offset
		 * field of this section.
		 */
		*offset = LE16_TO_CPU(fv_section->base_offset) + index;
	return fv_section->fv + index;
}

/**
 * ice_get_prof_index_max - get the max profile index for used profile
 * @hw: pointer to the HW struct
 *
 * Determines the maximum profile index among the profiles in use and stores
 * it in struct ice_switch_info *switch_info in the HW structure for later
 * use.
 */
static int ice_get_prof_index_max(struct ice_hw *hw)
{
	u16 prof_index = 0, j, max_prof_index = 0;
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	bool flag = false;
	struct ice_fv *fv;
	u32 offset;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!hw->seg)
		return ICE_ERR_PARAM;

	ice_seg = hw->seg;

	do {
		fv = (struct ice_fv *)
			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					   &offset, ice_sw_fv_handler);
		if (!fv)
			break;
		ice_seg = NULL;

		/* in profiles that are not used, prot_id is set to 0xff and
		 * off is set to 0x1ff for every field vector.
		 */
		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
				flag = true;
		if (flag && prof_index > max_prof_index)
			max_prof_index = prof_index;

		prof_index++;
		flag = false;
	} while (fv);

	hw->switch_info->max_used_prof_index = max_prof_index;

	return ICE_SUCCESS;
}

/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version. Then it finds the ice configuration segment
 * within the package; this function then saves a copy of the segment pointer
 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note that if
 * a previous PF driver has already downloaded the package successfully, then
 * the current driver will not have to download the package again.
 *
 * The local package contents will be used to query default behavior and to
 * update specific sections of the HW's version of the package (e.g. to update
 * the parse graph to understand new protocols).
 *
 * This function stores a pointer to the package buffer memory, and it is
 * expected that the supplied buffer will not be freed immediately. If the
 * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
 * case.
 */
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
	struct ice_pkg_hdr *pkg;
	enum ice_status status;
	struct ice_seg *seg;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	pkg = (struct ice_pkg_hdr *)buf;
	status = ice_verify_pkg(pkg, len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
			  status);
		return status;
	}

	/* initialize package info */
	status = ice_init_pkg_info(hw, pkg);
	if (status)
		return status;

	/* before downloading the package, check package version for
	 * compatibility with driver
	 */
	status = ice_chk_pkg_compat(hw, pkg, &seg);
	if (status)
		return status;

	/* initialize package hints and then download package */
	ice_init_pkg_hints(hw, seg);
	status = ice_download_pkg(hw, seg);
	if (status == ICE_ERR_AQ_NO_WORK) {
		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
		status = ICE_SUCCESS;
	}

	/* Get information on the package currently loaded in HW, then make sure
	 * the driver is compatible with this version.
	 */
	if (!status) {
		status = ice_get_pkg_info(hw);
		if (!status)
			status = ice_chk_pkg_version(&hw->active_pkg_ver);
	}

	if (!status) {
		hw->seg = seg;
		/* on successful package download update other required
		 * registers to support the package and fill HW tables
		 * with package content.
		 */
		ice_init_pkg_regs(hw);
		ice_fill_blk_tbls(hw);
		ice_get_prof_index_max(hw);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
			  status);
	}

	return status;
}

/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer, and then calls ice_init_pkg() to
 * initialize the copied package contents.
 *
 * The copying is necessary if the package buffer supplied is constant, or if
 * the memory may disappear shortly after calling this function.
 *
 * If the package buffer resides in the data segment and can be modified, the
 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
 *
 * However, if the package buffer needs to be copied first, such as when being
 * read from a file, the caller should use ice_copy_and_init_pkg().
 *
 * This function will first copy the package buffer, before calling
 * ice_init_pkg(). The caller is free to immediately destroy the original
 * package buffer, as the new copy will be managed by this function and
 * related routines.
 */
enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
{
	enum ice_status status;
	u8 *buf_copy;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);

	status = ice_init_pkg(hw, buf_copy, len);
	if (status) {
		/* Free the copy, since we failed to initialize the package */
		ice_free(hw, buf_copy);
	} else {
		/* Track the copied pkg so we can free it later */
		hw->pkg_copy = buf_copy;
		hw->pkg_size = len;
	}

	return status;
}
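
/* Usage sketch, assuming the DDP package was read into a temporary buffer
 * (the names here are illustrative, not a real caller):
 *
 *	const u8 *fw_buf;	// contents of the package file
 *	u32 fw_len;		// length of that buffer
 *
 *	status = ice_copy_and_init_pkg(hw, fw_buf, fw_len);
 *	// fw_buf may be freed immediately; hw->pkg_copy owns the copy and
 *	// is released later via ice_free_seg()
 */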

/**
 * ice_pkg_buf_alloc
 * @hw: pointer to the HW structure
 *
 * Allocates a package buffer and returns a pointer to the buffer header.
 * Note: all package contents must be in Little Endian form.
 */
static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
{
	struct ice_buf_build *bld;
	struct ice_buf_hdr *buf;

	bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
	if (!bld)
		return NULL;

	buf = (struct ice_buf_hdr *)bld;
	buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
					     section_entry));
	return bld;
}

/**
 * ice_get_sw_prof_type - determine switch profile type
 * @hw: pointer to the HW structure
 * @fv: pointer to the switch field vector
 */
static enum ice_prof_type
ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
{
	u16 i;

	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
		    fv->ew[i].off == ICE_VNI_OFFSET)
			return ICE_PROF_TUN_UDP;

		/* GRE tunnel will have GRE protocol */
		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
			return ICE_PROF_TUN_GRE;
	}

	return ICE_PROF_NON_TUN;
}

/**
 * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
 * @hw: pointer to hardware structure
 * @req_profs: type of profiles requested
 * @bm: pointer to memory for returning the bitmap of field vectors
 */
void
ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
		     ice_bitmap_t *bm)
{
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	struct ice_fv *fv;

	if (req_profs == ICE_PROF_ALL) {
		ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
		return;
	}

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
	ice_seg = hw->seg;
	do {
		enum ice_prof_type prof_type;
		u32 offset;

		fv = (struct ice_fv *)
			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					   &offset, ice_sw_fv_handler);
		ice_seg = NULL;

		if (fv) {
			/* Determine field vector type */
			prof_type = ice_get_sw_prof_type(hw, fv);

			if (req_profs & prof_type)
				ice_set_bit((u16)offset, bm);
		}
	} while (fv);
}

/**
 * ice_get_sw_fv_list
 * @hw: pointer to the HW structure
 * @prot_ids: field vector to search for with a given protocol ID
 * @ids_cnt: lookup/protocol count
 * @bm: bitmap of field vectors to consider
 * @fv_list: Head of a list
 *
 * Finds all the field vector entries from switch block that contain
 * a given protocol ID and returns a list of structures of type
 * "ice_sw_fv_list_entry". Every structure in the list has a field vector
 * definition and profile ID information.
 * NOTE: The caller of the function is responsible for freeing the memory
 * allocated for every list entry.
 */
enum ice_status
ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
		   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
{
	struct ice_sw_fv_list_entry *fvl;
	struct ice_sw_fv_list_entry *tmp;
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	struct ice_fv *fv;
	u32 offset;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!ids_cnt || !hw->seg)
		return ICE_ERR_PARAM;

	ice_seg = hw->seg;
	do {
		u16 i;

		fv = (struct ice_fv *)
			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					   &offset, ice_sw_fv_handler);
		if (!fv)
			break;
		ice_seg = NULL;

		/* If field vector is not in the bitmap list, then skip this
		 * profile.
		 */
		if (!ice_is_bit_set(bm, (u16)offset))
			continue;

		for (i = 0; i < ids_cnt; i++) {
			int j;

			/* This code assumes that if a switch field vector line
			 * has a matching protocol, then this line will contain
			 * the entries necessary to represent every field in
			 * that protocol header.
			 */
			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv->ew[j].prot_id == prot_ids[i])
					break;
			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
				break;
			if (i + 1 == ids_cnt) {
				fvl = (struct ice_sw_fv_list_entry *)
					ice_malloc(hw, sizeof(*fvl));
				if (!fvl)
					goto err;
				fvl->fv_ptr = fv;
				fvl->profile_id = offset;
				LIST_ADD(&fvl->list_entry, fv_list);
				break;
			}
		}
	} while (fv);
	if (LIST_EMPTY(fv_list))
		return ICE_ERR_CFG;
	return ICE_SUCCESS;

err:
	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
				 list_entry) {
		LIST_DEL(&fvl->list_entry);
		ice_free(hw, fvl);
	}

	return ICE_ERR_NO_MEMORY;
}
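
/* Caller sketch: consume and free the list built by ice_get_sw_fv_list().
 * As the NOTE above says, every entry is owned by the caller:
 *
 *	struct ice_sw_fv_list_entry *fvl, *tmp;
 *	struct LIST_HEAD_TYPE fv_list;
 *
 *	// ... ice_get_sw_fv_list(hw, prot_ids, ids_cnt, bm, &fv_list) ...
 *	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, &fv_list, ice_sw_fv_list_entry,
 *				 list_entry) {
 *		// use fvl->profile_id / fvl->fv_ptr
 *		LIST_DEL(&fvl->list_entry);
 *		ice_free(hw, fvl);
 *	}
 */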
1856 */
1857 for (i = 1; i < ICE_MAX_FV_WORDS; i++)
1858 if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
1859 fv->ew[i].off == ICE_FV_OFFSET_INVAL)
1860 ice_set_bit(i,
1861 hw->switch_info->prof_res_bm[off]);
1862 } while (fv);
1863 }
1864 
1865 /**
1866 * ice_pkg_buf_free
1867 * @hw: pointer to the HW structure
1868 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1869 *
1870 * Frees a package buffer
1871 */
1872 static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1873 {
1874 ice_free(hw, bld);
1875 }
1876 
1877 /**
1878 * ice_pkg_buf_reserve_section
1879 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1880 * @count: the number of sections to reserve
1881 *
1882 * Reserves one or more section table entries in a package buffer. This routine
1883 * can be called multiple times as long as every call is made before calling
1884 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
1885 * called, the number of reserved sections can no longer be increased; not
1886 * using all reserved sections is fine, but this will result in some wasted
1887 * space in the buffer.
1888 * Note: all package contents must be in Little Endian form.
1889 */
1890 static enum ice_status
1891 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
1892 {
1893 struct ice_buf_hdr *buf;
1894 u16 section_count;
1895 u16 data_end;
1896 
1897 if (!bld)
1898 return ICE_ERR_PARAM;
1899 
1900 buf = (struct ice_buf_hdr *)&bld->buf;
1901 
1902 /* already an active section, can't increase table size */
1903 section_count = LE16_TO_CPU(buf->section_count);
1904 if (section_count > 0)
1905 return ICE_ERR_CFG;
1906 
1907 if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
1908 return ICE_ERR_CFG;
1909 bld->reserved_section_table_entries += count;
1910 
1911 data_end = LE16_TO_CPU(buf->data_end) +
1912 (count * sizeof(buf->section_entry[0]));
1913 buf->data_end = CPU_TO_LE16(data_end);
1914 
1915 return ICE_SUCCESS;
1916 }
1917 
1918 /**
1919 * ice_pkg_buf_alloc_section
1920 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1921 * @type: the section type value
1922 * @size: the size of the section to reserve (in bytes)
1923 *
1924 * Reserves memory in the buffer for a section's content and updates the
1925 * buffer's status accordingly. This routine returns a pointer to the first
1926 * byte of the section start within the buffer, which is used to fill in the
1927 * section contents.
1928 * Note: all package contents must be in Little Endian form.
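 *
 * A minimal usage sketch of the flow these routines imply (the section ID
 * and size below are illustrative only):
 *
 *	struct ice_buf_build *bld = ice_pkg_buf_alloc(hw);
 *	void *sect;
 *
 *	if (bld && !ice_pkg_buf_reserve_section(bld, 1))
 *		sect = ice_pkg_buf_alloc_section(bld,
 *						 ICE_SID_RXPARSER_BOOST_TCAM,
 *						 64);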
1929 */
1930 static void *
1931 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
1932 {
1933 struct ice_buf_hdr *buf;
1934 u16 sect_count;
1935 u16 data_end;
1936 
1937 if (!bld || !type || !size)
1938 return NULL;
1939 
1940 buf = (struct ice_buf_hdr *)&bld->buf;
1941 
1942 /* check for enough space left in buffer */
1943 data_end = LE16_TO_CPU(buf->data_end);
1944 
1945 /* section start must align on 4 byte boundary */
1946 data_end = ICE_ALIGN(data_end, 4);
1947 
1948 if ((data_end + size) > ICE_MAX_S_DATA_END)
1949 return NULL;
1950 
1951 /* check for more available section table entries */
1952 sect_count = LE16_TO_CPU(buf->section_count);
1953 if (sect_count < bld->reserved_section_table_entries) {
1954 void *section_ptr = ((u8 *)buf) + data_end;
1955 
1956 buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
1957 buf->section_entry[sect_count].size = CPU_TO_LE16(size);
1958 buf->section_entry[sect_count].type = CPU_TO_LE32(type);
1959 
1960 data_end += size;
1961 buf->data_end = CPU_TO_LE16(data_end);
1962 
1963 buf->section_count = CPU_TO_LE16(sect_count + 1);
1964 return section_ptr;
1965 }
1966 
1967 /* no free section table entries */
1968 return NULL;
1969 }
1970 
1971 /**
1972 * ice_pkg_buf_alloc_single_section
1973 * @hw: pointer to the HW structure
1974 * @type: the section type value
1975 * @size: the size of the section to reserve (in bytes)
1976 * @section: returns pointer to the section
1977 *
1978 * Allocates a package buffer with a single section.
1979 * Note: all package contents must be in Little Endian form.
1980 */
1981 static struct ice_buf_build *
1982 ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
1983 void **section)
1984 {
1985 struct ice_buf_build *buf;
1986 
1987 if (!section)
1988 return NULL;
1989 
1990 buf = ice_pkg_buf_alloc(hw);
1991 if (!buf)
1992 return NULL;
1993 
1994 if (ice_pkg_buf_reserve_section(buf, 1))
1995 goto ice_pkg_buf_alloc_single_section_err;
1996 
1997 *section = ice_pkg_buf_alloc_section(buf, type, size);
1998 if (!*section)
1999 goto ice_pkg_buf_alloc_single_section_err;
2000 
2001 return buf;
2002 
2003 ice_pkg_buf_alloc_single_section_err:
2004 ice_pkg_buf_free(hw, buf);
2005 return NULL;
2006 }
2007 
2008 /**
2009 * ice_pkg_buf_unreserve_section
2010 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2011 * @count: the number of sections to unreserve
2012 *
2013 * Unreserves one or more section table entries in a package buffer, releasing
2014 * space that can be used for section data. This routine can be called
2015 * multiple times as long as every call is made before calling
2016 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
2017 * called, the number of reserved sections can no longer be changed; not
2018 * using all reserved sections is fine, but this will result in some wasted
2019 * space in the buffer.
2020 * Note: all package contents must be in Little Endian form.
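 *
 * As a note on the arithmetic involved: unreserving one entry moves data_end
 * back by sizeof(buf->section_entry[0]) bytes, making that much space
 * available for section data again.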
2021 */ 2022 enum ice_status 2023 ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count) 2024 { 2025 struct ice_buf_hdr *buf; 2026 u16 section_count; 2027 u16 data_end; 2028 2029 if (!bld) 2030 return ICE_ERR_PARAM; 2031 2032 buf = (struct ice_buf_hdr *)&bld->buf; 2033 2034 /* already an active section, can't decrease table size */ 2035 section_count = LE16_TO_CPU(buf->section_count); 2036 if (section_count > 0) 2037 return ICE_ERR_CFG; 2038 2039 if (count > bld->reserved_section_table_entries) 2040 return ICE_ERR_CFG; 2041 bld->reserved_section_table_entries -= count; 2042 2043 data_end = LE16_TO_CPU(buf->data_end) - 2044 (count * sizeof(buf->section_entry[0])); 2045 buf->data_end = CPU_TO_LE16(data_end); 2046 2047 return ICE_SUCCESS; 2048 } 2049 2050 /** 2051 * ice_pkg_buf_get_free_space 2052 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 2053 * 2054 * Returns the number of free bytes remaining in the buffer. 2055 * Note: all package contents must be in Little Endian form. 2056 */ 2057 u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld) 2058 { 2059 struct ice_buf_hdr *buf; 2060 2061 if (!bld) 2062 return 0; 2063 2064 buf = (struct ice_buf_hdr *)&bld->buf; 2065 return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end); 2066 } 2067 2068 /** 2069 * ice_pkg_buf_get_active_sections 2070 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 2071 * 2072 * Returns the number of active sections. Before using the package buffer 2073 * in an update package command, the caller should make sure that there is at 2074 * least one active section - otherwise, the buffer is not legal and should 2075 * not be used. 2076 * Note: all package contents must be in Little Endian form. 2077 */ 2078 static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) 2079 { 2080 struct ice_buf_hdr *buf; 2081 2082 if (!bld) 2083 return 0; 2084 2085 buf = (struct ice_buf_hdr *)&bld->buf; 2086 return LE16_TO_CPU(buf->section_count); 2087 } 2088 2089 /** 2090 * ice_pkg_buf 2091 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 2092 * 2093 * Return a pointer to the buffer's header 2094 */ 2095 static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) 2096 { 2097 if (!bld) 2098 return NULL; 2099 2100 return &bld->buf; 2101 } 2102 2103 /** 2104 * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage 2105 * @hw: pointer to the HW structure 2106 * @port: port to search for 2107 * @index: optionally returns index 2108 * 2109 * Returns whether a port is already in use as a tunnel, and optionally its 2110 * index 2111 */ 2112 static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index) 2113 { 2114 u16 i; 2115 2116 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 2117 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) { 2118 if (index) 2119 *index = i; 2120 return true; 2121 } 2122 2123 return false; 2124 } 2125 2126 /** 2127 * ice_tunnel_port_in_use 2128 * @hw: pointer to the HW structure 2129 * @port: port to search for 2130 * @index: optionally returns index 2131 * 2132 * Returns whether a port is already in use as a tunnel, and optionally its 2133 * index 2134 */ 2135 bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index) 2136 { 2137 bool res; 2138 2139 ice_acquire_lock(&hw->tnl_lock); 2140 res = ice_tunnel_port_in_use_hlpr(hw, port, index); 2141 ice_release_lock(&hw->tnl_lock); 2142 2143 return res; 2144 } 2145 2146 /** 2147 * ice_tunnel_get_type 2148 * @hw: pointer to the HW structure 
2149 * @port: port to search for
2150 * @type: returns tunnel type
2151 *
2152 * For a given port number, returns the type of tunnel it is associated with.
2153 */
2154 bool
2155 ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
2156 {
2157 bool res = false;
2158 u16 i;
2159 
2160 ice_acquire_lock(&hw->tnl_lock);
2161 
2162 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2163 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
2164 *type = hw->tnl.tbl[i].type;
2165 res = true;
2166 break;
2167 }
2168 
2169 ice_release_lock(&hw->tnl_lock);
2170 
2171 return res;
2172 }
2173 
2174 /**
2175 * ice_find_free_tunnel_entry
2176 * @hw: pointer to the HW structure
2177 * @type: tunnel type
2178 * @index: optionally returns index
2179 *
2180 * Returns whether there is a free tunnel entry, and optionally its index
2181 */
2182 static bool
2183 ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
2184 u16 *index)
2185 {
2186 u16 i;
2187 
2188 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2189 if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
2190 hw->tnl.tbl[i].type == type) {
2191 if (index)
2192 *index = i;
2193 return true;
2194 }
2195 
2196 return false;
2197 }
2198 
2199 /**
2200 * ice_get_open_tunnel_port - retrieve an open tunnel port
2201 * @hw: pointer to the HW structure
2202 * @type: tunnel type (TNL_ALL will return any open port)
2203 * @port: returns open port
2204 */
2205 bool
2206 ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
2207 u16 *port)
2208 {
2209 bool res = false;
2210 u16 i;
2211 
2212 ice_acquire_lock(&hw->tnl_lock);
2213 
2214 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2215 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2216 (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
2217 *port = hw->tnl.tbl[i].port;
2218 res = true;
2219 break;
2220 }
2221 
2222 ice_release_lock(&hw->tnl_lock);
2223 
2224 return res;
2225 }
2226 
2227 /**
2228 * ice_create_tunnel
2229 * @hw: pointer to the HW structure
2230 * @type: type of tunnel
2231 * @port: port of tunnel to create
2232 *
2233 * Create a tunnel by updating the parse graph in the parser. We do that by
2234 * creating a package buffer with the tunnel info and issuing an update package
2235 * command.
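 *
 * A minimal usage sketch (4789, the IANA-assigned VXLAN UDP port, is
 * illustrative only); a non-zero status means no suitable free entry was
 * found or the package update failed:
 *
 *	enum ice_status status;
 *
 *	status = ice_create_tunnel(hw, TNL_VXLAN, 4789);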
2236 */
2237 enum ice_status
2238 ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
2239 {
2240 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2241 enum ice_status status = ICE_ERR_MAX_LIMIT;
2242 struct ice_buf_build *bld;
2243 u16 index;
2244 
2245 ice_acquire_lock(&hw->tnl_lock);
2246 
2247 if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
2248 hw->tnl.tbl[index].ref++;
2249 status = ICE_SUCCESS;
2250 goto ice_create_tunnel_end;
2251 }
2252 
2253 if (!ice_find_free_tunnel_entry(hw, type, &index)) {
2254 status = ICE_ERR_OUT_OF_RANGE;
2255 goto ice_create_tunnel_end;
2256 }
2257 
2258 bld = ice_pkg_buf_alloc(hw);
2259 if (!bld) {
2260 status = ICE_ERR_NO_MEMORY;
2261 goto ice_create_tunnel_end;
2262 }
2263 
2264 /* allocate 2 sections, one for Rx parser, one for Tx parser */
2265 if (ice_pkg_buf_reserve_section(bld, 2))
2266 goto ice_create_tunnel_err;
2267 
2268 sect_rx = (struct ice_boost_tcam_section *)
2269 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2270 ice_struct_size(sect_rx, tcam, 1));
2271 if (!sect_rx)
2272 goto ice_create_tunnel_err;
2273 sect_rx->count = CPU_TO_LE16(1);
2274 
2275 sect_tx = (struct ice_boost_tcam_section *)
2276 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2277 ice_struct_size(sect_tx, tcam, 1));
2278 if (!sect_tx)
2279 goto ice_create_tunnel_err;
2280 sect_tx->count = CPU_TO_LE16(1);
2281 
2282 /* copy original boost entry to update package buffer */
2283 ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
2284 sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);
2285 
2286 /* overwrite the never-match dest port key bits with the encoded port
2287 * bits
2288 */
2289 ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
2290 (u8 *)&port, NULL, NULL, NULL,
2291 (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
2292 sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
2293 
2294 /* exact copy of entry to Tx section entry */
2295 ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
2296 ICE_NONDMA_TO_NONDMA);
2297 
2298 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2299 if (!status) {
2300 hw->tnl.tbl[index].port = port;
2301 hw->tnl.tbl[index].in_use = true;
2302 hw->tnl.tbl[index].ref = 1;
2303 }
2304 
2305 ice_create_tunnel_err:
2306 ice_pkg_buf_free(hw, bld);
2307 
2308 ice_create_tunnel_end:
2309 ice_release_lock(&hw->tnl_lock);
2310 
2311 return status;
2312 }
2313 
2314 /**
2315 * ice_destroy_tunnel
2316 * @hw: pointer to the HW structure
2317 * @port: port of tunnel to destroy (ignored if the all parameter is true)
2318 * @all: flag indicating that all tunnels should be destroyed
2319 *
2320 * Destroys a tunnel or all tunnels by creating an update package buffer
2321 * targeting the specific updates requested and then issuing an update
2322 * package command.
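 *
 * Minimal usage sketches (the port value is illustrative only), first
 * destroying a single port and then destroying every tunnel at once:
 *
 *	status = ice_destroy_tunnel(hw, 4789, false);
 *	status = ice_destroy_tunnel(hw, 0, true);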
2323 */
2324 enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
2325 {
2326 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2327 enum ice_status status = ICE_ERR_MAX_LIMIT;
2328 struct ice_buf_build *bld;
2329 u16 count = 0;
2330 u16 index;
2331 u16 size;
2332 u16 i, j = 0;
2333 
2334 ice_acquire_lock(&hw->tnl_lock);
2335 
2336 if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
2337 if (hw->tnl.tbl[index].ref > 1) {
2338 hw->tnl.tbl[index].ref--;
2339 status = ICE_SUCCESS;
2340 goto ice_destroy_tunnel_end;
2341 }
2342 
2343 /* determine count */
2344 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2345 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2346 (all || hw->tnl.tbl[i].port == port))
2347 count++;
2348 
2349 if (!count) {
2350 status = ICE_ERR_PARAM;
2351 goto ice_destroy_tunnel_end;
2352 }
2353 
2354 /* size of section - there is at least one entry */
2355 size = ice_struct_size(sect_rx, tcam, count);
2356 
2357 bld = ice_pkg_buf_alloc(hw);
2358 if (!bld) {
2359 status = ICE_ERR_NO_MEMORY;
2360 goto ice_destroy_tunnel_end;
2361 }
2362 
2363 /* allocate 2 sections, one for Rx parser, one for Tx parser */
2364 if (ice_pkg_buf_reserve_section(bld, 2))
2365 goto ice_destroy_tunnel_err;
2366 
2367 sect_rx = (struct ice_boost_tcam_section *)
2368 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2369 size);
2370 if (!sect_rx)
2371 goto ice_destroy_tunnel_err;
2372 sect_rx->count = CPU_TO_LE16(count);
2373 
2374 sect_tx = (struct ice_boost_tcam_section *)
2375 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2376 size);
2377 if (!sect_tx)
2378 goto ice_destroy_tunnel_err;
2379 sect_tx->count = CPU_TO_LE16(count);
2380 
2381 /* copy original boost entries to update package buffer, one copy to Rx
2382 * section, another copy to the Tx section; pack the matching entries
2383 * consecutively (index j) so every write stays within the sections
2384 */
2384 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2385 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2386 (all || hw->tnl.tbl[i].port == port)) {
2387 ice_memcpy(sect_rx->tcam + j,
2388 hw->tnl.tbl[i].boost_entry,
2389 sizeof(*sect_rx->tcam),
2390 ICE_NONDMA_TO_NONDMA);
2391 ice_memcpy(sect_tx->tcam + j,
2392 hw->tnl.tbl[i].boost_entry,
2393 sizeof(*sect_tx->tcam),
2394 ICE_NONDMA_TO_NONDMA);
j++;
2395 hw->tnl.tbl[i].marked = true;
2396 }
2397 
2398 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2399 if (!status)
2400 for (i = 0; i < hw->tnl.count &&
2401 i < ICE_TUNNEL_MAX_ENTRIES; i++)
2402 if (hw->tnl.tbl[i].marked) {
2403 hw->tnl.tbl[i].ref = 0;
2404 hw->tnl.tbl[i].port = 0;
2405 hw->tnl.tbl[i].in_use = false;
2406 hw->tnl.tbl[i].marked = false;
2407 }
2408 
2409 ice_destroy_tunnel_err:
2410 ice_pkg_buf_free(hw, bld);
2411 
2412 ice_destroy_tunnel_end:
2413 ice_release_lock(&hw->tnl_lock);
2414 
2415 return status;
2416 }
2417 
2418 /**
2419 * ice_replay_tunnels
2420 * @hw: pointer to the HW structure
2421 *
2422 * Replays all tunnels
2423 */
2424 enum ice_status ice_replay_tunnels(struct ice_hw *hw)
2425 {
2426 enum ice_status status = ICE_SUCCESS;
2427 u16 i;
2428 
2429 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2430 
2431 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) {
2432 enum ice_tunnel_type type = hw->tnl.tbl[i].type;
2433 u16 refs = hw->tnl.tbl[i].ref;
2434 u16 port = hw->tnl.tbl[i].port;
2435 
2436 if (!hw->tnl.tbl[i].in_use)
2437 continue;
2438 
2439 /* Replay tunnels one at a time by destroying them, then
2440 * recreating them
2441 */
2442 hw->tnl.tbl[i].ref = 1; /* make sure to destroy in one call */
2443 status = ice_destroy_tunnel(hw,
port, false);
2444 if (status) {
2445 ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - destroy tunnel port 0x%x\n",
2446 status, port);
2447 break;
2448 }
2449 
2450 status = ice_create_tunnel(hw, type, port);
2451 if (status) {
2452 ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - create tunnel port 0x%x\n",
2453 status, port);
2454 break;
2455 }
2456 
2457 /* reset to original ref count */
2458 hw->tnl.tbl[i].ref = refs;
2459 }
2460 
2461 return status;
2462 }
2463 
2464 /**
2465 * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
2466 * @hw: pointer to the hardware structure
2467 * @blk: hardware block
2468 * @prof: profile ID
2469 * @fv_idx: field vector word index
2470 * @prot: variable to receive the protocol ID
2471 * @off: variable to receive the protocol offset
2472 */
2473 enum ice_status
2474 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
2475 u8 *prot, u16 *off)
2476 {
2477 struct ice_fv_word *fv_ext;
2478 
2479 if (prof >= hw->blk[blk].es.count)
2480 return ICE_ERR_PARAM;
2481 
2482 if (fv_idx >= hw->blk[blk].es.fvw)
2483 return ICE_ERR_PARAM;
2484 
2485 fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
2486 
2487 *prot = fv_ext[fv_idx].prot_id;
2488 *off = fv_ext[fv_idx].off;
2489 
2490 return ICE_SUCCESS;
2491 }
2492 
2493 /* PTG Management */
2494 
2495 /**
2496 * ice_ptg_update_xlt1 - Updates packet type groups in HW via XLT1 table
2497 * @hw: pointer to the hardware structure
2498 * @blk: HW block
2499 *
2500 * This function will update the XLT1 hardware table to reflect the new
2501 * packet type group configuration.
2502 */
2503 enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
2504 {
2505 struct ice_xlt1_section *sect;
2506 struct ice_buf_build *bld;
2507 enum ice_status status;
2508 u16 index;
2509 
2510 bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
2511 ice_struct_size(sect, value,
2512 ICE_XLT1_CNT),
2513 (void **)&sect);
2514 if (!bld)
2515 return ICE_ERR_NO_MEMORY;
2516 
2517 sect->count = CPU_TO_LE16(ICE_XLT1_CNT);
2518 sect->offset = CPU_TO_LE16(0);
2519 for (index = 0; index < ICE_XLT1_CNT; index++)
2520 sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg;
2521 
2522 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2523 
2524 ice_pkg_buf_free(hw, bld);
2525 
2526 return status;
2527 }
2528 
2529 /**
2530 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
2531 * @hw: pointer to the hardware structure
2532 * @blk: HW block
2533 * @ptype: the ptype to search for
2534 * @ptg: pointer to variable that receives the PTG
2535 *
2536 * This function will search the PTGs for a particular ptype, returning the
2537 * PTG ID that contains it through the PTG parameter, with the value of
2538 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
2539 */
2540 static enum ice_status
2541 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
2542 {
2543 if (ptype >= ICE_XLT1_CNT || !ptg)
2544 return ICE_ERR_PARAM;
2545 
2546 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
2547 return ICE_SUCCESS;
2548 }
2549 
2550 /**
2551 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
2552 * @hw: pointer to the hardware structure
2553 * @blk: HW block
2554 * @ptg: the PTG to allocate
2555 *
2556 * This function allocates a given packet type group ID specified by the PTG
2557 * parameter.
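 *
 * A sketch of the pairing used later in this file (see
 * ice_init_sw_xlt1_db()): allocate the PTG by value, then attach each ptype
 * that belongs to it:
 *
 *	ice_ptg_alloc_val(hw, blk, ptg);
 *	ice_ptg_add_mv_ptype(hw, blk, ptype, ptg);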
2558 */
2559 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2560 {
2561 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
2562 }
2563 
2564 /**
2565 * ice_ptg_free - Frees a packet type group
2566 * @hw: pointer to the hardware structure
2567 * @blk: HW block
2568 * @ptg: the PTG ID to free
2569 *
2570 * This function frees a packet type group, and returns all the current ptypes
2571 * within it to the default PTG.
2572 */
2573 void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2574 {
2575 struct ice_ptg_ptype *p, *temp;
2576 
2577 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false;
2578 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2579 while (p) {
2580 p->ptg = ICE_DEFAULT_PTG;
2581 temp = p->next_ptype;
2582 p->next_ptype = NULL;
2583 p = temp;
2584 }
2585 
2586 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL;
2587 }
2588 
2589 /**
2590 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
2591 * @hw: pointer to the hardware structure
2592 * @blk: HW block
2593 * @ptype: the ptype to remove
2594 * @ptg: the PTG to remove the ptype from
2595 *
2596 * This function will remove the ptype from the specific PTG, and move it to
2597 * the default PTG (ICE_DEFAULT_PTG).
2598 */
2599 static enum ice_status
2600 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2601 {
2602 struct ice_ptg_ptype **ch;
2603 struct ice_ptg_ptype *p;
2604 
2605 if (ptype > ICE_XLT1_CNT - 1)
2606 return ICE_ERR_PARAM;
2607 
2608 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
2609 return ICE_ERR_DOES_NOT_EXIST;
2610 
2611 /* Should not happen if .in_use is set, bad config */
2612 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
2613 return ICE_ERR_CFG;
2614 
2615 /* find the ptype within this PTG, and bypass the link over it */
2616 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2617 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2618 while (p) {
2619 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
2620 *ch = p->next_ptype;
2621 break;
2622 }
2623 
2624 ch = &p->next_ptype;
2625 p = p->next_ptype;
2626 }
2627 
2628 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
2629 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
2630 
2631 return ICE_SUCCESS;
2632 }
2633 
2634 /**
2635 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
2636 * @hw: pointer to the hardware structure
2637 * @blk: HW block
2638 * @ptype: the ptype to add or move
2639 * @ptg: the PTG to add or move the ptype to
2640 *
2641 * This function will either add or move a ptype to a particular PTG depending
2642 * on whether the ptype is already part of another group. Note that using a
2643 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
2644 * default PTG.
2645 */
2646 static enum ice_status
2647 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2648 {
2649 enum ice_status status;
2650 u8 original_ptg;
2651 
2652 if (ptype > ICE_XLT1_CNT - 1)
2653 return ICE_ERR_PARAM;
2654 
2655 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
2656 return ICE_ERR_DOES_NOT_EXIST;
2657 
2658 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
2659 if (status)
2660 return status;
2661 
2662 /* Is ptype already in the correct PTG? */
2663 if (original_ptg == ptg)
2664 return ICE_SUCCESS;
2665 
2666 /* Remove from original PTG and move back to the default PTG */
2667 if (original_ptg != ICE_DEFAULT_PTG)
2668 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
2669 
2670 /* Moving to default PTG?
Then we're done with this request */
2671 if (ptg == ICE_DEFAULT_PTG)
2672 return ICE_SUCCESS;
2673 
2674 /* Add ptype to PTG at beginning of list */
2675 hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
2676 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2677 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
2678 &hw->blk[blk].xlt1.ptypes[ptype];
2679 
2680 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
2681 hw->blk[blk].xlt1.t[ptype] = ptg;
2682 
2683 return ICE_SUCCESS;
2684 }
2685 
2686 /* Block / table size info */
2687 struct ice_blk_size_details {
2688 u16 xlt1; /* # XLT1 entries */
2689 u16 xlt2; /* # XLT2 entries */
2690 u16 prof_tcam; /* # profile ID TCAM entries */
2691 u16 prof_id; /* # profile IDs */
2692 u8 prof_cdid_bits; /* # CDID one-hot bits used in key */
2693 u16 prof_redir; /* # profile redirection entries */
2694 u16 es; /* # extraction sequence entries */
2695 u16 fvw; /* # field vector words */
2696 u8 overwrite; /* overwrite existing entries allowed */
2697 u8 reverse; /* reverse FV order */
2698 };
2699 
2700 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2701 /**
2702 * Table Definitions
2703 * XLT1 - Number of entries in XLT1 table
2704 * XLT2 - Number of entries in XLT2 table
2705 * TCAM - Number of entries in the Profile ID TCAM table
 * PID - Number of profile IDs
2706 * CDID - Control Domain ID of the hardware block
2707 * PRED - Number of entries in the Profile Redirection Table
2708 * FV - Number of entries in the Field Vector
2709 * FVW - Width (in WORDs) of the Field Vector
2710 * OVR - Overwrite existing table entries
2711 * REV - Reverse FV
2712 */
2713 /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */
2714 /* Overwrite , Reverse FV */
2715 /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
2716 false, false },
2717 /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
2718 false, false },
2719 /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2720 false, true },
2721 /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2722 true, true },
2723 /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
2724 false, false },
2725 };
2726 
2727 enum ice_sid_all {
2728 ICE_SID_XLT1_OFF = 0,
2729 ICE_SID_XLT2_OFF,
2730 ICE_SID_PR_OFF,
2731 ICE_SID_PR_REDIR_OFF,
2732 ICE_SID_ES_OFF,
2733 ICE_SID_OFF_COUNT,
2734 };
2735 
2736 /* Characteristic handling */
2737 
2738 /**
2739 * ice_match_prop_lst - determine if properties of two lists match
2740 * @list1: first properties list
2741 * @list2: second properties list
2742 *
2743 * The count, the cookies, and the order must all match for the two lists to
2744 * be considered equivalent.
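 *
 * For example, lists with cookies {A, B} and {B, A} hold the same cookies
 * but do not match here, since the ordering encodes priority.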
2744 */
2745 static bool
2746 ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
2747 {
2748 struct ice_vsig_prof *tmp1;
2749 struct ice_vsig_prof *tmp2;
2750 u16 chk_count = 0;
2751 u16 count = 0;
2752 
2753 /* compare counts */
2754 LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list)
2755 count++;
2756 LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list)
2757 chk_count++;
2758 if (!count || count != chk_count)
2759 return false;
2760 
2761 tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list);
2762 tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list);
2763 
2764 /* profile cookies must match, and in the exact same order, to take
2765 * priority into account
2766 */
2767 while (count--) {
2768 if (tmp2->profile_cookie != tmp1->profile_cookie)
2769 return false;
2770 
2771 tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list);
2772 tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list);
2773 }
2774 
2775 return true;
2776 }
2777 
2778 /* VSIG Management */
2779 
2780 /**
2781 * ice_vsig_update_xlt2_sect - update one section of XLT2 table
2782 * @hw: pointer to the hardware structure
2783 * @blk: HW block
2784 * @vsi: HW VSI number to program
2785 * @vsig: VSIG for the VSI
2786 *
2787 * This function will update the XLT2 hardware table with the input VSI
2788 * group configuration.
2789 */
2790 static enum ice_status
2791 ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
2792 u16 vsig)
2793 {
2794 struct ice_xlt2_section *sect;
2795 struct ice_buf_build *bld;
2796 enum ice_status status;
2797 
2798 bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2),
2799 ice_struct_size(sect, value, 1),
2800 (void **)&sect);
2801 if (!bld)
2802 return ICE_ERR_NO_MEMORY;
2803 
2804 sect->count = CPU_TO_LE16(1);
2805 sect->offset = CPU_TO_LE16(vsi);
2806 sect->value[0] = CPU_TO_LE16(vsig);
2807 
2808 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2809 
2810 ice_pkg_buf_free(hw, bld);
2811 
2812 return status;
2813 }
2814 
2815 /**
2816 * ice_vsig_update_xlt2 - update XLT2 table with VSIG configuration
2817 * @hw: pointer to the hardware structure
2818 * @blk: HW block
2819 *
2820 * This function will update the XLT2 hardware table with the VSI
2821 * group configuration of the VSIs that have changed.
2822 */
2823 enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
2824 {
2825 u16 vsi;
2826 
2827 for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) {
2828 /* update only VSIs that have been changed */
2829 if (hw->blk[blk].xlt2.vsis[vsi].changed) {
2830 enum ice_status status;
2831 u16 vsig;
2832 
2833 vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2834 status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig);
2835 if (status)
2836 return status;
2837 
2838 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
2839 }
2840 }
2841 
2842 return ICE_SUCCESS;
2843 }
2844 
2845 /**
2846 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2847 * @hw: pointer to the hardware structure
2848 * @blk: HW block
2849 * @vsi: VSI of interest
2850 * @vsig: pointer to receive the VSI group
2851 *
2852 * This function will look up the VSI entry in the XLT2 list and return
2853 * the VSI group it is associated with.
2854 */
2855 enum ice_status
2856 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2857 {
2858 if (!vsig || vsi >= ICE_MAX_VSI)
2859 return ICE_ERR_PARAM;
2860 
2861 /* As long as there's a default or valid VSIG associated with the input
2862 * VSI, the function returns success.
Any handling of VSIG will be
2863 * done by the following add, update or remove functions.
2864 */
2865 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2866 
2867 return ICE_SUCCESS;
2868 }
2869 
2870 /**
2871 * ice_vsig_alloc_val - allocate a new VSIG by value
2872 * @hw: pointer to the hardware structure
2873 * @blk: HW block
2874 * @vsig: the VSIG to allocate
2875 *
2876 * This function will allocate a given VSIG specified by the VSIG parameter.
2877 */
2878 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2879 {
2880 u16 idx = vsig & ICE_VSIG_IDX_M;
2881 
2882 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2883 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2884 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2885 }
2886 
2887 return ICE_VSIG_VALUE(idx, hw->pf_id);
2888 }
2889 
2890 /**
2891 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2892 * @hw: pointer to the hardware structure
2893 * @blk: HW block
2894 *
2895 * This function will iterate through the VSIG list, mark the first
2896 * unused entry as used for the new VSIG, and return that value.
2897 */
2898 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2899 {
2900 u16 i;
2901 
2902 for (i = 1; i < ICE_MAX_VSIGS; i++)
2903 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2904 return ice_vsig_alloc_val(hw, blk, i);
2905 
2906 return ICE_DEFAULT_VSIG;
2907 }
2908 
2909 /**
2910 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2911 * @hw: pointer to the hardware structure
2912 * @blk: HW block
2913 * @chs: characteristic list
2914 * @vsig: returns the VSIG with the matching profiles, if found
2915 *
2916 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2917 * a group have the same characteristic set. To check whether there exists a
2918 * VSIG which has the same characteristics as the input characteristics, this
2919 * function will iterate through the XLT2 list and return the VSIG that has a
2920 * matching configuration. In order to make sure that priorities are accounted
2921 * for, the list must match exactly, including the order in which the
2922 * characteristics are listed.
2923 */
2924 static enum ice_status
2925 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2926 struct LIST_HEAD_TYPE *chs, u16 *vsig)
2927 {
2928 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2929 u16 i;
2930 
2931 for (i = 0; i < xlt2->count; i++)
2932 if (xlt2->vsig_tbl[i].in_use &&
2933 ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2934 *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2935 return ICE_SUCCESS;
2936 }
2937 
2938 return ICE_ERR_DOES_NOT_EXIST;
2939 }
2940 
2941 /**
2942 * ice_vsig_free - free VSI group
2943 * @hw: pointer to the hardware structure
2944 * @blk: HW block
2945 * @vsig: VSIG to remove
2946 *
2947 * The function will remove all VSIs associated with the input VSIG, move
2948 * them to the DEFAULT_VSIG, and mark the VSIG as available.
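 *
 * Note that only the index portion of @vsig (vsig & ICE_VSIG_IDX_M) selects
 * the table entry; the PF bits combined in by ICE_VSIG_VALUE() are not
 * consulted here.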
2949 */ 2950 static enum ice_status 2951 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) 2952 { 2953 struct ice_vsig_prof *dtmp, *del; 2954 struct ice_vsig_vsi *vsi_cur; 2955 u16 idx; 2956 2957 idx = vsig & ICE_VSIG_IDX_M; 2958 if (idx >= ICE_MAX_VSIGS) 2959 return ICE_ERR_PARAM; 2960 2961 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) 2962 return ICE_ERR_DOES_NOT_EXIST; 2963 2964 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false; 2965 2966 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 2967 /* If the VSIG has at least 1 VSI then iterate through the 2968 * list and remove the VSIs before deleting the group. 2969 */ 2970 if (vsi_cur) { 2971 /* remove all vsis associated with this VSIG XLT2 entry */ 2972 do { 2973 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; 2974 2975 vsi_cur->vsig = ICE_DEFAULT_VSIG; 2976 vsi_cur->changed = 1; 2977 vsi_cur->next_vsi = NULL; 2978 vsi_cur = tmp; 2979 } while (vsi_cur); 2980 2981 /* NULL terminate head of VSI list */ 2982 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL; 2983 } 2984 2985 /* free characteristic list */ 2986 LIST_FOR_EACH_ENTRY_SAFE(del, dtmp, 2987 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 2988 ice_vsig_prof, list) { 2989 LIST_DEL(&del->list); 2990 ice_free(hw, del); 2991 } 2992 2993 /* if VSIG characteristic list was cleared for reset 2994 * re-initialize the list head 2995 */ 2996 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); 2997 2998 return ICE_SUCCESS; 2999 } 3000 3001 /** 3002 * ice_vsig_remove_vsi - remove VSI from VSIG 3003 * @hw: pointer to the hardware structure 3004 * @blk: HW block 3005 * @vsi: VSI to remove 3006 * @vsig: VSI group to remove from 3007 * 3008 * The function will remove the input VSI from its VSI group and move it 3009 * to the DEFAULT_VSIG. 3010 */ 3011 static enum ice_status 3012 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) 3013 { 3014 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; 3015 u16 idx; 3016 3017 idx = vsig & ICE_VSIG_IDX_M; 3018 3019 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) 3020 return ICE_ERR_PARAM; 3021 3022 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) 3023 return ICE_ERR_DOES_NOT_EXIST; 3024 3025 /* entry already in default VSIG, don't have to remove */ 3026 if (idx == ICE_DEFAULT_VSIG) 3027 return ICE_SUCCESS; 3028 3029 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 3030 if (!(*vsi_head)) 3031 return ICE_ERR_CFG; 3032 3033 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; 3034 vsi_cur = (*vsi_head); 3035 3036 /* iterate the VSI list, skip over the entry to be removed */ 3037 while (vsi_cur) { 3038 if (vsi_tgt == vsi_cur) { 3039 (*vsi_head) = vsi_cur->next_vsi; 3040 break; 3041 } 3042 vsi_head = &vsi_cur->next_vsi; 3043 vsi_cur = vsi_cur->next_vsi; 3044 } 3045 3046 /* verify if VSI was removed from group list */ 3047 if (!vsi_cur) 3048 return ICE_ERR_DOES_NOT_EXIST; 3049 3050 vsi_cur->vsig = ICE_DEFAULT_VSIG; 3051 vsi_cur->changed = 1; 3052 vsi_cur->next_vsi = NULL; 3053 3054 return ICE_SUCCESS; 3055 } 3056 3057 /** 3058 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group 3059 * @hw: pointer to the hardware structure 3060 * @blk: HW block 3061 * @vsi: VSI to move 3062 * @vsig: destination VSI group 3063 * 3064 * This function will move or add the input VSI to the target VSIG. 3065 * The function will find the original VSIG the VSI belongs to and 3066 * move the entry to the DEFAULT_VSIG, update the original VSIG and 3067 * then move entry to the new VSIG. 
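 *
 * A minimal usage sketch (the VSI number 5 is illustrative only):
 *
 *	u16 vsig = ice_vsig_alloc(hw, blk);
 *
 *	if (vsig != ICE_DEFAULT_VSIG)
 *		status = ice_vsig_add_mv_vsi(hw, blk, 5, vsig);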
3068 */ 3069 static enum ice_status 3070 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) 3071 { 3072 struct ice_vsig_vsi *tmp; 3073 enum ice_status status; 3074 u16 orig_vsig, idx; 3075 3076 idx = vsig & ICE_VSIG_IDX_M; 3077 3078 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) 3079 return ICE_ERR_PARAM; 3080 3081 /* if VSIG not in use and VSIG is not default type this VSIG 3082 * doesn't exist. 3083 */ 3084 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && 3085 vsig != ICE_DEFAULT_VSIG) 3086 return ICE_ERR_DOES_NOT_EXIST; 3087 3088 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); 3089 if (status) 3090 return status; 3091 3092 /* no update required if vsigs match */ 3093 if (orig_vsig == vsig) 3094 return ICE_SUCCESS; 3095 3096 if (orig_vsig != ICE_DEFAULT_VSIG) { 3097 /* remove entry from orig_vsig and add to default VSIG */ 3098 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig); 3099 if (status) 3100 return status; 3101 } 3102 3103 if (idx == ICE_DEFAULT_VSIG) 3104 return ICE_SUCCESS; 3105 3106 /* Create VSI entry and add VSIG and prop_mask values */ 3107 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig; 3108 hw->blk[blk].xlt2.vsis[vsi].changed = 1; 3109 3110 /* Add new entry to the head of the VSIG list */ 3111 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 3112 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = 3113 &hw->blk[blk].xlt2.vsis[vsi]; 3114 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp; 3115 hw->blk[blk].xlt2.t[vsi] = vsig; 3116 3117 return ICE_SUCCESS; 3118 } 3119 3120 /** 3121 * ice_find_prof_id - find profile ID for a given field vector 3122 * @hw: pointer to the hardware structure 3123 * @blk: HW block 3124 * @fv: field vector to search for 3125 * @prof_id: receives the profile ID 3126 */ 3127 static enum ice_status 3128 ice_find_prof_id(struct ice_hw *hw, enum ice_block blk, 3129 struct ice_fv_word *fv, u8 *prof_id) 3130 { 3131 struct ice_es *es = &hw->blk[blk].es; 3132 u16 off; 3133 u8 i; 3134 3135 for (i = 0; i < (u8)es->count; i++) { 3136 off = i * es->fvw; 3137 3138 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv))) 3139 continue; 3140 3141 *prof_id = i; 3142 return ICE_SUCCESS; 3143 } 3144 3145 return ICE_ERR_DOES_NOT_EXIST; 3146 } 3147 3148 /** 3149 * ice_prof_id_rsrc_type - get profile ID resource type for a block type 3150 * @blk: the block type 3151 * @rsrc_type: pointer to variable to receive the resource type 3152 */ 3153 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type) 3154 { 3155 switch (blk) { 3156 case ICE_BLK_RSS: 3157 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID; 3158 break; 3159 case ICE_BLK_PE: 3160 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID; 3161 break; 3162 default: 3163 return false; 3164 } 3165 return true; 3166 } 3167 3168 /** 3169 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type 3170 * @blk: the block type 3171 * @rsrc_type: pointer to variable to receive the resource type 3172 */ 3173 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) 3174 { 3175 switch (blk) { 3176 case ICE_BLK_RSS: 3177 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM; 3178 break; 3179 case ICE_BLK_PE: 3180 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM; 3181 break; 3182 default: 3183 return false; 3184 } 3185 return true; 3186 } 3187 3188 /** 3189 * ice_alloc_tcam_ent - allocate hardware TCAM entry 3190 * @hw: pointer to the HW struct 3191 * @blk: the block to allocate the TCAM for 3192 * @btm: true to allocate from bottom of table, false to allocate from top 3193 * 
@tcam_idx: pointer to variable to receive the TCAM entry 3194 * 3195 * This function allocates a new entry in a Profile ID TCAM for a specific 3196 * block. 3197 */ 3198 static enum ice_status 3199 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, 3200 u16 *tcam_idx) 3201 { 3202 u16 res_type; 3203 3204 if (!ice_tcam_ent_rsrc_type(blk, &res_type)) 3205 return ICE_ERR_PARAM; 3206 3207 return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx); 3208 } 3209 3210 /** 3211 * ice_free_tcam_ent - free hardware TCAM entry 3212 * @hw: pointer to the HW struct 3213 * @blk: the block from which to free the TCAM entry 3214 * @tcam_idx: the TCAM entry to free 3215 * 3216 * This function frees an entry in a Profile ID TCAM for a specific block. 3217 */ 3218 static enum ice_status 3219 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) 3220 { 3221 u16 res_type; 3222 3223 if (!ice_tcam_ent_rsrc_type(blk, &res_type)) 3224 return ICE_ERR_PARAM; 3225 3226 return ice_free_hw_res(hw, res_type, 1, &tcam_idx); 3227 } 3228 3229 /** 3230 * ice_alloc_prof_id - allocate profile ID 3231 * @hw: pointer to the HW struct 3232 * @blk: the block to allocate the profile ID for 3233 * @prof_id: pointer to variable to receive the profile ID 3234 * 3235 * This function allocates a new profile ID, which also corresponds to a Field 3236 * Vector (Extraction Sequence) entry. 3237 */ 3238 static enum ice_status 3239 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) 3240 { 3241 enum ice_status status; 3242 u16 res_type; 3243 u16 get_prof; 3244 3245 if (!ice_prof_id_rsrc_type(blk, &res_type)) 3246 return ICE_ERR_PARAM; 3247 3248 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof); 3249 if (!status) 3250 *prof_id = (u8)get_prof; 3251 3252 return status; 3253 } 3254 3255 /** 3256 * ice_free_prof_id - free profile ID 3257 * @hw: pointer to the HW struct 3258 * @blk: the block from which to free the profile ID 3259 * @prof_id: the profile ID to free 3260 * 3261 * This function frees a profile ID, which also corresponds to a Field Vector. 
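 *
 * Note that freeing the ID does not clear the extraction sequence that was
 * written for it; ice_prof_dec_ref() below handles that by calling
 * ice_write_es() with a NULL field vector before freeing the last reference.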
3262 */
3263 static enum ice_status
3264 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3265 {
3266 u16 tmp_prof_id = (u16)prof_id;
3267 u16 res_type;
3268 
3269 if (!ice_prof_id_rsrc_type(blk, &res_type))
3270 return ICE_ERR_PARAM;
3271 
3272 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
3273 }
3274 
3275 /**
3276 * ice_prof_inc_ref - increment reference count for profile
3277 * @hw: pointer to the HW struct
3278 * @blk: the block in which the profile resides
3279 * @prof_id: the profile ID for which to increment the reference count
3280 */
3281 static enum ice_status
3282 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3283 {
3284 if (prof_id >= hw->blk[blk].es.count)
3285 return ICE_ERR_PARAM;
3286 
3287 hw->blk[blk].es.ref_count[prof_id]++;
3288 
3289 return ICE_SUCCESS;
3290 }
3291 
3292 /**
3293 * ice_write_es - write an extraction sequence to hardware
3294 * @hw: pointer to the HW struct
3295 * @blk: the block in which to write the extraction sequence
3296 * @prof_id: the profile ID to write
3297 * @fv: pointer to the extraction sequence to write - NULL to clear extraction
3298 */
3299 static void
3300 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
3301 struct ice_fv_word *fv)
3302 {
3303 u16 off;
3304 
3305 off = prof_id * hw->blk[blk].es.fvw;
3306 if (!fv) {
3307 ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw *
3308 sizeof(*fv), ICE_NONDMA_MEM);
3309 hw->blk[blk].es.written[prof_id] = false;
3310 } else {
3311 ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw *
3312 sizeof(*fv), ICE_NONDMA_TO_NONDMA);
3313 }
3314 }
3315 
3316 /**
3317 * ice_prof_dec_ref - decrement reference count for profile
3318 * @hw: pointer to the HW struct
3319 * @blk: the block in which the profile resides
3320 * @prof_id: the profile ID for which to decrement the reference count
3321 */
3322 static enum ice_status
3323 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3324 {
3325 if (prof_id >= hw->blk[blk].es.count)
3326 return ICE_ERR_PARAM;
3327 
3328 if (hw->blk[blk].es.ref_count[prof_id] > 0) {
3329 if (!--hw->blk[blk].es.ref_count[prof_id]) {
3330 ice_write_es(hw, blk, prof_id, NULL);
3331 return ice_free_prof_id(hw, blk, prof_id);
3332 }
3333 }
3334 
3335 return ICE_SUCCESS;
3336 }
3337 
3338 /* Block / table section IDs */
3339 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
3340 /* SWITCH */
3341 { ICE_SID_XLT1_SW,
3342 ICE_SID_XLT2_SW,
3343 ICE_SID_PROFID_TCAM_SW,
3344 ICE_SID_PROFID_REDIR_SW,
3345 ICE_SID_FLD_VEC_SW
3346 },
3347 
3348 /* ACL */
3349 { ICE_SID_XLT1_ACL,
3350 ICE_SID_XLT2_ACL,
3351 ICE_SID_PROFID_TCAM_ACL,
3352 ICE_SID_PROFID_REDIR_ACL,
3353 ICE_SID_FLD_VEC_ACL
3354 },
3355 
3356 /* FD */
3357 { ICE_SID_XLT1_FD,
3358 ICE_SID_XLT2_FD,
3359 ICE_SID_PROFID_TCAM_FD,
3360 ICE_SID_PROFID_REDIR_FD,
3361 ICE_SID_FLD_VEC_FD
3362 },
3363 
3364 /* RSS */
3365 { ICE_SID_XLT1_RSS,
3366 ICE_SID_XLT2_RSS,
3367 ICE_SID_PROFID_TCAM_RSS,
3368 ICE_SID_PROFID_REDIR_RSS,
3369 ICE_SID_FLD_VEC_RSS
3370 },
3371 
3372 /* PE */
3373 { ICE_SID_XLT1_PE,
3374 ICE_SID_XLT2_PE,
3375 ICE_SID_PROFID_TCAM_PE,
3376 ICE_SID_PROFID_REDIR_PE,
3377 ICE_SID_FLD_VEC_PE
3378 }
3379 };
3380 
3381 /**
3382 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
3383 * @hw: pointer to the hardware structure
3384 * @blk: the HW block to initialize
3385 */
3386 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3387 {
3388 u16 pt;
3389 
3390 for (pt = 0; pt <
hw->blk[blk].xlt1.count; pt++) {
3391 u8 ptg;
3392 
3393 ptg = hw->blk[blk].xlt1.t[pt];
3394 if (ptg != ICE_DEFAULT_PTG) {
3395 ice_ptg_alloc_val(hw, blk, ptg);
3396 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3397 }
3398 }
3399 }
3400 
3401 /**
3402 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
3403 * @hw: pointer to the hardware structure
3404 * @blk: the HW block to initialize
3405 */
3406 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3407 {
3408 u16 vsi;
3409 
3410 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3411 u16 vsig;
3412 
3413 vsig = hw->blk[blk].xlt2.t[vsi];
3414 if (vsig) {
3415 ice_vsig_alloc_val(hw, blk, vsig);
3416 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3417 /* no changes at this time, since this has been
3418 * initialized from the original package
3419 */
3420 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3421 }
3422 }
3423 }
3424 
3425 /**
3426 * ice_init_sw_db - init software database from HW tables
3427 * @hw: pointer to the hardware structure
3428 */
3429 static void ice_init_sw_db(struct ice_hw *hw)
3430 {
3431 u16 i;
3432 
3433 for (i = 0; i < ICE_BLK_COUNT; i++) {
3434 ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3435 ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3436 }
3437 }
3438 
3439 /**
3440 * ice_fill_tbl - Reads content of a single table type into database
3441 * @hw: pointer to the hardware structure
3442 * @block_id: Block ID of the table to copy
3443 * @sid: Section ID of the table to copy
3444 *
3445 * Will attempt to read the entire content of a given table of a single block
3446 * into the driver database. We assume that the buffer will always
3447 * be as large or larger than the data contained in the package. If
3448 * this condition is not met, there is most likely an error in the package
3449 * contents.
3450 */
3451 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3452 {
3453 u32 dst_len, sect_len, offset = 0;
3454 struct ice_prof_redir_section *pr;
3455 struct ice_prof_id_section *pid;
3456 struct ice_xlt1_section *xlt1;
3457 struct ice_xlt2_section *xlt2;
3458 struct ice_sw_fv_section *es;
3459 struct ice_pkg_enum state;
3460 u8 *src, *dst;
3461 void *sect;
3462 
3463 /* if the HW segment pointer is null then the first iteration of
3464 * ice_pkg_enum_section() will fail. In this case the HW tables are
3465 * left unfilled and the function simply returns.
3466 */
3467 if (!hw->seg) {
3468 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3469 return;
3470 }
3471 
3472 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
3473 
3474 sect = ice_pkg_enum_section(hw->seg, &state, sid);
3475 
3476 while (sect) {
3477 switch (sid) {
3478 case ICE_SID_XLT1_SW:
3479 case ICE_SID_XLT1_FD:
3480 case ICE_SID_XLT1_RSS:
3481 case ICE_SID_XLT1_ACL:
3482 case ICE_SID_XLT1_PE:
3483 xlt1 = (struct ice_xlt1_section *)sect;
3484 src = xlt1->value;
3485 sect_len = LE16_TO_CPU(xlt1->count) *
3486 sizeof(*hw->blk[block_id].xlt1.t);
3487 dst = hw->blk[block_id].xlt1.t;
3488 dst_len = hw->blk[block_id].xlt1.count *
3489 sizeof(*hw->blk[block_id].xlt1.t);
3490 break;
3491 case ICE_SID_XLT2_SW:
3492 case ICE_SID_XLT2_FD:
3493 case ICE_SID_XLT2_RSS:
3494 case ICE_SID_XLT2_ACL:
3495 case ICE_SID_XLT2_PE:
3496 xlt2 = (struct ice_xlt2_section *)sect;
3497 src = (_FORCE_ u8 *)xlt2->value;
3498 sect_len = LE16_TO_CPU(xlt2->count) *
3499 sizeof(*hw->blk[block_id].xlt2.t);
3500 dst = (u8 *)hw->blk[block_id].xlt2.t;
3501 dst_len = hw->blk[block_id].xlt2.count *
3502 sizeof(*hw->blk[block_id].xlt2.t);
3503 break;
3504 case ICE_SID_PROFID_TCAM_SW:
3505 case ICE_SID_PROFID_TCAM_FD:
3506 case ICE_SID_PROFID_TCAM_RSS:
3507 case ICE_SID_PROFID_TCAM_ACL:
3508 case ICE_SID_PROFID_TCAM_PE:
3509 pid = (struct ice_prof_id_section *)sect;
3510 src = (u8 *)pid->entry;
3511 sect_len = LE16_TO_CPU(pid->count) *
3512 sizeof(*hw->blk[block_id].prof.t);
3513 dst = (u8 *)hw->blk[block_id].prof.t;
3514 dst_len = hw->blk[block_id].prof.count *
3515 sizeof(*hw->blk[block_id].prof.t);
3516 break;
3517 case ICE_SID_PROFID_REDIR_SW:
3518 case ICE_SID_PROFID_REDIR_FD:
3519 case ICE_SID_PROFID_REDIR_RSS:
3520 case ICE_SID_PROFID_REDIR_ACL:
3521 case ICE_SID_PROFID_REDIR_PE:
3522 pr = (struct ice_prof_redir_section *)sect;
3523 src = pr->redir_value;
3524 sect_len = LE16_TO_CPU(pr->count) *
3525 sizeof(*hw->blk[block_id].prof_redir.t);
3526 dst = hw->blk[block_id].prof_redir.t;
3527 dst_len = hw->blk[block_id].prof_redir.count *
3528 sizeof(*hw->blk[block_id].prof_redir.t);
3529 break;
3530 case ICE_SID_FLD_VEC_SW:
3531 case ICE_SID_FLD_VEC_FD:
3532 case ICE_SID_FLD_VEC_RSS:
3533 case ICE_SID_FLD_VEC_ACL:
3534 case ICE_SID_FLD_VEC_PE:
3535 es = (struct ice_sw_fv_section *)sect;
3536 src = (u8 *)es->fv;
3537 sect_len = (u32)(LE16_TO_CPU(es->count) *
3538 hw->blk[block_id].es.fvw) *
3539 sizeof(*hw->blk[block_id].es.t);
3540 dst = (u8 *)hw->blk[block_id].es.t;
3541 dst_len = (u32)(hw->blk[block_id].es.count *
3542 hw->blk[block_id].es.fvw) *
3543 sizeof(*hw->blk[block_id].es.t);
3544 break;
3545 default:
3546 return;
3547 }
3548 
3549 /* if the section offset exceeds the destination length, terminate
3550 * the table fill.
3551 */
3552 if (offset > dst_len)
3553 return;
3554 
3555 /* if the sum of section size and offset exceeds the destination size
3556 * then we are out of bounds of the HW table size for that PF.
3557 * Clamp the section length to fill only the remaining table space
3558 * of that PF.
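 * For example, with dst_len = 1024 and offset = 992, a 64-byte
 * section would be truncated to the remaining 32 bytes.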
3559 */ 3560 if ((offset + sect_len) > dst_len) 3561 sect_len = dst_len - offset; 3562 3563 ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA); 3564 offset += sect_len; 3565 sect = ice_pkg_enum_section(NULL, &state, sid); 3566 } 3567 } 3568 3569 /** 3570 * ice_fill_blk_tbls - Read package context for tables 3571 * @hw: pointer to the hardware structure 3572 * 3573 * Reads the current package contents and populates the driver 3574 * database with the data iteratively for all advanced feature 3575 * blocks. Assume that the HW tables have been allocated. 3576 */ 3577 void ice_fill_blk_tbls(struct ice_hw *hw) 3578 { 3579 u8 i; 3580 3581 for (i = 0; i < ICE_BLK_COUNT; i++) { 3582 enum ice_block blk_id = (enum ice_block)i; 3583 3584 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid); 3585 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid); 3586 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid); 3587 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid); 3588 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid); 3589 } 3590 3591 ice_init_sw_db(hw); 3592 } 3593 3594 /** 3595 * ice_free_prof_map - free profile map 3596 * @hw: pointer to the hardware structure 3597 * @blk_idx: HW block index 3598 */ 3599 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx) 3600 { 3601 struct ice_es *es = &hw->blk[blk_idx].es; 3602 struct ice_prof_map *del, *tmp; 3603 3604 ice_acquire_lock(&es->prof_map_lock); 3605 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map, 3606 ice_prof_map, list) { 3607 LIST_DEL(&del->list); 3608 ice_free(hw, del); 3609 } 3610 INIT_LIST_HEAD(&es->prof_map); 3611 ice_release_lock(&es->prof_map_lock); 3612 } 3613 3614 /** 3615 * ice_free_flow_profs - free flow profile entries 3616 * @hw: pointer to the hardware structure 3617 * @blk_idx: HW block index 3618 */ 3619 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx) 3620 { 3621 struct ice_flow_prof *p, *tmp; 3622 3623 ice_acquire_lock(&hw->fl_profs_locks[blk_idx]); 3624 LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx], 3625 ice_flow_prof, l_entry) { 3626 LIST_DEL(&p->l_entry); 3627 3628 ice_free(hw, p); 3629 } 3630 ice_release_lock(&hw->fl_profs_locks[blk_idx]); 3631 3632 /* if driver is in reset and tables are being cleared 3633 * re-initialize the flow profile list heads 3634 */ 3635 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); 3636 } 3637 3638 /** 3639 * ice_free_vsig_tbl - free complete VSIG table entries 3640 * @hw: pointer to the hardware structure 3641 * @blk: the HW block on which to free the VSIG table entries 3642 */ 3643 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk) 3644 { 3645 u16 i; 3646 3647 if (!hw->blk[blk].xlt2.vsig_tbl) 3648 return; 3649 3650 for (i = 1; i < ICE_MAX_VSIGS; i++) 3651 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) 3652 ice_vsig_free(hw, blk, i); 3653 } 3654 3655 /** 3656 * ice_free_hw_tbls - free hardware table memory 3657 * @hw: pointer to the hardware structure 3658 */ 3659 void ice_free_hw_tbls(struct ice_hw *hw) 3660 { 3661 struct ice_rss_cfg *r, *rt; 3662 u8 i; 3663 3664 for (i = 0; i < ICE_BLK_COUNT; i++) { 3665 if (hw->blk[i].is_list_init) { 3666 struct ice_es *es = &hw->blk[i].es; 3667 3668 ice_free_prof_map(hw, i); 3669 ice_destroy_lock(&es->prof_map_lock); 3670 3671 ice_free_flow_profs(hw, i); 3672 ice_destroy_lock(&hw->fl_profs_locks[i]); 3673 3674 hw->blk[i].is_list_init = false; 3675 } 3676 ice_free_vsig_tbl(hw, (enum ice_block)i); 3677 ice_free(hw, hw->blk[i].xlt1.ptypes); 3678 ice_free(hw, hw->blk[i].xlt1.ptg_tbl); 3679 ice_free(hw, 
hw->blk[i].xlt1.t); 3680 ice_free(hw, hw->blk[i].xlt2.t); 3681 ice_free(hw, hw->blk[i].xlt2.vsig_tbl); 3682 ice_free(hw, hw->blk[i].xlt2.vsis); 3683 ice_free(hw, hw->blk[i].prof.t); 3684 ice_free(hw, hw->blk[i].prof_redir.t); 3685 ice_free(hw, hw->blk[i].es.t); 3686 ice_free(hw, hw->blk[i].es.ref_count); 3687 ice_free(hw, hw->blk[i].es.written); 3688 } 3689 3690 LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head, 3691 ice_rss_cfg, l_entry) { 3692 LIST_DEL(&r->l_entry); 3693 ice_free(hw, r); 3694 } 3695 ice_destroy_lock(&hw->rss_locks); 3696 ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM); 3697 } 3698 3699 /** 3700 * ice_init_flow_profs - init flow profile locks and list heads 3701 * @hw: pointer to the hardware structure 3702 * @blk_idx: HW block index 3703 */ 3704 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx) 3705 { 3706 ice_init_lock(&hw->fl_profs_locks[blk_idx]); 3707 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); 3708 } 3709 3710 /** 3711 * ice_clear_hw_tbls - clear HW tables and flow profiles 3712 * @hw: pointer to the hardware structure 3713 */ 3714 void ice_clear_hw_tbls(struct ice_hw *hw) 3715 { 3716 u8 i; 3717 3718 for (i = 0; i < ICE_BLK_COUNT; i++) { 3719 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; 3720 struct ice_prof_tcam *prof = &hw->blk[i].prof; 3721 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; 3722 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; 3723 struct ice_es *es = &hw->blk[i].es; 3724 3725 if (hw->blk[i].is_list_init) { 3726 ice_free_prof_map(hw, i); 3727 ice_free_flow_profs(hw, i); 3728 } 3729 3730 ice_free_vsig_tbl(hw, (enum ice_block)i); 3731 3732 ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes), 3733 ICE_NONDMA_MEM); 3734 ice_memset(xlt1->ptg_tbl, 0, 3735 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl), 3736 ICE_NONDMA_MEM); 3737 ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t), 3738 ICE_NONDMA_MEM); 3739 3740 ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis), 3741 ICE_NONDMA_MEM); 3742 ice_memset(xlt2->vsig_tbl, 0, 3743 xlt2->count * sizeof(*xlt2->vsig_tbl), 3744 ICE_NONDMA_MEM); 3745 ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t), 3746 ICE_NONDMA_MEM); 3747 3748 ice_memset(prof->t, 0, prof->count * sizeof(*prof->t), 3749 ICE_NONDMA_MEM); 3750 ice_memset(prof_redir->t, 0, 3751 prof_redir->count * sizeof(*prof_redir->t), 3752 ICE_NONDMA_MEM); 3753 3754 ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw, 3755 ICE_NONDMA_MEM); 3756 ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count), 3757 ICE_NONDMA_MEM); 3758 ice_memset(es->written, 0, es->count * sizeof(*es->written), 3759 ICE_NONDMA_MEM); 3760 } 3761 } 3762 3763 /** 3764 * ice_init_hw_tbls - init hardware table memory 3765 * @hw: pointer to the hardware structure 3766 */ 3767 enum ice_status ice_init_hw_tbls(struct ice_hw *hw) 3768 { 3769 u8 i; 3770 3771 ice_init_lock(&hw->rss_locks); 3772 INIT_LIST_HEAD(&hw->rss_list_head); 3773 for (i = 0; i < ICE_BLK_COUNT; i++) { 3774 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; 3775 struct ice_prof_tcam *prof = &hw->blk[i].prof; 3776 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; 3777 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; 3778 struct ice_es *es = &hw->blk[i].es; 3779 u16 j; 3780 3781 if (hw->blk[i].is_list_init) 3782 continue; 3783 3784 ice_init_flow_profs(hw, i); 3785 ice_init_lock(&es->prof_map_lock); 3786 INIT_LIST_HEAD(&es->prof_map); 3787 hw->blk[i].is_list_init = true; 3788 3789 hw->blk[i].overwrite = blk_sizes[i].overwrite; 3790 es->reverse = blk_sizes[i].reverse; 3791 3792 xlt1->sid 
		xlt1->count = blk_sizes[i].xlt1;

		xlt1->ptypes = (struct ice_ptg_ptype *)
			ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));

		if (!xlt1->ptypes)
			goto err;

		xlt1->ptg_tbl = (struct ice_ptg_entry *)
			ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));

		if (!xlt1->ptg_tbl)
			goto err;

		xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
		if (!xlt1->t)
			goto err;

		xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
		xlt2->count = blk_sizes[i].xlt2;

		xlt2->vsis = (struct ice_vsig_vsi *)
			ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));

		if (!xlt2->vsis)
			goto err;

		xlt2->vsig_tbl = (struct ice_vsig_entry *)
			ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
		if (!xlt2->vsig_tbl)
			goto err;

		for (j = 0; j < xlt2->count; j++)
			INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);

		xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
		if (!xlt2->t)
			goto err;

		prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
		prof->count = blk_sizes[i].prof_tcam;
		prof->max_prof_id = blk_sizes[i].prof_id;
		prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
		prof->t = (struct ice_prof_tcam_entry *)
			ice_calloc(hw, prof->count, sizeof(*prof->t));

		if (!prof->t)
			goto err;

		prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
		prof_redir->count = blk_sizes[i].prof_redir;
		prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
						 sizeof(*prof_redir->t));

		if (!prof_redir->t)
			goto err;

		es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
		es->count = blk_sizes[i].es;
		es->fvw = blk_sizes[i].fvw;
		es->t = (struct ice_fv_word *)
			ice_calloc(hw, (u32)(es->count * es->fvw),
				   sizeof(*es->t));
		if (!es->t)
			goto err;

		es->ref_count = (u16 *)
			ice_calloc(hw, es->count, sizeof(*es->ref_count));

		if (!es->ref_count)
			goto err;

		es->written = (u8 *)
			ice_calloc(hw, es->count, sizeof(*es->written));

		if (!es->written)
			goto err;
	}
	return ICE_SUCCESS;

err:
	ice_free_hw_tbls(hw);
	return ICE_ERR_NO_MEMORY;
}
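
/* Illustrative sketch (not compiled into the driver): one plausible lifecycle
 * for the table memory managed above. ice_init_hw_tbls() allocates the
 * per-block XLT1/XLT2/TCAM/ES shadow tables, ice_clear_hw_tbls() re-zeroes
 * their contents across a reset without freeing, and ice_free_hw_tbls()
 * releases everything at teardown. The function below is hypothetical and
 * exists only to show the expected call order.
 */
#if 0
static enum ice_status example_tbl_lifecycle(struct ice_hw *hw)
{
	enum ice_status status;

	/* allocate and initialize all block tables once, early in init */
	status = ice_init_hw_tbls(hw);
	if (status)
		return status;

	/* ... normal operation: profiles and flows are added/removed ... */

	/* across a reset, wipe table contents but keep the memory */
	ice_clear_hw_tbls(hw);

	/* at driver detach, release all table memory */
	ice_free_hw_tbls(hw);

	return ICE_SUCCESS;
}
#endif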

/**
 * ice_prof_gen_key - generate profile ID key
 * @hw: pointer to the HW struct
 * @blk: the block in which to write the profile ID
 * @ptg: packet type group (PTG) portion of key
 * @vsig: VSIG portion of key
 * @cdid: CDID portion of key
 * @flags: flag portion of key
 * @vl_msk: valid mask
 * @dc_msk: don't care mask
 * @nm_msk: never match mask
 * @key: output of profile ID key
 */
static enum ice_status
ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
		 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
		 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
		 u8 key[ICE_TCAM_KEY_SZ])
{
	struct ice_prof_id_key inkey;

	inkey.xlt1 = ptg;
	inkey.xlt2_cdid = CPU_TO_LE16(vsig);
	inkey.flags = CPU_TO_LE16(flags);

	/* The shift must place BIT(cdid) within the CDID mask, which occupies
	 * the top cdid_bits of the 16-bit XLT2/CDID field.
	 */
	switch (hw->blk[blk].prof.cdid_bits) {
	case 0:
		break;
	case 2:
#define ICE_CD_2_M 0xC000U
#define ICE_CD_2_S 14
		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M);
		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S);
		break;
	case 4:
#define ICE_CD_4_M 0xF000U
#define ICE_CD_4_S 12
		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M);
		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S);
		break;
	case 8:
#define ICE_CD_8_M 0xFF00U
#define ICE_CD_8_S 8
		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M);
		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S);
		break;
	default:
		ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
		break;
	}

	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
}

/**
 * ice_tcam_write_entry - write TCAM entry
 * @hw: pointer to the HW struct
 * @blk: the block in which to write the profile ID
 * @idx: the entry index to write to
 * @prof_id: profile ID
 * @ptg: packet type group (PTG) portion of key
 * @vsig: VSIG portion of key
 * @cdid: CDID portion of key
 * @flags: flag portion of key
 * @vl_msk: valid mask
 * @dc_msk: don't care mask
 * @nm_msk: never match mask
 */
static enum ice_status
ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
		     u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
		     u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
		     u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
		     u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
{
	enum ice_status status;

	status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
				  dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
	if (!status) {
		hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
		hw->blk[blk].prof.t[idx].prof_id = prof_id;
	}

	return status;
}

/**
 * ice_vsig_get_ref - returns the number of VSIs that belong to a VSIG
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: VSIG to query
 * @refs: pointer to variable to receive the reference count
 */
static enum ice_status
ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;
	struct ice_vsig_vsi *ptr;

	*refs = 0;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	while (ptr) {
		(*refs)++;
		ptr = ptr->next_vsi;
	}

	return ICE_SUCCESS;
}

/**
 * ice_has_prof_vsig - check to see if VSIG has a specific profile
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: VSIG to check against
 * @hdl: profile handle
 */
static bool
ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;
	struct ice_vsig_prof *ent;

	LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    ice_vsig_prof, list)
		if (ent->profile_cookie == hdl)
			return true;

	ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
		  vsig);
	return false;
}
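
/* Illustrative sketch (not compiled): how the two queries above compose. A
 * VSIG that was never allocated makes ice_vsig_get_ref() fail, and a profile
 * already present is reported by ice_has_prof_vsig(). The wrapper below is
 * hypothetical; vsig and hdl come from the caller.
 */
#if 0
static bool
example_vsig_can_add_prof(struct ice_hw *hw, enum ice_block blk, u16 vsig,
			  u64 hdl)
{
	u16 refs;

	/* an unallocated VSIG yields ICE_ERR_DOES_NOT_EXIST */
	if (ice_vsig_get_ref(hw, blk, vsig, &refs))
		return false;

	/* adding the same profile handle twice is rejected elsewhere with
	 * ICE_ERR_ALREADY_EXISTS, so check for it first
	 */
	return !ice_has_prof_vsig(hw, blk, vsig, hdl);
}
#endif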

/**
 * ice_prof_bld_es - build profile ID extraction sequence changes
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @bld: the update package buffer build to add to
 * @chgs: the list of changes to make in hardware
 */
static enum ice_status
ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
		struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
{
	u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
	struct ice_chs_chg *tmp;

	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
			u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
			struct ice_pkg_es *p;
			u32 id;

			id = ice_sect_id(blk, ICE_VEC_TBL);
			p = (struct ice_pkg_es *)
				ice_pkg_buf_alloc_section(bld, id,
							  ice_struct_size(p, es,
									  1) +
							  vec_size -
							  sizeof(p->es[0]));

			if (!p)
				return ICE_ERR_MAX_LIMIT;

			p->count = CPU_TO_LE16(1);
			p->offset = CPU_TO_LE16(tmp->prof_id);

			ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
				   ICE_NONDMA_TO_NONDMA);
		}

	return ICE_SUCCESS;
}

/**
 * ice_prof_bld_tcam - build profile ID TCAM changes
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @bld: the update package buffer build to add to
 * @chgs: the list of changes to make in hardware
 */
static enum ice_status
ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
		  struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
{
	struct ice_chs_chg *tmp;

	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
		if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
			struct ice_prof_id_section *p;
			u32 id;

			id = ice_sect_id(blk, ICE_PROF_TCAM);
			p = (struct ice_prof_id_section *)
				ice_pkg_buf_alloc_section(bld, id,
							  ice_struct_size(p,
									  entry,
									  1));

			if (!p)
				return ICE_ERR_MAX_LIMIT;

			p->count = CPU_TO_LE16(1);
			p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx);
			p->entry[0].prof_id = tmp->prof_id;

			ice_memcpy(p->entry[0].key,
				   &hw->blk[blk].prof.t[tmp->tcam_idx].key,
				   sizeof(hw->blk[blk].prof.t->key),
				   ICE_NONDMA_TO_NONDMA);
		}

	return ICE_SUCCESS;
}

/**
 * ice_prof_bld_xlt1 - build XLT1 changes
 * @blk: hardware block
 * @bld: the update package buffer build to add to
 * @chgs: the list of changes to make in hardware
 */
static enum ice_status
ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
		  struct LIST_HEAD_TYPE *chgs)
{
	struct ice_chs_chg *tmp;

	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
			struct ice_xlt1_section *p;
			u32 id;

			id = ice_sect_id(blk, ICE_XLT1);
			p = (struct ice_xlt1_section *)
				ice_pkg_buf_alloc_section(bld, id,
							  ice_struct_size(p,
									  value,
									  1));

			if (!p)
				return ICE_ERR_MAX_LIMIT;

			p->count = CPU_TO_LE16(1);
			p->offset = CPU_TO_LE16(tmp->ptype);
			p->value[0] = tmp->ptg;
		}

	return ICE_SUCCESS;
}

/**
 * ice_prof_bld_xlt2 - build XLT2 changes
 * @blk: hardware block
 * @bld: the update package buffer build to add to
 * @chgs: the list of changes to make in hardware
 */
static enum ice_status
ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
		  struct LIST_HEAD_TYPE *chgs)
{
	struct ice_chs_chg *tmp;

	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
		struct ice_xlt2_section *p;
		u32 id;

		switch (tmp->type) {
		case ICE_VSIG_ADD:
		case ICE_VSI_MOVE:
		case ICE_VSIG_REM:
			id = ice_sect_id(blk, ICE_XLT2);
			p = (struct ice_xlt2_section *)
				ice_pkg_buf_alloc_section(bld, id,
							  ice_struct_size(p,
									  value,
									  1));

			if (!p)
				return ICE_ERR_MAX_LIMIT;

			p->count = CPU_TO_LE16(1);
			p->offset = CPU_TO_LE16(tmp->vsi);
			p->value[0] = CPU_TO_LE16(tmp->vsig);
			break;
		default:
			break;
		}
	}

	return ICE_SUCCESS;
}

/**
 * ice_upd_prof_hw - update hardware using the change list
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @chgs: the list of changes to make in hardware
 */
static enum ice_status
ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
		struct LIST_HEAD_TYPE *chgs)
{
	struct ice_buf_build *b;
	struct ice_chs_chg *tmp;
	enum ice_status status;
	u16 pkg_sects;
	u16 xlt1 = 0;
	u16 xlt2 = 0;
	u16 tcam = 0;
	u16 es = 0;
	u16 sects;

	/* count number of sections we need */
	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
		switch (tmp->type) {
		case ICE_PTG_ES_ADD:
			if (tmp->add_ptg)
				xlt1++;
			if (tmp->add_prof)
				es++;
			break;
		case ICE_TCAM_ADD:
			tcam++;
			break;
		case ICE_VSIG_ADD:
		case ICE_VSI_MOVE:
		case ICE_VSIG_REM:
			xlt2++;
			break;
		default:
			break;
		}
	}
	sects = xlt1 + xlt2 + tcam + es;

	if (!sects)
		return ICE_SUCCESS;

	/* Build update package buffer */
	b = ice_pkg_buf_alloc(hw);
	if (!b)
		return ICE_ERR_NO_MEMORY;

	status = ice_pkg_buf_reserve_section(b, sects);
	if (status)
		goto error_tmp;

	/* Preserve order of table update: ES, TCAM, PTG, VSIG */
	if (es) {
		status = ice_prof_bld_es(hw, blk, b, chgs);
		if (status)
			goto error_tmp;
	}

	if (tcam) {
		status = ice_prof_bld_tcam(hw, blk, b, chgs);
		if (status)
			goto error_tmp;
	}

	if (xlt1) {
		status = ice_prof_bld_xlt1(blk, b, chgs);
		if (status)
			goto error_tmp;
	}

	if (xlt2) {
		status = ice_prof_bld_xlt2(blk, b, chgs);
		if (status)
			goto error_tmp;
	}

	/* After package buffer build check if the section count in buffer is
	 * non-zero and matches the number of sections detected for package
	 * update.
	 */
	pkg_sects = ice_pkg_buf_get_active_sections(b);
	if (!pkg_sects || pkg_sects != sects) {
		status = ICE_ERR_INVAL_SIZE;
		goto error_tmp;
	}

	/* update package */
	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
	if (status == ICE_ERR_AQ_ERROR)
		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");

error_tmp:
	ice_pkg_buf_free(hw, b);
	return status;
}
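
/* Illustrative sketch (not compiled): the change-list protocol that
 * ice_upd_prof_hw() consumes. Callers accumulate struct ice_chs_chg records,
 * hand the list to ice_upd_prof_hw() (which writes the corresponding package
 * sections in ES, TCAM, PTG, VSIG order), and then free the records
 * themselves. The helper below is hypothetical and only shows the ownership
 * pattern used throughout the rest of this file.
 */
#if 0
static enum ice_status
example_commit_changes(struct ice_hw *hw, enum ice_block blk,
		       struct LIST_HEAD_TYPE *chg)
{
	struct ice_chs_chg *del, *tmp;
	enum ice_status status;

	/* push all queued section updates to hardware in one package */
	status = ice_upd_prof_hw(hw, blk, chg);

	/* the change list is caller-owned; release it on every path */
	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	return status;
}
#endif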

/**
 * ice_add_prof - add profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
 * @es: extraction sequence (length of array is determined by the block)
 *
 * This function registers a profile, which matches a set of PTGs with a
 * particular extraction sequence. Although the hardware profile is allocated
 * here, it is not written until the first call to ice_add_flow that specifies
 * the ID value used here.
 */
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
	     struct ice_fv_word *es)
{
	u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
	ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
	struct ice_prof_map *prof;
	enum ice_status status;
	u8 byte = 0;
	u8 prof_id;

	ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);

	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);

	/* search for existing profile */
	status = ice_find_prof_id(hw, blk, es, &prof_id);
	if (status) {
		/* allocate profile ID */
		status = ice_alloc_prof_id(hw, blk, &prof_id);
		if (status)
			goto err_ice_add_prof;

		/* and write new es */
		ice_write_es(hw, blk, prof_id, es);
	}

	ice_prof_inc_ref(hw, blk, prof_id);

	/* add profile info */
	prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
	if (!prof) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_prof;
	}

	prof->profile_cookie = id;
	prof->prof_id = prof_id;
	prof->ptg_cnt = 0;
	prof->context = 0;

	/* build list of ptgs */
	while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
		u8 bit;

		if (!ptypes[byte]) {
			bytes--;
			byte++;
			continue;
		}

		/* Examine 8 bits per byte */
		ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte],
				     BITS_PER_BYTE) {
			u16 ptype;
			u8 ptg;

			ptype = byte * BITS_PER_BYTE + bit;

			/* The package should place all ptypes in a non-zero
			 * PTG, so the following call should never fail.
			 */
			if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
				continue;

			/* If PTG is already added, skip and continue */
			if (ice_is_bit_set(ptgs_used, ptg))
				continue;

			ice_set_bit(ptg, ptgs_used);
			prof->ptg[prof->ptg_cnt] = ptg;

			if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
				break;
		}

		bytes--;
		byte++;
	}

	LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
	status = ICE_SUCCESS;

err_ice_add_prof:
	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
	return status;
}
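
/* Illustrative sketch (not compiled): registering a profile. The ptypes
 * bitmap and extraction sequence contents are placeholders; a real caller
 * derives them from the flow fields it wants to match, and the es array
 * length must equal the block's field vector width (48 here is a guess used
 * only for illustration). Only the calling convention is being shown.
 */
#if 0
static enum ice_status example_register_prof(struct ice_hw *hw)
{
	u8 ptypes[DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = { 0 };
	struct ice_fv_word es[48] = { 0 };	/* hypothetical fvw for the block */
	u64 prof_handle = 0x1234;		/* caller-chosen tracking ID */

	/* mark ptype 10 as matched by this profile */
	ptypes[10 / BITS_PER_BYTE] |= BIT(10 % BITS_PER_BYTE);

	return ice_add_prof(hw, ICE_BLK_RSS, prof_handle, ptypes, es);
}
#endif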

/**
 * ice_search_prof_id - Search for a profile tracking ID
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 *
 * This will search for a profile tracking ID which was previously added.
 * The profile map lock should be held before calling this function.
 */
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
	struct ice_prof_map *entry = NULL;
	struct ice_prof_map *map;

	LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list)
		if (map->profile_cookie == id) {
			entry = map;
			break;
		}

	return entry;
}

/**
 * ice_set_prof_context - Set context for a given profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 * @cntxt: context
 */
enum ice_status
ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_prof_map *entry;

	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
	entry = ice_search_prof_id(hw, blk, id);
	if (entry) {
		entry->context = cntxt;
		status = ICE_SUCCESS;
	}
	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
	return status;
}

/**
 * ice_get_prof_context - Get context for a given profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 * @cntxt: pointer to variable to receive the context
 */
enum ice_status
ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_prof_map *entry;

	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
	entry = ice_search_prof_id(hw, blk, id);
	if (entry) {
		*cntxt = entry->context;
		status = ICE_SUCCESS;
	}
	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
	return status;
}
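
/* Illustrative sketch (not compiled): attaching an opaque context value to a
 * previously added profile and reading it back. The cookie value is a
 * placeholder; callers typically stash a pointer-sized tag here. Both
 * helpers take the profile map lock internally, so no external locking is
 * needed for this sequence.
 */
#if 0
static enum ice_status example_prof_context(struct ice_hw *hw, u64 prof_handle)
{
	enum ice_status status;
	u64 ctx;

	status = ice_set_prof_context(hw, ICE_BLK_RSS, prof_handle, 0xABCDULL);
	if (status)
		return status;	/* ICE_ERR_DOES_NOT_EXIST if never added */

	return ice_get_prof_context(hw, ICE_BLK_RSS, prof_handle, &ctx);
}
#endif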

/**
 * ice_vsig_prof_id_count - count profiles in a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG to count profiles for
 */
static u16
ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
	struct ice_vsig_prof *p;

	LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    ice_vsig_prof, list)
		count++;

	return count;
}

/**
 * ice_rel_tcam_idx - release a TCAM index
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @idx: the index to release
 */
static enum ice_status
ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
{
	/* Masks to invoke a never match entry */
	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
	enum ice_status status;

	/* write the TCAM entry */
	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
				      dc_msk, nm_msk);
	if (status)
		return status;

	/* release the TCAM entry */
	status = ice_free_tcam_ent(hw, blk, idx);

	return status;
}

/**
 * ice_rem_prof_id - remove one profile from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @prof: pointer to profile structure to remove
 */
static enum ice_status
ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
		struct ice_vsig_prof *prof)
{
	enum ice_status status;
	u16 i;

	for (i = 0; i < prof->tcam_count; i++)
		if (prof->tcam[i].in_use) {
			prof->tcam[i].in_use = false;
			status = ice_rel_tcam_idx(hw, blk,
						  prof->tcam[i].tcam_idx);
			if (status)
				return ICE_ERR_HW_TABLE;
		}

	return ICE_SUCCESS;
}

/**
 * ice_rem_vsig - remove VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG to remove
 * @chg: the change list
 */
static enum ice_status
ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
	     struct LIST_HEAD_TYPE *chg)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;
	struct ice_vsig_vsi *vsi_cur;
	struct ice_vsig_prof *d, *t;
	enum ice_status status;

	/* remove TCAM entries */
	LIST_FOR_EACH_ENTRY_SAFE(d, t,
				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
				 ice_vsig_prof, list) {
		status = ice_rem_prof_id(hw, blk, d);
		if (status)
			return status;

		LIST_DEL(&d->list);
		ice_free(hw, d);
	}

	/* Move all VSIs associated with this VSIG to the default VSIG */
	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	/* If the VSIG has at least 1 VSI then iterate through the list
	 * and remove the VSIs before deleting the group.
	 */
	if (vsi_cur)
		do {
			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
			struct ice_chs_chg *p;

			p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
			if (!p)
				return ICE_ERR_NO_MEMORY;

			p->type = ICE_VSIG_REM;
			p->orig_vsig = vsig;
			p->vsig = ICE_DEFAULT_VSIG;
			p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;

			LIST_ADD(&p->list_entry, chg);

			vsi_cur = tmp;
		} while (vsi_cur);

	return ice_vsig_free(hw, blk, vsig);
}

/**
 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG to remove the profile from
 * @hdl: profile handle indicating which profile to remove
 * @chg: list to receive a record of changes
 */
static enum ice_status
ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
		     struct LIST_HEAD_TYPE *chg)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;
	struct ice_vsig_prof *p, *t;
	enum ice_status status;

	LIST_FOR_EACH_ENTRY_SAFE(p, t,
				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
				 ice_vsig_prof, list)
		if (p->profile_cookie == hdl) {
			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
				/* this is the last profile, remove the VSIG */
				return ice_rem_vsig(hw, blk, vsig, chg);

			status = ice_rem_prof_id(hw, blk, p);
			if (!status) {
				LIST_DEL(&p->list);
				ice_free(hw, p);
			}
			return status;
		}

	return ICE_ERR_DOES_NOT_EXIST;
}

/**
 * ice_rem_flow_all - remove all flows with a particular profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 */
static enum ice_status
ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
{
	struct ice_chs_chg *del, *tmp;
	enum ice_status status;
	struct LIST_HEAD_TYPE chg;
	u16 i;

	INIT_LIST_HEAD(&chg);

	for (i = 1; i < ICE_MAX_VSIGS; i++)
		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
			if (ice_has_prof_vsig(hw, blk, i, id)) {
				status = ice_rem_prof_id_vsig(hw, blk, i, id,
							      &chg);
				if (status)
					goto err_ice_rem_flow_all;
			}
		}

	status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_rem_flow_all:
	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	return status;
}

/**
 * ice_rem_prof - remove profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 *
 * This will remove the profile specified by the ID parameter, which was
 * previously created through ice_add_prof. If any existing entries
 * are associated with this profile, they will be removed as well.
 */
enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
{
	struct ice_prof_map *pmap;
	enum ice_status status;

	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);

	pmap = ice_search_prof_id(hw, blk, id);
	if (!pmap) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_rem_prof;
	}

	/* remove all flows with this profile */
	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
	if (status)
		goto err_ice_rem_prof;

	/* dereference profile, and possibly remove */
	ice_prof_dec_ref(hw, blk, pmap->prof_id);

	LIST_DEL(&pmap->list);
	ice_free(hw, pmap);

err_ice_rem_prof:
	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
	return status;
}
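
/* Illustrative sketch (not compiled): tearing down a profile. ice_rem_prof()
 * first removes every flow still using the profile (via ice_rem_flow_all)
 * and then drops the profile reference, so callers do not need to remove
 * flows beforehand. prof_handle is the tracking ID that was passed to
 * ice_add_prof(); the block choice is a placeholder.
 */
#if 0
static void example_teardown_prof(struct ice_hw *hw, u64 prof_handle)
{
	if (ice_rem_prof(hw, ICE_BLK_RSS, prof_handle))
		ice_debug(hw, ICE_DBG_INIT, "profile teardown failed\n");
}
#endif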

/**
 * ice_get_prof - get profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: profile handle
 * @chg: change list
 */
static enum ice_status
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
	     struct LIST_HEAD_TYPE *chg)
{
	enum ice_status status = ICE_SUCCESS;
	struct ice_prof_map *map;
	struct ice_chs_chg *p;
	u16 i;

	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
	/* Get the details on the profile specified by the handle ID */
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_get_prof;
	}

	for (i = 0; i < map->ptg_cnt; i++)
		if (!hw->blk[blk].es.written[map->prof_id]) {
			/* add ES to change list */
			p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
			if (!p) {
				status = ICE_ERR_NO_MEMORY;
				goto err_ice_get_prof;
			}

			p->type = ICE_PTG_ES_ADD;
			p->ptype = 0;
			p->ptg = map->ptg[i];
			p->add_ptg = 0;

			p->add_prof = 1;
			p->prof_id = map->prof_id;

			hw->blk[blk].es.written[map->prof_id] = true;

			LIST_ADD(&p->list_entry, chg);
		}

err_ice_get_prof:
	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
	/* let caller clean up the change list */
	return status;
}

/**
 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG from which to copy the list
 * @lst: output list
 *
 * This routine makes a copy of the list of profiles in the specified VSIG.
 */
static enum ice_status
ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
		   struct LIST_HEAD_TYPE *lst)
{
	struct ice_vsig_prof *ent1, *ent2;
	u16 idx = vsig & ICE_VSIG_IDX_M;

	LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    ice_vsig_prof, list) {
		struct ice_vsig_prof *p;

		/* copy to the input list */
		p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p),
						       ICE_NONDMA_TO_NONDMA);
		if (!p)
			goto err_ice_get_profs_vsig;

		LIST_ADD_TAIL(&p->list, lst);
	}

	return ICE_SUCCESS;

err_ice_get_profs_vsig:
	LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
		LIST_DEL(&ent1->list);
		ice_free(hw, ent1);
	}

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_add_prof_to_lst - add profile entry to a list
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @lst: the list to be added to
 * @hdl: profile handle of entry to add
 */
static enum ice_status
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
		    struct LIST_HEAD_TYPE *lst, u64 hdl)
{
	enum ice_status status = ICE_SUCCESS;
	struct ice_prof_map *map;
	struct ice_vsig_prof *p;
	u16 i;

	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_add_prof_to_lst;
	}

	p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
	if (!p) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_prof_to_lst;
	}

	p->profile_cookie = map->profile_cookie;
	p->prof_id = map->prof_id;
	p->tcam_count = map->ptg_cnt;

	for (i = 0; i < map->ptg_cnt; i++) {
		p->tcam[i].prof_id = map->prof_id;
		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
		p->tcam[i].ptg = map->ptg[i];
	}

	LIST_ADD(&p->list, lst);

err_ice_add_prof_to_lst:
	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
	return status;
}

/**
 * ice_move_vsi - move VSI to another VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to move
 * @vsig: the VSIG to move the VSI to
 * @chg: the change list
 */
static enum ice_status
ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
	     struct LIST_HEAD_TYPE *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;
	u16 orig_vsig;

	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
	if (!p)
		return ICE_ERR_NO_MEMORY;

	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
	if (!status)
		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);

	if (status) {
		ice_free(hw, p);
		return status;
	}

	p->type = ICE_VSI_MOVE;
	p->vsi = vsi;
	p->orig_vsig = orig_vsig;
	p->vsig = vsig;

	LIST_ADD(&p->list_entry, chg);

	return ICE_SUCCESS;
}

/**
 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
 * @hw: pointer to the HW struct
 * @idx: the index of the TCAM entry to remove
 * @chg: the list of change structures to search
 */
static void
ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg)
{
	struct ice_chs_chg *pos, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry)
		if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
			LIST_DEL(&tmp->list_entry);
			ice_free(hw, tmp);
		}
}

/**
 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @enable: true to enable, false to disable
 * @vsig: the VSIG of the TCAM entry
 * @tcam: pointer to the TCAM info structure of the TCAM to disable
 * @chg: the change list
 *
 * This function appends an enable or disable TCAM entry to the change list.
 */
static enum ice_status
ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
		      u16 vsig, struct ice_tcam_inf *tcam,
		      struct LIST_HEAD_TYPE *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;

	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* if disabling, free the TCAM */
	if (!enable) {
		status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);

		/* if we have already created a change for this TCAM entry, we
		 * need to remove that entry, in order to prevent writing to a
		 * TCAM entry we will no longer own.
		 */
		ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
		tcam->tcam_idx = 0;
		tcam->in_use = 0;
		return status;
	}

	/* for re-enabling, reallocate a TCAM */
	status = ice_alloc_tcam_ent(hw, blk, true, &tcam->tcam_idx);
	if (status)
		return status;

	/* add TCAM to change list */
	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
	if (!p)
		return ICE_ERR_NO_MEMORY;

	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
				      tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
				      nm_msk);
	if (status)
		goto err_ice_prof_tcam_ena_dis;

	tcam->in_use = 1;

	p->type = ICE_TCAM_ADD;
	p->add_tcam_idx = true;
	p->prof_id = tcam->prof_id;
	p->ptg = tcam->ptg;
	p->vsig = 0;
	p->tcam_idx = tcam->tcam_idx;

	/* log change */
	LIST_ADD(&p->list_entry, chg);

	return ICE_SUCCESS;

err_ice_prof_tcam_ena_dis:
	ice_free(hw, p);
	return status;
}

/**
 * ice_adj_prof_priorities - adjust profile based on priorities
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG for which to adjust profile priorities
 * @chg: the change list
 */
static enum ice_status
ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
			struct LIST_HEAD_TYPE *chg)
{
	ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
	enum ice_status status = ICE_SUCCESS;
	struct ice_vsig_prof *t;
	u16 idx;

	ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
	idx = vsig & ICE_VSIG_IDX_M;

	/* Priority is based on the order in which the profiles are added. The
	 * newest added profile has highest priority and the oldest added
	 * profile has the lowest priority. Since the profile property list for
	 * a VSIG is sorted from newest to oldest, this code traverses the list
	 * in order and enables the first of each PTG that it finds (that is not
	 * already enabled); it also disables any duplicate PTGs that it finds
	 * in the older profiles (that are currently enabled).
	 */
	LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    ice_vsig_prof, list) {
		u16 i;

		for (i = 0; i < t->tcam_count; i++) {
			bool used;

			/* Scan the priorities from newest to oldest.
			 * Make sure that the newest profiles take priority.
			 */
			used = ice_is_bit_set(ptgs_used, t->tcam[i].ptg);

			if (used && t->tcam[i].in_use) {
				/* need to mark this PTG as never match, as it
				 * was already in use and is therefore a
				 * duplicate (and lower priority)
				 */
				status = ice_prof_tcam_ena_dis(hw, blk, false,
							       vsig,
							       &t->tcam[i],
							       chg);
				if (status)
					return status;
			} else if (!used && !t->tcam[i].in_use) {
				/* need to enable this PTG, as it is not in use
				 * and not enabled (highest priority)
				 */
				status = ice_prof_tcam_ena_dis(hw, blk, true,
							       vsig,
							       &t->tcam[i],
							       chg);
				if (status)
					return status;
			}

			/* keep track of used ptgs */
			ice_set_bit(t->tcam[i].ptg, ptgs_used);
		}
	}

	return status;
}
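
/* Worked example for the priority pass above (hypothetical values): suppose
 * profile A was added first and profile B second, both matching PTG 5, so
 * the VSIG's property list reads B (newest) then A (oldest). When B is
 * visited, PTG 5 is still unused, B's TCAM entry is (or stays) enabled, and
 * bit 5 is set in ptgs_used. When A is visited, PTG 5 is already marked
 * used, so A's in-use TCAM entry for PTG 5 is rewritten as never-match. B
 * therefore wins, matching the newest-takes-priority rule.
 */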

/**
 * ice_add_prof_id_vsig - add profile to VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG to which this profile is to be added
 * @hdl: the profile handle indicating the profile to add
 * @rev: true to add entries to the end of the list
 * @chg: the change list
 */
static enum ice_status
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
		     bool rev, struct LIST_HEAD_TYPE *chg)
{
	/* Masks that ignore flags */
	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
	enum ice_status status = ICE_SUCCESS;
	struct ice_prof_map *map;
	struct ice_vsig_prof *t;
	struct ice_chs_chg *p;
	u16 vsig_idx, i;

	/* Error, if this VSIG already has this profile */
	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
		return ICE_ERR_ALREADY_EXISTS;

	/* new VSIG profile structure */
	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
	if (!t)
		return ICE_ERR_NO_MEMORY;

	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
	/* Get the details on the profile specified by the handle ID */
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_add_prof_id_vsig;
	}

	t->profile_cookie = map->profile_cookie;
	t->prof_id = map->prof_id;
	t->tcam_count = map->ptg_cnt;

	/* create TCAM entries */
	for (i = 0; i < map->ptg_cnt; i++) {
		u16 tcam_idx;

		/* add TCAM to change list */
		p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
		if (!p) {
			status = ICE_ERR_NO_MEMORY;
			goto err_ice_add_prof_id_vsig;
		}

		/* allocate the TCAM entry index */
		status = ice_alloc_tcam_ent(hw, blk, true, &tcam_idx);
		if (status) {
			ice_free(hw, p);
			goto err_ice_add_prof_id_vsig;
		}

		t->tcam[i].ptg = map->ptg[i];
		t->tcam[i].prof_id = map->prof_id;
		t->tcam[i].tcam_idx = tcam_idx;
		t->tcam[i].in_use = true;

		p->type = ICE_TCAM_ADD;
		p->add_tcam_idx = true;
		p->prof_id = t->tcam[i].prof_id;
		p->ptg = t->tcam[i].ptg;
		p->vsig = vsig;
		p->tcam_idx = t->tcam[i].tcam_idx;

		/* write the TCAM entry */
		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
					      t->tcam[i].prof_id,
					      t->tcam[i].ptg, vsig, 0, 0,
					      vl_msk, dc_msk, nm_msk);
		if (status) {
			ice_free(hw, p);
			goto err_ice_add_prof_id_vsig;
		}

		/* log change */
		LIST_ADD(&p->list_entry, chg);
	}

	/* add profile to VSIG */
	vsig_idx = vsig & ICE_VSIG_IDX_M;
	if (rev)
		LIST_ADD_TAIL(&t->list,
			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
	else
		LIST_ADD(&t->list,
			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);

	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
	return status;

err_ice_add_prof_id_vsig:
	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
	/* let caller clean up the change list */
	ice_free(hw, t);
	return status;
}

/**
 * ice_create_prof_id_vsig - add a new VSIG with a single profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in VSIG
 * @hdl: the profile handle of the profile that will be added to the VSIG
 * @chg: the change list
 */
static enum ice_status
ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
			struct LIST_HEAD_TYPE *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;
	u16 new_vsig;

	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
	if (!p)
		return ICE_ERR_NO_MEMORY;

	new_vsig = ice_vsig_alloc(hw, blk);
	if (!new_vsig) {
		status = ICE_ERR_HW_TABLE;
		goto err_ice_create_prof_id_vsig;
	}

	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
	if (status)
		goto err_ice_create_prof_id_vsig;

	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
	if (status)
		goto err_ice_create_prof_id_vsig;

	p->type = ICE_VSIG_ADD;
	p->vsi = vsi;
	p->orig_vsig = ICE_DEFAULT_VSIG;
	p->vsig = new_vsig;

	LIST_ADD(&p->list_entry, chg);

	return ICE_SUCCESS;

err_ice_create_prof_id_vsig:
	/* let caller clean up the change list */
	ice_free(hw, p);
	return status;
}

/**
 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in VSIG
 * @lst: the list of profiles that will be added to the VSIG
 * @new_vsig: pointer to variable to receive the new VSIG ID
 * @chg: the change list
 */
static enum ice_status
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
			 struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
			 struct LIST_HEAD_TYPE *chg)
{
	struct ice_vsig_prof *t;
	enum ice_status status;
	u16 vsig;

	vsig = ice_vsig_alloc(hw, blk);
	if (!vsig)
		return ICE_ERR_HW_TABLE;

	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
	if (status)
		return status;

	LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
		/* Reverse the order here since we are copying the list */
		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
					      true, chg);
		if (status)
			return status;
	}

	*new_vsig = vsig;

	return ICE_SUCCESS;
}

/**
 * ice_find_prof_vsig - find a VSIG with a specific profile handle
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: the profile handle of the profile to search for
 * @vsig: returns the VSIG with the matching profile
 */
static bool
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
	struct ice_vsig_prof *t;
	enum ice_status status;
	struct LIST_HEAD_TYPE lst;

	INIT_LIST_HEAD(&lst);

	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
	if (!t)
		return false;

	t->profile_cookie = hdl;
	LIST_ADD(&t->list, &lst);

	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);

	LIST_DEL(&t->list);
	ice_free(hw, t);

	return status == ICE_SUCCESS;
}

/**
 * ice_add_vsi_flow - add VSI flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: input VSI
 * @vsig: target VSIG to include the input VSI
 *
 * Calling this function will add the VSI to a given VSIG and
 * update the HW tables accordingly. This call can be used to
 * add multiple VSIs to a VSIG if we know beforehand that those
 * VSIs have the same characteristics as the VSIG. This avoids
 * spending time generating a new VSIG and TCAM entries only to
 * roll them back once a matching VSIG is found.
 */
enum ice_status
ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
	struct ice_chs_chg *tmp, *del;
	struct LIST_HEAD_TYPE chg;
	enum ice_status status;

	/* if target VSIG is default the move is invalid */
	if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
		return ICE_ERR_PARAM;

	INIT_LIST_HEAD(&chg);

	/* move VSI to the VSIG that matches */
	status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
	/* update hardware if success */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	return status;
}
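
/* Illustrative sketch (not compiled): fast-path VSIG membership. When the
 * caller already knows a VSIG whose characteristics match a set of VSIs
 * (for example, VSIs configured identically to one that is already placed),
 * each VSI can be attached directly instead of going through the full
 * profile-matching path in ice_add_prof_id_flow(). vsi_list and known_vsig
 * are placeholders supplied by the caller.
 */
#if 0
static enum ice_status
example_batch_join_vsig(struct ice_hw *hw, u16 *vsi_list, u16 num_vsi,
			u16 known_vsig)
{
	enum ice_status status;
	u16 i;

	for (i = 0; i < num_vsi; i++) {
		status = ice_add_vsi_flow(hw, ICE_BLK_RSS, vsi_list[i],
					  known_vsig);
		if (status)
			return status;
	}

	return ICE_SUCCESS;
}
#endif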

/**
 * ice_add_prof_id_flow - add profile flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to enable with the profile specified by ID
 * @hdl: profile handle
 *
 * Calling this function will update the hardware tables to enable the
 * profile indicated by the @hdl parameter for the VSI specified by the @vsi
 * parameter. Once successfully called, the flow will be enabled.
 */
enum ice_status
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct ice_chs_chg *tmp, *del;
	struct LIST_HEAD_TYPE union_lst;
	enum ice_status status;
	struct LIST_HEAD_TYPE chg;
	u16 vsig;

	INIT_LIST_HEAD(&union_lst);
	INIT_LIST_HEAD(&chg);

	/* Get profile */
	status = ice_get_prof(hw, blk, hdl, &chg);
	if (status)
		return status;

	/* determine if VSI is already part of a VSIG */
	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool only_vsi;
		u16 or_vsig;
		u16 ref;

		/* found in VSIG */
		or_vsig = vsig;

		/* make sure that there is no overlap/conflict between the new
		 * characteristics and the existing ones; we don't support that
		 * scenario
		 */
		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto err_ice_add_prof_id_flow;
		}

		/* last VSI in the VSIG? */
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_add_prof_id_flow;
		only_vsi = (ref == 1);

		/* create a union of the current profiles and the one being
		 * added
		 */
		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
		if (status)
			goto err_ice_add_prof_id_flow;

		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
		if (status)
			goto err_ice_add_prof_id_flow;

		/* search for an existing VSIG with an exact characteristic
		 * match
		 */
		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
		if (!status) {
			/* move VSI to the VSIG that matches */
			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* VSI has been moved out of or_vsig. If the or_vsig had
			 * only that VSI it is now empty and can be removed.
			 */
			if (only_vsi) {
				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
				if (status)
					goto err_ice_add_prof_id_flow;
			}
		} else if (only_vsi) {
			/* If the original VSIG only contains one VSI, then it
			 * will be the requesting VSI. In this case the VSI is
			 * not sharing entries and we can simply add the new
			 * profile to the VSIG.
			 */
			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
						      &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* Adjust priorities */
			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		} else {
			/* No match, so we need a new VSIG */
			status = ice_create_vsig_from_lst(hw, blk, vsi,
							  &union_lst, &vsig,
							  &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* Adjust priorities */
			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		}
	} else {
		/* need to find or add a VSIG */
		/* search for an existing VSIG with an exact characteristic
		 * match
		 */
		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
			/* found an exact match; add or move the VSI to the
			 * VSIG that matches
			 */
			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		} else {
			/* we did not find an exact match, so we need to add
			 * a new VSIG
			 */
			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
							 &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		}
	}

	/* update hardware */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_add_prof_id_flow:
	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) {
		LIST_DEL(&del1->list);
		ice_free(hw, del1);
	}

	return status;
}

/**
 * ice_add_flow - add flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: array of VSIs to enable with the profile specified by ID
 * @count: number of elements in the VSI array
 * @id: profile tracking ID
 *
 * Calling this function will update the hardware tables to enable the
 * profile indicated by the ID parameter for the VSIs specified in the VSI
 * array. Once successfully called, the flow will be enabled.
 */
enum ice_status
ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
	     u64 id)
{
	enum ice_status status;
	u16 i;

	for (i = 0; i < count; i++) {
		status = ice_add_prof_id_flow(hw, blk, vsi[i], id);
		if (status)
			return status;
	}

	return ICE_SUCCESS;
}

/**
 * ice_rem_prof_from_list - remove a profile from list
 * @hw: pointer to the HW struct
 * @lst: list to remove the profile from
 * @hdl: the profile handle indicating the profile to remove
 */
static enum ice_status
ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
{
	struct ice_vsig_prof *ent, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list)
		if (ent->profile_cookie == hdl) {
			LIST_DEL(&ent->list);
			ice_free(hw, ent);
			return ICE_SUCCESS;
		}

	return ICE_ERR_DOES_NOT_EXIST;
}

/**
 * ice_rem_prof_id_flow - remove flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI from which to remove the profile specified by ID
 * @hdl: profile tracking handle
 *
 * Calling this function will update the hardware tables to remove the
 * profile indicated by the @hdl parameter for the VSI specified by the @vsi
 * parameter. Once successfully called, the flow will be disabled.
 */
enum ice_status
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct ice_chs_chg *tmp, *del;
	struct LIST_HEAD_TYPE chg, copy;
	enum ice_status status;
	u16 vsig;

	INIT_LIST_HEAD(&copy);
	INIT_LIST_HEAD(&chg);

	/* determine if VSI is already part of a VSIG */
	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool last_profile;
		bool only_vsi;
		u16 ref;

		/* found in VSIG */
		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_rem_prof_id_flow;
		only_vsi = (ref == 1);

		if (only_vsi) {
			/* If the original VSIG only contains one reference,
			 * which will be the requesting VSI, then the VSI is not
			 * sharing entries and we can simply remove the specific
			 * characteristics from the VSIG.
			 */

			if (last_profile) {
				/* If there are no profiles left for this VSIG,
				 * then simply remove the VSIG.
				 */
				status = ice_rem_vsig(hw, blk, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				status = ice_rem_prof_id_vsig(hw, blk, vsig,
							      hdl, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}

		} else {
			/* Make a copy of the VSIG's list of Profiles */
			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
			if (status)
				goto err_ice_rem_prof_id_flow;

			/* Remove specified profile entry from the list */
			status = ice_rem_prof_from_list(hw, &copy, hdl);
			if (status)
				goto err_ice_rem_prof_id_flow;

			if (LIST_EMPTY(&copy)) {
				status = ice_move_vsi(hw, blk, vsi,
						      ICE_DEFAULT_VSIG, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
							    &vsig)) {
				/* A VSIG with a matching profile list already
				 * exists, so move the VSI to that VSIG.
				 */
				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				/* since no existing VSIG supports this
				 * characteristic pattern, we need to create a
				 * new VSIG and TCAM entries
				 */
				status = ice_create_vsig_from_lst(hw, blk, vsi,
								  &copy, &vsig,
								  &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}
		}
	} else {
		status = ICE_ERR_DOES_NOT_EXIST;
	}

	/* update hardware tables */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_rem_prof_id_flow:
	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) {
		LIST_DEL(&del1->list);
		ice_free(hw, del1);
	}

	return status;
}

/**
 * ice_rem_flow - remove flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: array of VSIs from which to remove the profile specified by ID
 * @count: number of elements in the VSI array
 * @id: profile tracking ID
 *
 * The function will remove flows from the specified VSIs that were enabled
 * using ice_add_flow. The ID value indicates which profile will be removed.
 * Once successfully called, the flow will be disabled.
 */
enum ice_status
ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
	     u64 id)
{
	enum ice_status status;
	u16 i;

	for (i = 0; i < count; i++) {
		status = ice_rem_prof_id_flow(hw, blk, vsi[i], id);
		if (status)
			return status;
	}

	return ICE_SUCCESS;
}
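
/* Illustrative sketch (not compiled): the add/remove symmetry of the two
 * flow entry points above. The VSI numbers and profile handle are
 * placeholders; the same tracking ID given to ice_add_prof()/ice_add_flow()
 * selects the profile that ice_rem_flow() disables.
 */
#if 0
static enum ice_status example_flow_roundtrip(struct ice_hw *hw)
{
	u16 vsi[] = { 3, 7 };		/* hypothetical VSI numbers */
	u64 prof_handle = 0x1234;	/* ID used with ice_add_prof() */
	enum ice_status status;

	/* enable the profile on both VSIs; this writes the deferred
	 * hardware state allocated by ice_add_prof()
	 */
	status = ice_add_flow(hw, ICE_BLK_RSS, vsi, ARRAY_SIZE(vsi),
			      prof_handle);
	if (status)
		return status;

	/* disable it again on the same VSIs */
	return ice_rem_flow(hw, ICE_BLK_RSS, vsi, ARRAY_SIZE(vsi),
			    prof_handle);
}
#endif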