/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_protocol_type.h"
#include "ice_flow.h"

/* To support tunneling entries by PF, the package will append the PF number to
 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
40 */ 41 static const struct ice_tunnel_type_scan tnls[] = { 42 { TNL_VXLAN, "TNL_VXLAN_PF" }, 43 { TNL_GENEVE, "TNL_GENEVE_PF" }, 44 { TNL_LAST, "" } 45 }; 46 47 static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = { 48 /* SWITCH */ 49 { 50 ICE_SID_XLT0_SW, 51 ICE_SID_XLT_KEY_BUILDER_SW, 52 ICE_SID_XLT1_SW, 53 ICE_SID_XLT2_SW, 54 ICE_SID_PROFID_TCAM_SW, 55 ICE_SID_PROFID_REDIR_SW, 56 ICE_SID_FLD_VEC_SW, 57 ICE_SID_CDID_KEY_BUILDER_SW, 58 ICE_SID_CDID_REDIR_SW 59 }, 60 61 /* ACL */ 62 { 63 ICE_SID_XLT0_ACL, 64 ICE_SID_XLT_KEY_BUILDER_ACL, 65 ICE_SID_XLT1_ACL, 66 ICE_SID_XLT2_ACL, 67 ICE_SID_PROFID_TCAM_ACL, 68 ICE_SID_PROFID_REDIR_ACL, 69 ICE_SID_FLD_VEC_ACL, 70 ICE_SID_CDID_KEY_BUILDER_ACL, 71 ICE_SID_CDID_REDIR_ACL 72 }, 73 74 /* FD */ 75 { 76 ICE_SID_XLT0_FD, 77 ICE_SID_XLT_KEY_BUILDER_FD, 78 ICE_SID_XLT1_FD, 79 ICE_SID_XLT2_FD, 80 ICE_SID_PROFID_TCAM_FD, 81 ICE_SID_PROFID_REDIR_FD, 82 ICE_SID_FLD_VEC_FD, 83 ICE_SID_CDID_KEY_BUILDER_FD, 84 ICE_SID_CDID_REDIR_FD 85 }, 86 87 /* RSS */ 88 { 89 ICE_SID_XLT0_RSS, 90 ICE_SID_XLT_KEY_BUILDER_RSS, 91 ICE_SID_XLT1_RSS, 92 ICE_SID_XLT2_RSS, 93 ICE_SID_PROFID_TCAM_RSS, 94 ICE_SID_PROFID_REDIR_RSS, 95 ICE_SID_FLD_VEC_RSS, 96 ICE_SID_CDID_KEY_BUILDER_RSS, 97 ICE_SID_CDID_REDIR_RSS 98 }, 99 100 /* PE */ 101 { 102 ICE_SID_XLT0_PE, 103 ICE_SID_XLT_KEY_BUILDER_PE, 104 ICE_SID_XLT1_PE, 105 ICE_SID_XLT2_PE, 106 ICE_SID_PROFID_TCAM_PE, 107 ICE_SID_PROFID_REDIR_PE, 108 ICE_SID_FLD_VEC_PE, 109 ICE_SID_CDID_KEY_BUILDER_PE, 110 ICE_SID_CDID_REDIR_PE 111 } 112 }; 113 114 /** 115 * ice_sect_id - returns section ID 116 * @blk: block type 117 * @sect: section type 118 * 119 * This helper function returns the proper section ID given a block type and a 120 * section type. 121 */ 122 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect) 123 { 124 return ice_sect_lkup[blk][sect]; 125 } 126 127 /** 128 * ice_pkg_val_buf 129 * @buf: pointer to the ice buffer 130 * 131 * This helper function validates a buffer's header. 132 */ 133 static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) 134 { 135 struct ice_buf_hdr *hdr; 136 u16 section_count; 137 u16 data_end; 138 139 hdr = (struct ice_buf_hdr *)buf->buf; 140 /* verify data */ 141 section_count = LE16_TO_CPU(hdr->section_count); 142 if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) 143 return NULL; 144 145 data_end = LE16_TO_CPU(hdr->data_end); 146 if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) 147 return NULL; 148 149 return hdr; 150 } 151 152 /** 153 * ice_find_buf_table 154 * @ice_seg: pointer to the ice segment 155 * 156 * Returns the address of the buffer table within the ice segment. 157 */ 158 static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) 159 { 160 struct ice_nvm_table *nvms; 161 162 nvms = (struct ice_nvm_table *) 163 (ice_seg->device_table + 164 LE32_TO_CPU(ice_seg->device_table_count)); 165 166 return (_FORCE_ struct ice_buf_table *) 167 (nvms->vers + LE32_TO_CPU(nvms->table_count)); 168 } 169 170 /** 171 * ice_pkg_enum_buf 172 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) 173 * @state: pointer to the enum state 174 * 175 * This function will enumerate all the buffers in the ice segment. The first 176 * call is made with the ice_seg parameter non-NULL; on subsequent calls, 177 * ice_seg is set to NULL which continues the enumeration. 
 * When the function
 * returns a NULL pointer, then the end of the buffers has been reached, or an
 * unexpected value has been detected (for example an invalid section count or
 * an invalid buffer end value).
 */
static struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (ice_seg) {
		state->buf_table = ice_find_buf_table(ice_seg);
		if (!state->buf_table)
			return NULL;

		state->buf_idx = 0;
		return ice_pkg_val_buf(state->buf_table->buf_array);
	}

	if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +
				       state->buf_idx);
	else
		return NULL;
}

/**
 * ice_pkg_advance_sect
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This helper function will advance the section within the ice segment,
 * also advancing the buffer if needed.
 */
static bool
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (!ice_seg && !state->buf)
		return false;

	if (!ice_seg && state->buf)
		if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
			return true;

	state->buf = ice_pkg_enum_buf(ice_seg, state);
	if (!state->buf)
		return false;

	/* start of new buffer, reset section index */
	state->sect_idx = 0;
	return true;
}

/**
 * ice_pkg_enum_section
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the matching
 * sections has been reached.
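 *
 * As an illustration, a caller that walks every section of one type looks
 * roughly like the sketch below (illustrative only; this is the same shape
 * that ice_pkg_enum_entry, further down, uses to drive this function):
 *
 *	struct ice_pkg_enum state;
 *	void *sect;
 *
 *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 *	sect = ice_pkg_enum_section(ice_seg, &state, sect_type);
 *	while (sect) {
 *		... process the section ...
 *		sect = ice_pkg_enum_section(NULL, &state, 0);
 *	}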
 */
static void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		     u32 sect_type)
{
	u16 offset, size;

	if (ice_seg)
		state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))
		return NULL;

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       CPU_TO_LE32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))
			return NULL;

	/* validate section */
	offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
		return NULL;

	size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
		return NULL;

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)
		return NULL;

	state->sect_type =
		LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect = ((u8 *)state->buf) +
		LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);

	return state->sect;
}

/**
 * ice_pkg_enum_entry
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that handles access to the entries into the section type
 *
 * This function will enumerate all the entries of a particular section type in
 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the entries has
 * been reached.
 *
 * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location of entries in each
 * section.
 *
 * The offset parameter is optional, but should be used for sections that
 * contain an offset for each section table. For such cases, the section handler
 * function must return the appropriate offset + index to give the absolute
 * offset for each entry. For example, if the base for a section's header
 * indicates a base offset of 10, and the index for the entry is 2, then the
 * section handler function should set the offset to 10 + 2 = 12.
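 *
 * A caller typically loops in the same way as for sections. The sketch below
 * is illustrative only (my_handler is a placeholder for a section handler
 * such as ice_boost_tcam_handler); ice_find_boost_entry below uses
 * essentially this pattern:
 *
 *	entry = ice_pkg_enum_entry(ice_seg, &state, sect_type, &offset,
 *				   my_handler);
 *	while (entry) {
 *		... entry points at one table entry; offset holds its
 *		    absolute index when the handler provides one ...
 *		entry = ice_pkg_enum_entry(NULL, &state, sect_type, &offset,
 *					   my_handler);
 *	}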
305 */ 306 static void * 307 ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, 308 u32 sect_type, u32 *offset, 309 void *(*handler)(u32 sect_type, void *section, 310 u32 index, u32 *offset)) 311 { 312 void *entry; 313 314 if (ice_seg) { 315 if (!handler) 316 return NULL; 317 318 if (!ice_pkg_enum_section(ice_seg, state, sect_type)) 319 return NULL; 320 321 state->entry_idx = 0; 322 state->handler = handler; 323 } else { 324 state->entry_idx++; 325 } 326 327 if (!state->handler) 328 return NULL; 329 330 /* get entry */ 331 entry = state->handler(state->sect_type, state->sect, state->entry_idx, 332 offset); 333 if (!entry) { 334 /* end of a section, look for another section of this type */ 335 if (!ice_pkg_enum_section(NULL, state, 0)) 336 return NULL; 337 338 state->entry_idx = 0; 339 entry = state->handler(state->sect_type, state->sect, 340 state->entry_idx, offset); 341 } 342 343 return entry; 344 } 345 346 /** 347 * ice_boost_tcam_handler 348 * @sect_type: section type 349 * @section: pointer to section 350 * @index: index of the boost TCAM entry to be returned 351 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections 352 * 353 * This is a callback function that can be passed to ice_pkg_enum_entry. 354 * Handles enumeration of individual boost TCAM entries. 355 */ 356 static void * 357 ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) 358 { 359 struct ice_boost_tcam_section *boost; 360 361 if (!section) 362 return NULL; 363 364 if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) 365 return NULL; 366 367 if (index > ICE_MAX_BST_TCAMS_IN_BUF) 368 return NULL; 369 370 if (offset) 371 *offset = 0; 372 373 boost = (struct ice_boost_tcam_section *)section; 374 if (index >= LE16_TO_CPU(boost->count)) 375 return NULL; 376 377 return boost->tcam + index; 378 } 379 380 /** 381 * ice_find_boost_entry 382 * @ice_seg: pointer to the ice segment (non-NULL) 383 * @addr: Boost TCAM address of entry to search for 384 * @entry: returns pointer to the entry 385 * 386 * Finds a particular Boost TCAM entry and returns a pointer to that entry 387 * if it is found. The ice_seg parameter must not be NULL since the first call 388 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. 389 */ 390 static enum ice_status 391 ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, 392 struct ice_boost_tcam_entry **entry) 393 { 394 struct ice_boost_tcam_entry *tcam; 395 struct ice_pkg_enum state; 396 397 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); 398 399 if (!ice_seg) 400 return ICE_ERR_PARAM; 401 402 do { 403 tcam = (struct ice_boost_tcam_entry *) 404 ice_pkg_enum_entry(ice_seg, &state, 405 ICE_SID_RXPARSER_BOOST_TCAM, NULL, 406 ice_boost_tcam_handler); 407 if (tcam && LE16_TO_CPU(tcam->addr) == addr) { 408 *entry = tcam; 409 return ICE_SUCCESS; 410 } 411 412 ice_seg = NULL; 413 } while (tcam); 414 415 *entry = NULL; 416 return ICE_ERR_CFG; 417 } 418 419 /** 420 * ice_label_enum_handler 421 * @sect_type: section type 422 * @section: pointer to section 423 * @index: index of the label entry to be returned 424 * @offset: pointer to receive absolute offset, always zero for label sections 425 * 426 * This is a callback function that can be passed to ice_pkg_enum_entry. 427 * Handles enumeration of individual label entries. 
428 */ 429 static void * 430 ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index, 431 u32 *offset) 432 { 433 struct ice_label_section *labels; 434 435 if (!section) 436 return NULL; 437 438 if (index > ICE_MAX_LABELS_IN_BUF) 439 return NULL; 440 441 if (offset) 442 *offset = 0; 443 444 labels = (struct ice_label_section *)section; 445 if (index >= LE16_TO_CPU(labels->count)) 446 return NULL; 447 448 return labels->label + index; 449 } 450 451 /** 452 * ice_enum_labels 453 * @ice_seg: pointer to the ice segment (NULL on subsequent calls) 454 * @type: the section type that will contain the label (0 on subsequent calls) 455 * @state: ice_pkg_enum structure that will hold the state of the enumeration 456 * @value: pointer to a value that will return the label's value if found 457 * 458 * Enumerates a list of labels in the package. The caller will call 459 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call 460 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL 461 * the end of the list has been reached. 462 */ 463 static char * 464 ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, 465 u16 *value) 466 { 467 struct ice_label *label; 468 469 /* Check for valid label section on first call */ 470 if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) 471 return NULL; 472 473 label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type, 474 NULL, 475 ice_label_enum_handler); 476 if (!label) 477 return NULL; 478 479 *value = LE16_TO_CPU(label->value); 480 return label->name; 481 } 482 483 /** 484 * ice_init_pkg_hints 485 * @hw: pointer to the HW structure 486 * @ice_seg: pointer to the segment of the package scan (non-NULL) 487 * 488 * This function will scan the package and save off relevant information 489 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL 490 * since the first call to ice_enum_labels requires a pointer to an actual 491 * ice_seg structure. 492 */ 493 static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) 494 { 495 struct ice_pkg_enum state; 496 char *label_name; 497 u16 val; 498 int i; 499 500 ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM); 501 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); 502 503 if (!ice_seg) 504 return; 505 506 label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, 507 &val); 508 509 while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { 510 for (i = 0; tnls[i].type != TNL_LAST; i++) { 511 size_t len = strlen(tnls[i].label_prefix); 512 513 /* Look for matching label start, before continuing */ 514 if (strncmp(label_name, tnls[i].label_prefix, len)) 515 continue; 516 517 /* Make sure this label matches our PF. Note that the PF 518 * character ('0' - '7') will be located where our 519 * prefix string's null terminator is located. 
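			 * For example (illustrative values only): with the
			 * prefix "TNL_VXLAN_PF", len is 12, so the label
			 * "TNL_VXLAN_PF3" has label_name[len] == '3' and
			 * matches a PF whose pf_id is 3.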
			 */
			if ((label_name[len] - '0') == hw->pf_id) {
				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
				hw->tnl.tbl[hw->tnl.count].valid = false;
				hw->tnl.tbl[hw->tnl.count].in_use = false;
				hw->tnl.tbl[hw->tnl.count].marked = false;
				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
				hw->tnl.tbl[hw->tnl.count].port = 0;
				hw->tnl.count++;
				break;
			}
		}

		label_name = ice_enum_labels(NULL, 0, &state, &val);
	}

	/* Cache the appropriate boost TCAM entry pointers */
	for (i = 0; i < hw->tnl.count; i++) {
		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
				     &hw->tnl.tbl[i].boost_entry);
		if (hw->tnl.tbl[i].boost_entry)
			hw->tnl.tbl[i].valid = true;
	}
}

/* Key creation */

#define ICE_DC_KEY	0x1	/* don't care */
#define ICE_DC_KEYINV	0x1
#define ICE_NM_KEY	0x0	/* never match */
#define ICE_NM_KEYINV	0x0
#define ICE_0_KEY	0x1	/* match 0 */
#define ICE_0_KEYINV	0x0
#define ICE_1_KEY	0x0	/* match 1 */
#define ICE_1_KEYINV	0x1

/**
 * ice_gen_key_word - generate 16 bits of a key/mask word
 * @val: the value
 * @valid: valid bits mask (change only the valid bits)
 * @dont_care: don't care mask
 * @nvr_mtch: never match mask
 * @key: pointer to an array where the resulting key portion will be stored
 * @key_inv: pointer to an array where the resulting key invert portion will
 *	     be stored
 *
 * This function generates 16 bits from an 8-bit value, an 8-bit don't care
 * mask and an 8-bit never match mask. The 16 bits of output are divided into
 * 8 bits of key and 8 bits of key invert.
 *
 *     '0' =    b01, always match a 0 bit
 *     '1' =    b10, always match a 1 bit
 *     '?' =    b11, don't care bit (always matches)
 *     '~' =    b00, never match bit
 *
 * Input:
 *          val:         b0  1  0  1  0  1
 *          dont_care:   b0  0  1  1  0  0
 *          never_mtch:  b0  0  0  0  1  1
 *          ------------------------------
 * Result:  key:        b01 10 11 11 00 00
 */
static enum ice_status
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
		 u8 *key_inv)
{
	u8 in_key = *key, in_key_inv = *key_inv;
	u8 i;

	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
		return ICE_ERR_CFG;

	*key = 0;
	*key_inv = 0;

	/* encode the 8 bits into 8-bit key and 8-bit key invert */
	for (i = 0; i < 8; i++) {
		*key >>= 1;
		*key_inv >>= 1;

		if (!(valid & 0x1)) { /* change only valid bits */
			*key |= (in_key & 0x1) << 7;
			*key_inv |= (in_key_inv & 0x1) << 7;
		} else if (dont_care & 0x1) { /* don't care bit */
			*key |= ICE_DC_KEY << 7;
			*key_inv |= ICE_DC_KEYINV << 7;
		} else if (nvr_mtch & 0x1) { /* never match bit */
			*key |= ICE_NM_KEY << 7;
			*key_inv |= ICE_NM_KEYINV << 7;
		} else if (val & 0x01) { /* exact 1 match */
			*key |= ICE_1_KEY << 7;
			*key_inv |= ICE_1_KEYINV << 7;
		} else { /* exact 0 match */
			*key |= ICE_0_KEY << 7;
			*key_inv |= ICE_0_KEYINV << 7;
		}

		dont_care >>= 1;
		nvr_mtch >>= 1;
		valid >>= 1;
		val >>= 1;
		in_key >>= 1;
		in_key_inv >>= 1;
	}

	return ICE_SUCCESS;
}

/**
 * ice_bits_max_set - determine if the number of bits set is within a maximum
 * @mask: pointer to the byte array which is the mask
 * @size: the number of bytes in the mask
 * @max: the max number of set bits
 *
 * This function determines if there are at most 'max' number of bits set in an
 * array.
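 * For example (illustrative values): a mask of { 0x01, 0x06 } has three bits
 * set in total, so the check passes for max >= 3 and fails for max <= 2.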
 * Returns true if the number of bits set is <= max, otherwise returns false.
 */
static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
{
	u16 count = 0;
	u16 i;

	/* check each byte */
	for (i = 0; i < size; i++) {
		/* if 0, go to next byte */
		if (!mask[i])
			continue;

		/* We know there is at least one set bit in this byte because of
		 * the above check; if we already have found 'max' number of
		 * bits set, then we can return failure now.
		 */
		if (count == max)
			return false;

		/* count the bits in this byte, checking threshold */
		count += ice_hweight8(mask[i]);
		if (count > max)
			return false;
	}

	return true;
}

/**
 * ice_set_key - generate a variable sized key with multiples of 16-bits
 * @key: pointer to where the key will be stored
 * @size: the size of the complete key in bytes (must be even)
 * @val: array of 8-bit values that makes up the value portion of the key
 * @upd: array of 8-bit masks that determine what key portion to update
 * @dc: array of 8-bit masks that make up the don't care mask
 * @nm: array of 8-bit masks that make up the never match mask
 * @off: the offset of the first byte in the key to update
 * @len: the number of bytes in the key update
 *
 * This function generates a key from a value, a don't care mask and a never
 * match mask.
 * upd, dc, and nm are optional parameters, and can be NULL:
 *	upd == NULL --> upd mask is all 1's (update all bits)
 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 *	nm == NULL --> nm mask is all 0's (no never match bits)
 */
static enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
	    u16 len)
{
	u16 half_size;
	u16 i;

	/* size must be a multiple of 2 bytes. */
	if (size % 2)
		return ICE_ERR_CFG;
	half_size = size / 2;

	if (off + len > half_size)
		return ICE_ERR_CFG;

	/* Make sure at most one bit is set in the never match mask. Having more
	 * than one never match mask bit set will cause HW to consume excessive
	 * power otherwise; this is a power management efficiency check.
	 */
#define ICE_NVR_MTCH_BITS_MAX	1
	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
		return ICE_ERR_CFG;

	for (i = 0; i < len; i++)
		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
				     dc ? dc[i] : 0, nm ? nm[i] : 0,
				     key + off + i, key + half_size + off + i))
			return ICE_ERR_CFG;

	return ICE_SUCCESS;
}

/**
 * ice_acquire_global_cfg_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the global config lock for reading
 * or writing of the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * ICE_SUCCESS        - Means the caller has acquired the global config lock
 *                      and can perform writing of the package.
 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
 *                      package or has found that no update was necessary; in
 *                      this case, the caller can just skip performing any
 *                      update of the package.
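 *
 * A write-side caller therefore typically looks like this (a sketch only;
 * ice_dwnld_cfg_bufs below follows this shape):
 *
 *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 *	if (status == ICE_ERR_AQ_NO_WORK)
 *		... another PF already downloaded the package; skip the
 *		    download and treat it as already done ...
 *	else if (!status)
 *		... perform the download, then ...
 *		ice_release_global_cfg_lock(hw);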
730 */ 731 static enum ice_status 732 ice_acquire_global_cfg_lock(struct ice_hw *hw, 733 enum ice_aq_res_access_type access) 734 { 735 enum ice_status status; 736 737 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 738 739 status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, 740 ICE_GLOBAL_CFG_LOCK_TIMEOUT); 741 742 if (status == ICE_ERR_AQ_NO_WORK) 743 ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); 744 745 return status; 746 } 747 748 /** 749 * ice_release_global_cfg_lock 750 * @hw: pointer to the HW structure 751 * 752 * This function will release the global config lock. 753 */ 754 static void ice_release_global_cfg_lock(struct ice_hw *hw) 755 { 756 ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); 757 } 758 759 /** 760 * ice_acquire_change_lock 761 * @hw: pointer to the HW structure 762 * @access: access type (read or write) 763 * 764 * This function will request ownership of the change lock. 765 */ 766 static enum ice_status 767 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) 768 { 769 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 770 771 return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access, 772 ICE_CHANGE_LOCK_TIMEOUT); 773 } 774 775 /** 776 * ice_release_change_lock 777 * @hw: pointer to the HW structure 778 * 779 * This function will release the change lock using the proper Admin Command. 780 */ 781 static void ice_release_change_lock(struct ice_hw *hw) 782 { 783 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 784 785 ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID); 786 } 787 788 /** 789 * ice_aq_download_pkg 790 * @hw: pointer to the hardware structure 791 * @pkg_buf: the package buffer to transfer 792 * @buf_size: the size of the package buffer 793 * @last_buf: last buffer indicator 794 * @error_offset: returns error offset 795 * @error_info: returns error information 796 * @cd: pointer to command details structure or NULL 797 * 798 * Download Package (0x0C40) 799 */ 800 static enum ice_status 801 ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, 802 u16 buf_size, bool last_buf, u32 *error_offset, 803 u32 *error_info, struct ice_sq_cd *cd) 804 { 805 struct ice_aqc_download_pkg *cmd; 806 struct ice_aq_desc desc; 807 enum ice_status status; 808 809 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 810 811 if (error_offset) 812 *error_offset = 0; 813 if (error_info) 814 *error_info = 0; 815 816 cmd = &desc.params.download_pkg; 817 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); 818 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 819 820 if (last_buf) 821 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; 822 823 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); 824 if (status == ICE_ERR_AQ_ERROR) { 825 /* Read error from buffer only when the FW returned an error */ 826 struct ice_aqc_download_pkg_resp *resp; 827 828 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; 829 if (error_offset) 830 *error_offset = LE32_TO_CPU(resp->error_offset); 831 if (error_info) 832 *error_info = LE32_TO_CPU(resp->error_info); 833 } 834 835 return status; 836 } 837 838 /** 839 * ice_aq_upload_section 840 * @hw: pointer to the hardware structure 841 * @pkg_buf: the package buffer which will receive the section 842 * @buf_size: the size of the package buffer 843 * @cd: pointer to command details structure or NULL 844 * 845 * Upload Section (0x0C41) 846 */ 847 enum ice_status 848 ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, 849 u16 buf_size, struct ice_sq_cd *cd) 850 { 851 struct 
ice_aq_desc desc; 852 853 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 854 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); 855 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 856 857 return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); 858 } 859 860 /** 861 * ice_aq_update_pkg 862 * @hw: pointer to the hardware structure 863 * @pkg_buf: the package cmd buffer 864 * @buf_size: the size of the package cmd buffer 865 * @last_buf: last buffer indicator 866 * @error_offset: returns error offset 867 * @error_info: returns error information 868 * @cd: pointer to command details structure or NULL 869 * 870 * Update Package (0x0C42) 871 */ 872 static enum ice_status 873 ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, 874 bool last_buf, u32 *error_offset, u32 *error_info, 875 struct ice_sq_cd *cd) 876 { 877 struct ice_aqc_download_pkg *cmd; 878 struct ice_aq_desc desc; 879 enum ice_status status; 880 881 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 882 883 if (error_offset) 884 *error_offset = 0; 885 if (error_info) 886 *error_info = 0; 887 888 cmd = &desc.params.download_pkg; 889 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); 890 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 891 892 if (last_buf) 893 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; 894 895 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); 896 if (status == ICE_ERR_AQ_ERROR) { 897 /* Read error from buffer only when the FW returned an error */ 898 struct ice_aqc_download_pkg_resp *resp; 899 900 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; 901 if (error_offset) 902 *error_offset = LE32_TO_CPU(resp->error_offset); 903 if (error_info) 904 *error_info = LE32_TO_CPU(resp->error_info); 905 } 906 907 return status; 908 } 909 910 /** 911 * ice_find_seg_in_pkg 912 * @hw: pointer to the hardware structure 913 * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) 914 * @pkg_hdr: pointer to the package header to be searched 915 * 916 * This function searches a package file for a particular segment type. On 917 * success it returns a pointer to the segment header, otherwise it will 918 * return NULL. 919 */ 920 static struct ice_generic_seg_hdr * 921 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, 922 struct ice_pkg_hdr *pkg_hdr) 923 { 924 u32 i; 925 926 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 927 ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", 928 pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, 929 pkg_hdr->pkg_format_ver.update, 930 pkg_hdr->pkg_format_ver.draft); 931 932 /* Search all package segments for the requested segment type */ 933 for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) { 934 struct ice_generic_seg_hdr *seg; 935 936 seg = (struct ice_generic_seg_hdr *) 937 ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i])); 938 939 if (LE32_TO_CPU(seg->seg_type) == seg_type) 940 return seg; 941 } 942 943 return NULL; 944 } 945 946 /** 947 * ice_update_pkg 948 * @hw: pointer to the hardware structure 949 * @bufs: pointer to an array of buffers 950 * @count: the number of buffers in the array 951 * 952 * Obtains change lock and updates package. 
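 *
 * A caller normally builds the buffer with the ice_pkg_buf_* helpers that
 * appear later in this file, roughly as sketched below (illustrative only;
 * error handling omitted, and the struct ice_buf embedded in the build
 * structure is referenced directly here for brevity):
 *
 *	bld = ice_pkg_buf_alloc(hw);
 *	... reserve section entries and fill one or more sections ...
 *	status = ice_update_pkg(hw, &bld->buf, 1);
 *	ice_pkg_buf_free(hw, bld);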
953 */ 954 enum ice_status 955 ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) 956 { 957 enum ice_status status; 958 u32 offset, info, i; 959 960 status = ice_acquire_change_lock(hw, ICE_RES_WRITE); 961 if (status) 962 return status; 963 964 for (i = 0; i < count; i++) { 965 struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); 966 bool last = ((i + 1) == count); 967 968 status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end), 969 last, &offset, &info, NULL); 970 971 if (status) { 972 ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", 973 status, offset, info); 974 break; 975 } 976 } 977 978 ice_release_change_lock(hw); 979 980 return status; 981 } 982 983 /** 984 * ice_dwnld_cfg_bufs 985 * @hw: pointer to the hardware structure 986 * @bufs: pointer to an array of buffers 987 * @count: the number of buffers in the array 988 * 989 * Obtains global config lock and downloads the package configuration buffers 990 * to the firmware. Metadata buffers are skipped, and the first metadata buffer 991 * found indicates that the rest of the buffers are all metadata buffers. 992 */ 993 static enum ice_status 994 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) 995 { 996 enum ice_status status; 997 struct ice_buf_hdr *bh; 998 u32 offset, info, i; 999 1000 if (!bufs || !count) 1001 return ICE_ERR_PARAM; 1002 1003 /* If the first buffer's first section has its metadata bit set 1004 * then there are no buffers to be downloaded, and the operation is 1005 * considered a success. 1006 */ 1007 bh = (struct ice_buf_hdr *)bufs; 1008 if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF) 1009 return ICE_SUCCESS; 1010 1011 /* reset pkg_dwnld_status in case this function is called in the 1012 * reset/rebuild flow 1013 */ 1014 hw->pkg_dwnld_status = ICE_AQ_RC_OK; 1015 1016 status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); 1017 if (status) { 1018 if (status == ICE_ERR_AQ_NO_WORK) 1019 hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST; 1020 else 1021 hw->pkg_dwnld_status = hw->adminq.sq_last_status; 1022 return status; 1023 } 1024 1025 for (i = 0; i < count; i++) { 1026 bool last = ((i + 1) == count); 1027 1028 if (!last) { 1029 /* check next buffer for metadata flag */ 1030 bh = (struct ice_buf_hdr *)(bufs + i + 1); 1031 1032 /* A set metadata flag in the next buffer will signal 1033 * that the current buffer will be the last buffer 1034 * downloaded 1035 */ 1036 if (LE16_TO_CPU(bh->section_count)) 1037 if (LE32_TO_CPU(bh->section_entry[0].type) & 1038 ICE_METADATA_BUF) 1039 last = true; 1040 } 1041 1042 bh = (struct ice_buf_hdr *)(bufs + i); 1043 1044 status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, 1045 &offset, &info, NULL); 1046 1047 /* Save AQ status from download package */ 1048 hw->pkg_dwnld_status = hw->adminq.sq_last_status; 1049 if (status) { 1050 ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", 1051 status, offset, info); 1052 1053 break; 1054 } 1055 1056 if (last) 1057 break; 1058 } 1059 1060 if (!status) { 1061 status = ice_set_vlan_mode(hw); 1062 if (status) 1063 ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", 1064 status); 1065 } 1066 1067 ice_release_global_cfg_lock(hw); 1068 1069 return status; 1070 } 1071 1072 /** 1073 * ice_aq_get_pkg_info_list 1074 * @hw: pointer to the hardware structure 1075 * @pkg_info: the buffer which will receive the information list 1076 * @buf_size: the size of the pkg_info information buffer 1077 * @cd: pointer to command details structure or 
NULL 1078 * 1079 * Get Package Info List (0x0C43) 1080 */ 1081 static enum ice_status 1082 ice_aq_get_pkg_info_list(struct ice_hw *hw, 1083 struct ice_aqc_get_pkg_info_resp *pkg_info, 1084 u16 buf_size, struct ice_sq_cd *cd) 1085 { 1086 struct ice_aq_desc desc; 1087 1088 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 1089 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); 1090 1091 return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); 1092 } 1093 1094 /** 1095 * ice_download_pkg 1096 * @hw: pointer to the hardware structure 1097 * @ice_seg: pointer to the segment of the package to be downloaded 1098 * 1099 * Handles the download of a complete package. 1100 */ 1101 static enum ice_status 1102 ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) 1103 { 1104 struct ice_buf_table *ice_buf_tbl; 1105 1106 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 1107 ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", 1108 ice_seg->hdr.seg_format_ver.major, 1109 ice_seg->hdr.seg_format_ver.minor, 1110 ice_seg->hdr.seg_format_ver.update, 1111 ice_seg->hdr.seg_format_ver.draft); 1112 1113 ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", 1114 LE32_TO_CPU(ice_seg->hdr.seg_type), 1115 LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); 1116 1117 ice_buf_tbl = ice_find_buf_table(ice_seg); 1118 1119 ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", 1120 LE32_TO_CPU(ice_buf_tbl->buf_count)); 1121 1122 return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, 1123 LE32_TO_CPU(ice_buf_tbl->buf_count)); 1124 } 1125 1126 /** 1127 * ice_init_pkg_info 1128 * @hw: pointer to the hardware structure 1129 * @pkg_hdr: pointer to the driver's package hdr 1130 * 1131 * Saves off the package details into the HW structure. 
1132 */ 1133 static enum ice_status 1134 ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) 1135 { 1136 struct ice_generic_seg_hdr *seg_hdr; 1137 1138 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 1139 if (!pkg_hdr) 1140 return ICE_ERR_PARAM; 1141 1142 seg_hdr = (struct ice_generic_seg_hdr *) 1143 ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); 1144 if (seg_hdr) { 1145 struct ice_meta_sect *meta; 1146 struct ice_pkg_enum state; 1147 1148 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); 1149 1150 /* Get package information from the Metadata Section */ 1151 meta = (struct ice_meta_sect *) 1152 ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, 1153 ICE_SID_METADATA); 1154 if (!meta) { 1155 ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); 1156 return ICE_ERR_CFG; 1157 } 1158 1159 hw->pkg_ver = meta->ver; 1160 ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name), 1161 ICE_NONDMA_TO_NONDMA); 1162 1163 ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", 1164 meta->ver.major, meta->ver.minor, meta->ver.update, 1165 meta->ver.draft, meta->name); 1166 1167 hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; 1168 ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id, 1169 sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA); 1170 1171 ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", 1172 seg_hdr->seg_format_ver.major, 1173 seg_hdr->seg_format_ver.minor, 1174 seg_hdr->seg_format_ver.update, 1175 seg_hdr->seg_format_ver.draft, 1176 seg_hdr->seg_id); 1177 } else { 1178 ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); 1179 return ICE_ERR_CFG; 1180 } 1181 1182 return ICE_SUCCESS; 1183 } 1184 1185 /** 1186 * ice_get_pkg_info 1187 * @hw: pointer to the hardware structure 1188 * 1189 * Store details of the package currently loaded in HW into the HW structure. 
1190 */ 1191 static enum ice_status ice_get_pkg_info(struct ice_hw *hw) 1192 { 1193 struct ice_aqc_get_pkg_info_resp *pkg_info; 1194 enum ice_status status; 1195 u16 size; 1196 u32 i; 1197 1198 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 1199 1200 size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT); 1201 pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); 1202 if (!pkg_info) 1203 return ICE_ERR_NO_MEMORY; 1204 1205 status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL); 1206 if (status) 1207 goto init_pkg_free_alloc; 1208 1209 for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) { 1210 #define ICE_PKG_FLAG_COUNT 4 1211 char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; 1212 u8 place = 0; 1213 1214 if (pkg_info->pkg_info[i].is_active) { 1215 flags[place++] = 'A'; 1216 hw->active_pkg_ver = pkg_info->pkg_info[i].ver; 1217 hw->active_track_id = 1218 LE32_TO_CPU(pkg_info->pkg_info[i].track_id); 1219 ice_memcpy(hw->active_pkg_name, 1220 pkg_info->pkg_info[i].name, 1221 sizeof(pkg_info->pkg_info[i].name), 1222 ICE_NONDMA_TO_NONDMA); 1223 hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; 1224 } 1225 if (pkg_info->pkg_info[i].is_active_at_boot) 1226 flags[place++] = 'B'; 1227 if (pkg_info->pkg_info[i].is_modified) 1228 flags[place++] = 'M'; 1229 if (pkg_info->pkg_info[i].is_in_nvm) 1230 flags[place++] = 'N'; 1231 1232 ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", 1233 i, pkg_info->pkg_info[i].ver.major, 1234 pkg_info->pkg_info[i].ver.minor, 1235 pkg_info->pkg_info[i].ver.update, 1236 pkg_info->pkg_info[i].ver.draft, 1237 pkg_info->pkg_info[i].name, flags); 1238 } 1239 1240 init_pkg_free_alloc: 1241 ice_free(hw, pkg_info); 1242 1243 return status; 1244 } 1245 1246 /** 1247 * ice_find_label_value 1248 * @ice_seg: pointer to the ice segment (non-NULL) 1249 * @name: name of the label to search for 1250 * @type: the section type that will contain the label 1251 * @value: pointer to a value that will return the label's value if found 1252 * 1253 * Finds a label's value given the label name and the section type to search. 1254 * The ice_seg parameter must not be NULL since the first call to 1255 * ice_enum_labels requires a pointer to an actual ice_seg structure. 1256 */ 1257 enum ice_status 1258 ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type, 1259 u16 *value) 1260 { 1261 struct ice_pkg_enum state; 1262 char *label_name; 1263 u16 val; 1264 1265 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); 1266 1267 if (!ice_seg) 1268 return ICE_ERR_PARAM; 1269 1270 do { 1271 label_name = ice_enum_labels(ice_seg, type, &state, &val); 1272 if (label_name && !strcmp(label_name, name)) { 1273 *value = val; 1274 return ICE_SUCCESS; 1275 } 1276 1277 ice_seg = NULL; 1278 } while (label_name); 1279 1280 return ICE_ERR_CFG; 1281 } 1282 1283 /** 1284 * ice_verify_pkg - verify package 1285 * @pkg: pointer to the package buffer 1286 * @len: size of the package buffer 1287 * 1288 * Verifies various attributes of the package file, including length, format 1289 * version, and the requirement of at least one segment. 
1290 */ 1291 static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) 1292 { 1293 u32 seg_count; 1294 u32 i; 1295 1296 if (len < ice_struct_size(pkg, seg_offset, 1)) 1297 return ICE_ERR_BUF_TOO_SHORT; 1298 1299 if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || 1300 pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || 1301 pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || 1302 pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) 1303 return ICE_ERR_CFG; 1304 1305 /* pkg must have at least one segment */ 1306 seg_count = LE32_TO_CPU(pkg->seg_count); 1307 if (seg_count < 1) 1308 return ICE_ERR_CFG; 1309 1310 /* make sure segment array fits in package length */ 1311 if (len < ice_struct_size(pkg, seg_offset, seg_count)) 1312 return ICE_ERR_BUF_TOO_SHORT; 1313 1314 /* all segments must fit within length */ 1315 for (i = 0; i < seg_count; i++) { 1316 u32 off = LE32_TO_CPU(pkg->seg_offset[i]); 1317 struct ice_generic_seg_hdr *seg; 1318 1319 /* segment header must fit */ 1320 if (len < off + sizeof(*seg)) 1321 return ICE_ERR_BUF_TOO_SHORT; 1322 1323 seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); 1324 1325 /* segment body must fit */ 1326 if (len < off + LE32_TO_CPU(seg->seg_size)) 1327 return ICE_ERR_BUF_TOO_SHORT; 1328 } 1329 1330 return ICE_SUCCESS; 1331 } 1332 1333 /** 1334 * ice_free_seg - free package segment pointer 1335 * @hw: pointer to the hardware structure 1336 * 1337 * Frees the package segment pointer in the proper manner, depending on if the 1338 * segment was allocated or just the passed in pointer was stored. 1339 */ 1340 void ice_free_seg(struct ice_hw *hw) 1341 { 1342 if (hw->pkg_copy) { 1343 ice_free(hw, hw->pkg_copy); 1344 hw->pkg_copy = NULL; 1345 hw->pkg_size = 0; 1346 } 1347 hw->seg = NULL; 1348 } 1349 1350 /** 1351 * ice_init_pkg_regs - initialize additional package registers 1352 * @hw: pointer to the hardware structure 1353 */ 1354 static void ice_init_pkg_regs(struct ice_hw *hw) 1355 { 1356 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF 1357 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF 1358 #define ICE_SW_BLK_IDX 0 1359 1360 /* setup Switch block input mask, which is 48-bits in two parts */ 1361 wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); 1362 wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); 1363 } 1364 1365 /** 1366 * ice_chk_pkg_version - check package version for compatibility with driver 1367 * @pkg_ver: pointer to a version structure to check 1368 * 1369 * Check to make sure that the package about to be downloaded is compatible with 1370 * the driver. To be compatible, the major and minor components of the package 1371 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR 1372 * definitions. 
1373 */ 1374 static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) 1375 { 1376 if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ || 1377 pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) 1378 return ICE_ERR_NOT_SUPPORTED; 1379 1380 return ICE_SUCCESS; 1381 } 1382 1383 /** 1384 * ice_chk_pkg_compat 1385 * @hw: pointer to the hardware structure 1386 * @ospkg: pointer to the package hdr 1387 * @seg: pointer to the package segment hdr 1388 * 1389 * This function checks the package version compatibility with driver and NVM 1390 */ 1391 static enum ice_status 1392 ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, 1393 struct ice_seg **seg) 1394 { 1395 struct ice_aqc_get_pkg_info_resp *pkg; 1396 enum ice_status status; 1397 u16 size; 1398 u32 i; 1399 1400 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 1401 1402 /* Check package version compatibility */ 1403 status = ice_chk_pkg_version(&hw->pkg_ver); 1404 if (status) { 1405 ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); 1406 return status; 1407 } 1408 1409 /* find ICE segment in given package */ 1410 *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, 1411 ospkg); 1412 if (!*seg) { 1413 ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); 1414 return ICE_ERR_CFG; 1415 } 1416 1417 /* Check if FW is compatible with the OS package */ 1418 size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT); 1419 pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); 1420 if (!pkg) 1421 return ICE_ERR_NO_MEMORY; 1422 1423 status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL); 1424 if (status) 1425 goto fw_ddp_compat_free_alloc; 1426 1427 for (i = 0; i < LE32_TO_CPU(pkg->count); i++) { 1428 /* loop till we find the NVM package */ 1429 if (!pkg->pkg_info[i].is_in_nvm) 1430 continue; 1431 if ((*seg)->hdr.seg_format_ver.major != 1432 pkg->pkg_info[i].ver.major || 1433 (*seg)->hdr.seg_format_ver.minor > 1434 pkg->pkg_info[i].ver.minor) { 1435 status = ICE_ERR_FW_DDP_MISMATCH; 1436 ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); 1437 } 1438 /* done processing NVM package so break */ 1439 break; 1440 } 1441 fw_ddp_compat_free_alloc: 1442 ice_free(hw, pkg); 1443 return status; 1444 } 1445 1446 /** 1447 * ice_sw_fv_handler 1448 * @sect_type: section type 1449 * @section: pointer to section 1450 * @index: index of the field vector entry to be returned 1451 * @offset: ptr to variable that receives the offset in the field vector table 1452 * 1453 * This is a callback function that can be passed to ice_pkg_enum_entry. 1454 * This function treats the given section as of type ice_sw_fv_section and 1455 * enumerates offset field. "offset" is an index into the field vector table. 1456 */ 1457 static void * 1458 ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) 1459 { 1460 struct ice_sw_fv_section *fv_section = 1461 (struct ice_sw_fv_section *)section; 1462 1463 if (!section || sect_type != ICE_SID_FLD_VEC_SW) 1464 return NULL; 1465 if (index >= LE16_TO_CPU(fv_section->count)) 1466 return NULL; 1467 if (offset) 1468 /* "index" passed in to this function is relative to a given 1469 * 4k block. 
		 * To get the true index into the field vector table, add the
		 * relative index to the base_offset field of this section.
		 */
		*offset = LE16_TO_CPU(fv_section->base_offset) + index;
	return fv_section->fv + index;
}

/**
 * ice_get_prof_index_max - get the max profile index for used profile
 * @hw: pointer to the HW struct
 *
 * Calling this function will find the maximum index of any profile that is
 * in use and store that index in struct ice_switch_info *switch_info in hw
 * for later use.
 */
static int ice_get_prof_index_max(struct ice_hw *hw)
{
	u16 prof_index = 0, j, max_prof_index = 0;
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	bool flag = false;
	struct ice_fv *fv;
	u32 offset;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!hw->seg)
		return ICE_ERR_PARAM;

	ice_seg = hw->seg;

	do {
		fv = (struct ice_fv *)
			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					   &offset, ice_sw_fv_handler);
		if (!fv)
			break;
		ice_seg = NULL;

		/* In a profile that is not used, the prot_id is set to 0xff
		 * and the off is set to 0x1ff for all the field vectors.
		 */
		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
				flag = true;
		if (flag && prof_index > max_prof_index)
			max_prof_index = prof_index;

		prof_index++;
		flag = false;
	} while (fv);

	hw->switch_info->max_used_prof_index = max_prof_index;

	return ICE_SUCCESS;
}

/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version. Then it finds the ice configuration segment
 * within the package; this function then saves a copy of the segment pointer
 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note that if
 * a previous PF driver has already downloaded the package successfully, then
 * the current driver will not have to download the package again.
 *
 * The local package contents will be used to query default behavior and to
 * update specific sections of the HW's version of the package (e.g. to update
 * the parse graph to understand new protocols).
 *
 * This function stores a pointer to the package buffer memory, and it is
 * expected that the supplied buffer will not be freed immediately. If the
 * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
 * case.
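 *
 * For instance, a probe-time caller that reads the DDP package through the
 * firmware(9) interface would do roughly the following (a sketch only; the
 * image name is illustrative):
 *
 *	const struct firmware *pkg = firmware_get("ice_ddp");
 *
 *	if (pkg) {
 *		status = ice_copy_and_init_pkg(hw, (const u8 *)pkg->data,
 *					       pkg->datasize);
 *		firmware_put(pkg, FIRMWARE_UNLOAD);
 *	}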
1552 */ 1553 enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) 1554 { 1555 struct ice_pkg_hdr *pkg; 1556 enum ice_status status; 1557 struct ice_seg *seg; 1558 1559 if (!buf || !len) 1560 return ICE_ERR_PARAM; 1561 1562 pkg = (struct ice_pkg_hdr *)buf; 1563 status = ice_verify_pkg(pkg, len); 1564 if (status) { 1565 ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", 1566 status); 1567 return status; 1568 } 1569 1570 /* initialize package info */ 1571 status = ice_init_pkg_info(hw, pkg); 1572 if (status) 1573 return status; 1574 1575 /* before downloading the package, check package version for 1576 * compatibility with driver 1577 */ 1578 status = ice_chk_pkg_compat(hw, pkg, &seg); 1579 if (status) 1580 return status; 1581 1582 /* initialize package hints and then download package */ 1583 ice_init_pkg_hints(hw, seg); 1584 status = ice_download_pkg(hw, seg); 1585 if (status == ICE_ERR_AQ_NO_WORK) { 1586 ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); 1587 status = ICE_SUCCESS; 1588 } 1589 1590 /* Get information on the package currently loaded in HW, then make sure 1591 * the driver is compatible with this version. 1592 */ 1593 if (!status) { 1594 status = ice_get_pkg_info(hw); 1595 if (!status) 1596 status = ice_chk_pkg_version(&hw->active_pkg_ver); 1597 } 1598 1599 if (!status) { 1600 hw->seg = seg; 1601 /* on successful package download update other required 1602 * registers to support the package and fill HW tables 1603 * with package content. 1604 */ 1605 ice_init_pkg_regs(hw); 1606 ice_fill_blk_tbls(hw); 1607 ice_get_prof_index_max(hw); 1608 } else { 1609 ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", 1610 status); 1611 } 1612 1613 return status; 1614 } 1615 1616 /** 1617 * ice_copy_and_init_pkg - initialize/download a copy of the package 1618 * @hw: pointer to the hardware structure 1619 * @buf: pointer to the package buffer 1620 * @len: size of the package buffer 1621 * 1622 * This function copies the package buffer, and then calls ice_init_pkg() to 1623 * initialize the copied package contents. 1624 * 1625 * The copying is necessary if the package buffer supplied is constant, or if 1626 * the memory may disappear shortly after calling this function. 1627 * 1628 * If the package buffer resides in the data segment and can be modified, the 1629 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). 1630 * 1631 * However, if the package buffer needs to be copied first, such as when being 1632 * read from a file, the caller should use ice_copy_and_init_pkg(). 1633 * 1634 * This function will first copy the package buffer, before calling 1635 * ice_init_pkg(). The caller is free to immediately destroy the original 1636 * package buffer, as the new copy will be managed by this function and 1637 * related routines. 
1638 */ 1639 enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) 1640 { 1641 enum ice_status status; 1642 u8 *buf_copy; 1643 1644 if (!buf || !len) 1645 return ICE_ERR_PARAM; 1646 1647 buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA); 1648 1649 status = ice_init_pkg(hw, buf_copy, len); 1650 if (status) { 1651 /* Free the copy, since we failed to initialize the package */ 1652 ice_free(hw, buf_copy); 1653 } else { 1654 /* Track the copied pkg so we can free it later */ 1655 hw->pkg_copy = buf_copy; 1656 hw->pkg_size = len; 1657 } 1658 1659 return status; 1660 } 1661 1662 /** 1663 * ice_pkg_buf_alloc 1664 * @hw: pointer to the HW structure 1665 * 1666 * Allocates a package buffer and returns a pointer to the buffer header. 1667 * Note: all package contents must be in Little Endian form. 1668 */ 1669 static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) 1670 { 1671 struct ice_buf_build *bld; 1672 struct ice_buf_hdr *buf; 1673 1674 bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld)); 1675 if (!bld) 1676 return NULL; 1677 1678 buf = (struct ice_buf_hdr *)bld; 1679 buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr, 1680 section_entry)); 1681 return bld; 1682 } 1683 1684 /** 1685 * ice_get_sw_prof_type - determine switch profile type 1686 * @hw: pointer to the HW structure 1687 * @fv: pointer to the switch field vector 1688 */ 1689 static enum ice_prof_type 1690 ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv) 1691 { 1692 u16 i; 1693 1694 for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { 1695 /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ 1696 if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && 1697 fv->ew[i].off == ICE_VNI_OFFSET) 1698 return ICE_PROF_TUN_UDP; 1699 1700 /* GRE tunnel will have GRE protocol */ 1701 if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) 1702 return ICE_PROF_TUN_GRE; 1703 } 1704 1705 return ICE_PROF_NON_TUN; 1706 } 1707 1708 /** 1709 * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type 1710 * @hw: pointer to hardware structure 1711 * @req_profs: type of profiles requested 1712 * @bm: pointer to memory for returning the bitmap of field vectors 1713 */ 1714 void 1715 ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, 1716 ice_bitmap_t *bm) 1717 { 1718 struct ice_pkg_enum state; 1719 struct ice_seg *ice_seg; 1720 struct ice_fv *fv; 1721 1722 if (req_profs == ICE_PROF_ALL) { 1723 ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); 1724 return; 1725 } 1726 1727 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); 1728 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES); 1729 ice_seg = hw->seg; 1730 do { 1731 enum ice_prof_type prof_type; 1732 u32 offset; 1733 1734 fv = (struct ice_fv *) 1735 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, 1736 &offset, ice_sw_fv_handler); 1737 ice_seg = NULL; 1738 1739 if (fv) { 1740 /* Determine field vector type */ 1741 prof_type = ice_get_sw_prof_type(hw, fv); 1742 1743 if (req_profs & prof_type) 1744 ice_set_bit((u16)offset, bm); 1745 } 1746 } while (fv); 1747 } 1748 1749 /** 1750 * ice_get_sw_fv_list 1751 * @hw: pointer to the HW structure 1752 * @prot_ids: field vector to search for with a given protocol ID 1753 * @ids_cnt: lookup/protocol count 1754 * @bm: bitmap of field vectors to consider 1755 * @fv_list: Head of a list 1756 * 1757 * Finds all the field vector entries from switch block that contain 1758 * a given protocol ID and returns a list of structures of type 1759 * "ice_sw_fv_list_entry". 
Every structure in the list has a field vector 1760 * definition and profile ID information 1761 * NOTE: The caller of the function is responsible for freeing the memory 1762 * allocated for every list entry. 1763 */ 1764 enum ice_status 1765 ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, 1766 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list) 1767 { 1768 struct ice_sw_fv_list_entry *fvl; 1769 struct ice_sw_fv_list_entry *tmp; 1770 struct ice_pkg_enum state; 1771 struct ice_seg *ice_seg; 1772 struct ice_fv *fv; 1773 u32 offset; 1774 1775 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); 1776 1777 if (!ids_cnt || !hw->seg) 1778 return ICE_ERR_PARAM; 1779 1780 ice_seg = hw->seg; 1781 do { 1782 u16 i; 1783 1784 fv = (struct ice_fv *) 1785 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, 1786 &offset, ice_sw_fv_handler); 1787 if (!fv) 1788 break; 1789 ice_seg = NULL; 1790 1791 /* If field vector is not in the bitmap list, then skip this 1792 * profile. 1793 */ 1794 if (!ice_is_bit_set(bm, (u16)offset)) 1795 continue; 1796 1797 for (i = 0; i < ids_cnt; i++) { 1798 int j; 1799 1800 /* This code assumes that if a switch field vector line 1801 * has a matching protocol, then this line will contain 1802 * the entries necessary to represent every field in 1803 * that protocol header. 1804 */ 1805 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) 1806 if (fv->ew[j].prot_id == prot_ids[i]) 1807 break; 1808 if (j >= hw->blk[ICE_BLK_SW].es.fvw) 1809 break; 1810 if (i + 1 == ids_cnt) { 1811 fvl = (struct ice_sw_fv_list_entry *) 1812 ice_malloc(hw, sizeof(*fvl)); 1813 if (!fvl) 1814 goto err; 1815 fvl->fv_ptr = fv; 1816 fvl->profile_id = offset; 1817 LIST_ADD(&fvl->list_entry, fv_list); 1818 break; 1819 } 1820 } 1821 } while (fv); 1822 if (LIST_EMPTY(fv_list)) 1823 return ICE_ERR_CFG; 1824 return ICE_SUCCESS; 1825 1826 err: 1827 LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry, 1828 list_entry) { 1829 LIST_DEL(&fvl->list_entry); 1830 ice_free(hw, fvl); 1831 } 1832 1833 return ICE_ERR_NO_MEMORY; 1834 } 1835 1836 /** 1837 * ice_init_prof_result_bm - Initialize the profile result index bitmap 1838 * @hw: pointer to hardware structure 1839 */ 1840 void ice_init_prof_result_bm(struct ice_hw *hw) 1841 { 1842 struct ice_pkg_enum state; 1843 struct ice_seg *ice_seg; 1844 struct ice_fv *fv; 1845 1846 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); 1847 1848 if (!hw->seg) 1849 return; 1850 1851 ice_seg = hw->seg; 1852 do { 1853 u32 off; 1854 u16 i; 1855 1856 fv = (struct ice_fv *) 1857 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, 1858 &off, ice_sw_fv_handler); 1859 ice_seg = NULL; 1860 if (!fv) 1861 break; 1862 1863 ice_zero_bitmap(hw->switch_info->prof_res_bm[off], 1864 ICE_MAX_FV_WORDS); 1865 1866 /* Determine empty field vector indices, these can be 1867 * used for recipe results. Skip index 0, since it is 1868 * always used for Switch ID. 
1869 */ 1870 for (i = 1; i < ICE_MAX_FV_WORDS; i++) 1871 if (fv->ew[i].prot_id == ICE_PROT_INVALID && 1872 fv->ew[i].off == ICE_FV_OFFSET_INVAL) 1873 ice_set_bit(i, 1874 hw->switch_info->prof_res_bm[off]); 1875 } while (fv); 1876 } 1877 1878 /** 1879 * ice_pkg_buf_free 1880 * @hw: pointer to the HW structure 1881 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 1882 * 1883 * Frees a package buffer 1884 */ 1885 static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) 1886 { 1887 ice_free(hw, bld); 1888 } 1889 1890 /** 1891 * ice_pkg_buf_reserve_section 1892 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 1893 * @count: the number of sections to reserve 1894 * 1895 * Reserves one or more section table entries in a package buffer. This routine 1896 * can be called multiple times as long as they are made before calling 1897 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() 1898 * is called once, the number of sections that can be allocated will not be able 1899 * to be increased; not using all reserved sections is fine, but this will 1900 * result in some wasted space in the buffer. 1901 * Note: all package contents must be in Little Endian form. 1902 */ 1903 static enum ice_status 1904 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) 1905 { 1906 struct ice_buf_hdr *buf; 1907 u16 section_count; 1908 u16 data_end; 1909 1910 if (!bld) 1911 return ICE_ERR_PARAM; 1912 1913 buf = (struct ice_buf_hdr *)&bld->buf; 1914 1915 /* already an active section, can't increase table size */ 1916 section_count = LE16_TO_CPU(buf->section_count); 1917 if (section_count > 0) 1918 return ICE_ERR_CFG; 1919 1920 if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) 1921 return ICE_ERR_CFG; 1922 bld->reserved_section_table_entries += count; 1923 1924 data_end = LE16_TO_CPU(buf->data_end) + 1925 FLEX_ARRAY_SIZE(buf, section_entry, count); 1926 buf->data_end = CPU_TO_LE16(data_end); 1927 1928 return ICE_SUCCESS; 1929 } 1930 1931 /** 1932 * ice_pkg_buf_alloc_section 1933 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 1934 * @type: the section type value 1935 * @size: the size of the section to reserve (in bytes) 1936 * 1937 * Reserves memory in the buffer for a section's content and updates the 1938 * buffers' status accordingly. This routine returns a pointer to the first 1939 * byte of the section start within the buffer, which is used to fill in the 1940 * section contents. 1941 * Note: all package contents must be in Little Endian form. 
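 *
 * A minimal build-flow sketch (illustrative only; "sid" and "size" are
 * hypothetical values supplied by the caller, error handling is abbreviated,
 * and the section contents would be filled in before sending the update):
 *
 *	struct ice_buf_build *bld = ice_pkg_buf_alloc(hw);
 *	void *sect;
 *
 *	if (bld && !ice_pkg_buf_reserve_section(bld, 1)) {
 *		sect = ice_pkg_buf_alloc_section(bld, sid, size);
 *		if (sect)
 *			ice_update_pkg(hw, ice_pkg_buf(bld), 1);
 *	}
 *	ice_pkg_buf_free(hw, bld);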
1942 */ 1943 static void * 1944 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) 1945 { 1946 struct ice_buf_hdr *buf; 1947 u16 sect_count; 1948 u16 data_end; 1949 1950 if (!bld || !type || !size) 1951 return NULL; 1952 1953 buf = (struct ice_buf_hdr *)&bld->buf; 1954 1955 /* check for enough space left in buffer */ 1956 data_end = LE16_TO_CPU(buf->data_end); 1957 1958 /* section start must align on 4 byte boundary */ 1959 data_end = ICE_ALIGN(data_end, 4); 1960 1961 if ((data_end + size) > ICE_MAX_S_DATA_END) 1962 return NULL; 1963 1964 /* check for more available section table entries */ 1965 sect_count = LE16_TO_CPU(buf->section_count); 1966 if (sect_count < bld->reserved_section_table_entries) { 1967 void *section_ptr = ((u8 *)buf) + data_end; 1968 1969 buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end); 1970 buf->section_entry[sect_count].size = CPU_TO_LE16(size); 1971 buf->section_entry[sect_count].type = CPU_TO_LE32(type); 1972 1973 data_end += size; 1974 buf->data_end = CPU_TO_LE16(data_end); 1975 1976 buf->section_count = CPU_TO_LE16(sect_count + 1); 1977 return section_ptr; 1978 } 1979 1980 /* no free section table entries */ 1981 return NULL; 1982 } 1983 1984 /** 1985 * ice_pkg_buf_alloc_single_section 1986 * @hw: pointer to the HW structure 1987 * @type: the section type value 1988 * @size: the size of the section to reserve (in bytes) 1989 * @section: returns pointer to the section 1990 * 1991 * Allocates a package buffer with a single section. 1992 * Note: all package contents must be in Little Endian form. 1993 */ 1994 static struct ice_buf_build * 1995 ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, 1996 void **section) 1997 { 1998 struct ice_buf_build *buf; 1999 2000 if (!section) 2001 return NULL; 2002 2003 buf = ice_pkg_buf_alloc(hw); 2004 if (!buf) 2005 return NULL; 2006 2007 if (ice_pkg_buf_reserve_section(buf, 1)) 2008 goto ice_pkg_buf_alloc_single_section_err; 2009 2010 *section = ice_pkg_buf_alloc_section(buf, type, size); 2011 if (!*section) 2012 goto ice_pkg_buf_alloc_single_section_err; 2013 2014 return buf; 2015 2016 ice_pkg_buf_alloc_single_section_err: 2017 ice_pkg_buf_free(hw, buf); 2018 return NULL; 2019 } 2020 2021 /** 2022 * ice_pkg_buf_unreserve_section 2023 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 2024 * @count: the number of sections to unreserve 2025 * 2026 * Unreserves one or more section table entries in a package buffer, releasing 2027 * space that can be used for section data. This routine can be called 2028 * multiple times as long as they are made before calling 2029 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() 2030 * is called once, the number of sections that can be allocated will not be able 2031 * to be increased; not using all reserved sections is fine, but this will 2032 * result in some wasted space in the buffer. 2033 * Note: all package contents must be in Little Endian form. 
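 *
 * For example (hypothetical counts, shown only for illustration): a caller
 * that reserved four entries with ice_pkg_buf_reserve_section(bld, 4) but
 * later decides it only needs two can call
 * ice_pkg_buf_unreserve_section(bld, 2) before its first call to
 * ice_pkg_buf_alloc_section(), reclaiming that table space for section data.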
2034 */ 2035 enum ice_status 2036 ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count) 2037 { 2038 struct ice_buf_hdr *buf; 2039 u16 section_count; 2040 u16 data_end; 2041 2042 if (!bld) 2043 return ICE_ERR_PARAM; 2044 2045 buf = (struct ice_buf_hdr *)&bld->buf; 2046 2047 /* already an active section, can't decrease table size */ 2048 section_count = LE16_TO_CPU(buf->section_count); 2049 if (section_count > 0) 2050 return ICE_ERR_CFG; 2051 2052 if (count > bld->reserved_section_table_entries) 2053 return ICE_ERR_CFG; 2054 bld->reserved_section_table_entries -= count; 2055 2056 data_end = LE16_TO_CPU(buf->data_end) - 2057 FLEX_ARRAY_SIZE(buf, section_entry, count); 2058 buf->data_end = CPU_TO_LE16(data_end); 2059 2060 return ICE_SUCCESS; 2061 } 2062 2063 /** 2064 * ice_pkg_buf_get_free_space 2065 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 2066 * 2067 * Returns the number of free bytes remaining in the buffer. 2068 * Note: all package contents must be in Little Endian form. 2069 */ 2070 u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld) 2071 { 2072 struct ice_buf_hdr *buf; 2073 2074 if (!bld) 2075 return 0; 2076 2077 buf = (struct ice_buf_hdr *)&bld->buf; 2078 return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end); 2079 } 2080 2081 /** 2082 * ice_pkg_buf_get_active_sections 2083 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 2084 * 2085 * Returns the number of active sections. Before using the package buffer 2086 * in an update package command, the caller should make sure that there is at 2087 * least one active section - otherwise, the buffer is not legal and should 2088 * not be used. 2089 * Note: all package contents must be in Little Endian form. 2090 */ 2091 static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) 2092 { 2093 struct ice_buf_hdr *buf; 2094 2095 if (!bld) 2096 return 0; 2097 2098 buf = (struct ice_buf_hdr *)&bld->buf; 2099 return LE16_TO_CPU(buf->section_count); 2100 } 2101 2102 /** 2103 * ice_pkg_buf 2104 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 2105 * 2106 * Return a pointer to the buffer's header 2107 */ 2108 static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) 2109 { 2110 if (!bld) 2111 return NULL; 2112 2113 return &bld->buf; 2114 } 2115 2116 /** 2117 * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage 2118 * @hw: pointer to the HW structure 2119 * @port: port to search for 2120 * @index: optionally returns index 2121 * 2122 * Returns whether a port is already in use as a tunnel, and optionally its 2123 * index 2124 */ 2125 static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index) 2126 { 2127 u16 i; 2128 2129 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 2130 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) { 2131 if (index) 2132 *index = i; 2133 return true; 2134 } 2135 2136 return false; 2137 } 2138 2139 /** 2140 * ice_tunnel_port_in_use 2141 * @hw: pointer to the HW structure 2142 * @port: port to search for 2143 * @index: optionally returns index 2144 * 2145 * Returns whether a port is already in use as a tunnel, and optionally its 2146 * index 2147 */ 2148 bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index) 2149 { 2150 bool res; 2151 2152 ice_acquire_lock(&hw->tnl_lock); 2153 res = ice_tunnel_port_in_use_hlpr(hw, port, index); 2154 ice_release_lock(&hw->tnl_lock); 2155 2156 return res; 2157 } 2158 2159 /** 2160 * ice_tunnel_get_type 2161 * @hw: pointer to the HW 
structure 2162 * @port: port to search for 2163 * @type: returns tunnel type 2164 * 2165 * For a given port number, will return the type of tunnel. 2166 */ 2167 bool 2168 ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type) 2169 { 2170 bool res = false; 2171 u16 i; 2172 2173 ice_acquire_lock(&hw->tnl_lock); 2174 2175 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 2176 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) { 2177 *type = hw->tnl.tbl[i].type; 2178 res = true; 2179 break; 2180 } 2181 2182 ice_release_lock(&hw->tnl_lock); 2183 2184 return res; 2185 } 2186 2187 /** 2188 * ice_find_free_tunnel_entry 2189 * @hw: pointer to the HW structure 2190 * @type: tunnel type 2191 * @index: optionally returns index 2192 * 2193 * Returns whether there is a free tunnel entry, and optionally its index 2194 */ 2195 static bool 2196 ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type, 2197 u16 *index) 2198 { 2199 u16 i; 2200 2201 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 2202 if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use && 2203 hw->tnl.tbl[i].type == type) { 2204 if (index) 2205 *index = i; 2206 return true; 2207 } 2208 2209 return false; 2210 } 2211 2212 /** 2213 * ice_get_open_tunnel_port - retrieve an open tunnel port 2214 * @hw: pointer to the HW structure 2215 * @type: tunnel type (TNL_ALL will return any open port) 2216 * @port: returns open port 2217 */ 2218 bool 2219 ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, 2220 u16 *port) 2221 { 2222 bool res = false; 2223 u16 i; 2224 2225 ice_acquire_lock(&hw->tnl_lock); 2226 2227 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 2228 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && 2229 (type == TNL_ALL || hw->tnl.tbl[i].type == type)) { 2230 *port = hw->tnl.tbl[i].port; 2231 res = true; 2232 break; 2233 } 2234 2235 ice_release_lock(&hw->tnl_lock); 2236 2237 return res; 2238 } 2239 2240 /** 2241 * ice_create_tunnel 2242 * @hw: pointer to the HW structure 2243 * @type: type of tunnel 2244 * @port: port of tunnel to create 2245 * 2246 * Create a tunnel by updating the parse graph in the parser. We do that by 2247 * creating a package buffer with the tunnel info and issuing an update package 2248 * command.
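 *
 * Hedged usage sketch (the tunnel type and port values here are purely
 * illustrative):
 *
 *	status = ice_create_tunnel(hw, type, port);
 *
 * If the port is already in use as a tunnel, the existing entry's reference
 * count is incremented and no new boost TCAM entry is consumed.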
2249 */ 2250 enum ice_status 2251 ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port) 2252 { 2253 struct ice_boost_tcam_section *sect_rx, *sect_tx; 2254 enum ice_status status = ICE_ERR_MAX_LIMIT; 2255 struct ice_buf_build *bld; 2256 u16 index; 2257 2258 ice_acquire_lock(&hw->tnl_lock); 2259 2260 if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) { 2261 hw->tnl.tbl[index].ref++; 2262 status = ICE_SUCCESS; 2263 goto ice_create_tunnel_end; 2264 } 2265 2266 if (!ice_find_free_tunnel_entry(hw, type, &index)) { 2267 status = ICE_ERR_OUT_OF_RANGE; 2268 goto ice_create_tunnel_end; 2269 } 2270 2271 bld = ice_pkg_buf_alloc(hw); 2272 if (!bld) { 2273 status = ICE_ERR_NO_MEMORY; 2274 goto ice_create_tunnel_end; 2275 } 2276 2277 /* allocate 2 sections, one for Rx parser, one for Tx parser */ 2278 if (ice_pkg_buf_reserve_section(bld, 2)) 2279 goto ice_create_tunnel_err; 2280 2281 sect_rx = (struct ice_boost_tcam_section *) 2282 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, 2283 ice_struct_size(sect_rx, tcam, 1)); 2284 if (!sect_rx) 2285 goto ice_create_tunnel_err; 2286 sect_rx->count = CPU_TO_LE16(1); 2287 2288 sect_tx = (struct ice_boost_tcam_section *) 2289 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, 2290 ice_struct_size(sect_tx, tcam, 1)); 2291 if (!sect_tx) 2292 goto ice_create_tunnel_err; 2293 sect_tx->count = CPU_TO_LE16(1); 2294 2295 /* copy original boost entry to update package buffer */ 2296 ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry, 2297 sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA); 2298 2299 /* over-write the never-match dest port key bits with the encoded port 2300 * bits 2301 */ 2302 ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key), 2303 (u8 *)&port, NULL, NULL, NULL, 2304 (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key), 2305 sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key)); 2306 2307 /* exact copy of entry to Tx section entry */ 2308 ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam), 2309 ICE_NONDMA_TO_NONDMA); 2310 2311 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); 2312 if (!status) { 2313 hw->tnl.tbl[index].port = port; 2314 hw->tnl.tbl[index].in_use = true; 2315 hw->tnl.tbl[index].ref = 1; 2316 } 2317 2318 ice_create_tunnel_err: 2319 ice_pkg_buf_free(hw, bld); 2320 2321 ice_create_tunnel_end: 2322 ice_release_lock(&hw->tnl_lock); 2323 2324 return status; 2325 } 2326 2327 /** 2328 * ice_destroy_tunnel 2329 * @hw: pointer to the HW structure 2330 * @port: port of tunnel to destroy (ignored if the all parameter is true) 2331 * @all: flag that states to destroy all tunnels 2332 * 2333 * Destroys a tunnel or all tunnels by creating an update package buffer 2334 * targeting the specific updates requested and then performing an update 2335 * package.
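 *
 * Hedged usage sketch (port value is illustrative):
 *
 *	status = ice_destroy_tunnel(hw, port, false);	destroy a single port
 *	status = ice_destroy_tunnel(hw, 0, true);	destroy every tunnel
 *
 * When destroying a single port whose reference count is greater than one,
 * the count is simply decremented and no update package command is issued.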
2336 */ 2337 enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) 2338 { 2339 struct ice_boost_tcam_section *sect_rx, *sect_tx; 2340 enum ice_status status = ICE_ERR_MAX_LIMIT; 2341 struct ice_buf_build *bld; 2342 u16 count = 0; 2343 u16 index; 2344 u16 size; 2345 u16 i; 2346 2347 ice_acquire_lock(&hw->tnl_lock); 2348 2349 if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index)) 2350 if (hw->tnl.tbl[index].ref > 1) { 2351 hw->tnl.tbl[index].ref--; 2352 status = ICE_SUCCESS; 2353 goto ice_destroy_tunnel_end; 2354 } 2355 2356 /* determine count */ 2357 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 2358 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && 2359 (all || hw->tnl.tbl[i].port == port)) 2360 count++; 2361 2362 if (!count) { 2363 status = ICE_ERR_PARAM; 2364 goto ice_destroy_tunnel_end; 2365 } 2366 2367 /* size of section - there is at least one entry */ 2368 size = ice_struct_size(sect_rx, tcam, count); 2369 2370 bld = ice_pkg_buf_alloc(hw); 2371 if (!bld) { 2372 status = ICE_ERR_NO_MEMORY; 2373 goto ice_destroy_tunnel_end; 2374 } 2375 2376 /* allocate 2 sections, one for Rx parser, one for Tx parser */ 2377 if (ice_pkg_buf_reserve_section(bld, 2)) 2378 goto ice_destroy_tunnel_err; 2379 2380 sect_rx = (struct ice_boost_tcam_section *) 2381 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, 2382 size); 2383 if (!sect_rx) 2384 goto ice_destroy_tunnel_err; 2385 sect_rx->count = CPU_TO_LE16(1); 2386 2387 sect_tx = (struct ice_boost_tcam_section *) 2388 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, 2389 size); 2390 if (!sect_tx) 2391 goto ice_destroy_tunnel_err; 2392 sect_tx->count = CPU_TO_LE16(1); 2393 2394 /* copy original boost entry to update package buffer, one copy to Rx 2395 * section, another copy to the Tx section 2396 */ 2397 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) 2398 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && 2399 (all || hw->tnl.tbl[i].port == port)) { 2400 ice_memcpy(sect_rx->tcam + i, 2401 hw->tnl.tbl[i].boost_entry, 2402 sizeof(*sect_rx->tcam), 2403 ICE_NONDMA_TO_NONDMA); 2404 ice_memcpy(sect_tx->tcam + i, 2405 hw->tnl.tbl[i].boost_entry, 2406 sizeof(*sect_tx->tcam), 2407 ICE_NONDMA_TO_NONDMA); 2408 hw->tnl.tbl[i].marked = true; 2409 } 2410 2411 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); 2412 if (!status) 2413 for (i = 0; i < hw->tnl.count && 2414 i < ICE_TUNNEL_MAX_ENTRIES; i++) 2415 if (hw->tnl.tbl[i].marked) { 2416 hw->tnl.tbl[i].ref = 0; 2417 hw->tnl.tbl[i].port = 0; 2418 hw->tnl.tbl[i].in_use = false; 2419 hw->tnl.tbl[i].marked = false; 2420 } 2421 2422 ice_destroy_tunnel_err: 2423 ice_pkg_buf_free(hw, bld); 2424 2425 ice_destroy_tunnel_end: 2426 ice_release_lock(&hw->tnl_lock); 2427 2428 return status; 2429 } 2430 2431 /** 2432 * ice_replay_tunnels 2433 * @hw: pointer to the HW structure 2434 * 2435 * Replays all tunnels 2436 */ 2437 enum ice_status ice_replay_tunnels(struct ice_hw *hw) 2438 { 2439 enum ice_status status = ICE_SUCCESS; 2440 u16 i; 2441 2442 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 2443 2444 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) { 2445 enum ice_tunnel_type type = hw->tnl.tbl[i].type; 2446 u16 refs = hw->tnl.tbl[i].ref; 2447 u16 port = hw->tnl.tbl[i].port; 2448 2449 if (!hw->tnl.tbl[i].in_use) 2450 continue; 2451 2452 /* Replay tunnels one at a time by destroying them, then 2453 * recreating them 2454 */ 2455 hw->tnl.tbl[i].ref = 1; /* make sure to destroy in one call */ 2456 status = ice_destroy_tunnel(hw, 
port, false); 2457 if (status) { 2458 ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - destroy tunnel port 0x%x\n", 2459 status, port); 2460 break; 2461 } 2462 2463 status = ice_create_tunnel(hw, type, port); 2464 if (status) { 2465 ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - create tunnel port 0x%x\n", 2466 status, port); 2467 break; 2468 } 2469 2470 /* reset to original ref count */ 2471 hw->tnl.tbl[i].ref = refs; 2472 } 2473 2474 return status; 2475 } 2476 2477 /** 2478 * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index 2479 * @hw: pointer to the hardware structure 2480 * @blk: hardware block 2481 * @prof: profile ID 2482 * @fv_idx: field vector word index 2483 * @prot: variable to receive the protocol ID 2484 * @off: variable to receive the protocol offset 2485 */ 2486 enum ice_status 2487 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, 2488 u8 *prot, u16 *off) 2489 { 2490 struct ice_fv_word *fv_ext; 2491 2492 if (prof >= hw->blk[blk].es.count) 2493 return ICE_ERR_PARAM; 2494 2495 if (fv_idx >= hw->blk[blk].es.fvw) 2496 return ICE_ERR_PARAM; 2497 2498 fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw); 2499 2500 *prot = fv_ext[fv_idx].prot_id; 2501 *off = fv_ext[fv_idx].off; 2502 2503 return ICE_SUCCESS; 2504 } 2505 2506 /* PTG Management */ 2507 2508 /** 2509 * ice_ptg_update_xlt1 - Updates packet type groups in HW via XLT1 table 2510 * @hw: pointer to the hardware structure 2511 * @blk: HW block 2512 * 2513 * This function will update the XLT1 hardware table to reflect the new 2514 * packet type group configuration. 2515 */ 2516 enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk) 2517 { 2518 struct ice_xlt1_section *sect; 2519 struct ice_buf_build *bld; 2520 enum ice_status status; 2521 u16 index; 2522 2523 bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1), 2524 ice_struct_size(sect, value, 2525 ICE_XLT1_CNT), 2526 (void **)&sect); 2527 if (!bld) 2528 return ICE_ERR_NO_MEMORY; 2529 2530 sect->count = CPU_TO_LE16(ICE_XLT1_CNT); 2531 sect->offset = CPU_TO_LE16(0); 2532 for (index = 0; index < ICE_XLT1_CNT; index++) 2533 sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg; 2534 2535 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); 2536 2537 ice_pkg_buf_free(hw, bld); 2538 2539 return status; 2540 } 2541 2542 /** 2543 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype) 2544 * @hw: pointer to the hardware structure 2545 * @blk: HW block 2546 * @ptype: the ptype to search for 2547 * @ptg: pointer to variable that receives the PTG 2548 * 2549 * This function will search the PTGs for a particular ptype, returning the 2550 * PTG ID that contains it through the PTG parameter, with the value of 2551 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG. 2552 */ 2553 static enum ice_status 2554 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) 2555 { 2556 if (ptype >= ICE_XLT1_CNT || !ptg) 2557 return ICE_ERR_PARAM; 2558 2559 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; 2560 return ICE_SUCCESS; 2561 } 2562 2563 /** 2564 * ice_ptg_alloc_val - Allocates a new packet type group ID by value 2565 * @hw: pointer to the hardware structure 2566 * @blk: HW block 2567 * @ptg: the PTG to allocate 2568 * 2569 * This function allocates a given packet type group ID specified by the PTG 2570 * parameter.
2571 */ 2572 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg) 2573 { 2574 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true; 2575 } 2576 2577 /** 2578 * ice_ptg_free - Frees a packet type group 2579 * @hw: pointer to the hardware structure 2580 * @blk: HW block 2581 * @ptg: the PTG ID to free 2582 * 2583 * This function frees a packet type group, and returns all the current ptypes 2584 * within it to the default PTG. 2585 */ 2586 void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg) 2587 { 2588 struct ice_ptg_ptype *p, *temp; 2589 2590 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false; 2591 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; 2592 while (p) { 2593 p->ptg = ICE_DEFAULT_PTG; 2594 temp = p->next_ptype; 2595 p->next_ptype = NULL; 2596 p = temp; 2597 } 2598 2599 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL; 2600 } 2601 2602 /** 2603 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group 2604 * @hw: pointer to the hardware structure 2605 * @blk: HW block 2606 * @ptype: the ptype to remove 2607 * @ptg: the PTG to remove the ptype from 2608 * 2609 * This function will remove the ptype from the specific PTG, and move it to 2610 * the default PTG (ICE_DEFAULT_PTG). 2611 */ 2612 static enum ice_status 2613 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) 2614 { 2615 struct ice_ptg_ptype **ch; 2616 struct ice_ptg_ptype *p; 2617 2618 if (ptype > ICE_XLT1_CNT - 1) 2619 return ICE_ERR_PARAM; 2620 2621 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) 2622 return ICE_ERR_DOES_NOT_EXIST; 2623 2624 /* Should not happen if .in_use is set, bad config */ 2625 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) 2626 return ICE_ERR_CFG; 2627 2628 /* find the ptype within this PTG, and bypass the link over it */ 2629 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; 2630 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; 2631 while (p) { 2632 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) { 2633 *ch = p->next_ptype; 2634 break; 2635 } 2636 2637 ch = &p->next_ptype; 2638 p = p->next_ptype; 2639 } 2640 2641 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG; 2642 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL; 2643 2644 return ICE_SUCCESS; 2645 } 2646 2647 /** 2648 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group 2649 * @hw: pointer to the hardware structure 2650 * @blk: HW block 2651 * @ptype: the ptype to add or move 2652 * @ptg: the PTG to add or move the ptype to 2653 * 2654 * This function will either add or move a ptype to a particular PTG depending 2655 * on if the ptype is already part of another group. Note that using a 2656 * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the 2657 * default PTG. 2658 */ 2659 static enum ice_status 2660 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) 2661 { 2662 enum ice_status status; 2663 u8 original_ptg; 2664 2665 if (ptype > ICE_XLT1_CNT - 1) 2666 return ICE_ERR_PARAM; 2667 2668 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) 2669 return ICE_ERR_DOES_NOT_EXIST; 2670 2671 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); 2672 if (status) 2673 return status; 2674 2675 /* Is ptype already in the correct PTG? */ 2676 if (original_ptg == ptg) 2677 return ICE_SUCCESS; 2678 2679 /* Remove from original PTG and move back to the default PTG */ 2680 if (original_ptg != ICE_DEFAULT_PTG) 2681 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg); 2682 2683 /* Moving to default PTG? 
Then we're done with this request */ 2684 if (ptg == ICE_DEFAULT_PTG) 2685 return ICE_SUCCESS; 2686 2687 /* Add ptype to PTG at beginning of list */ 2688 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = 2689 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; 2690 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = 2691 &hw->blk[blk].xlt1.ptypes[ptype]; 2692 2693 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg; 2694 hw->blk[blk].xlt1.t[ptype] = ptg; 2695 2696 return ICE_SUCCESS; 2697 } 2698 2699 /* Block / table size info */ 2700 struct ice_blk_size_details { 2701 u16 xlt1; /* # XLT1 entries */ 2702 u16 xlt2; /* # XLT2 entries */ 2703 u16 prof_tcam; /* # profile ID TCAM entries */ 2704 u16 prof_id; /* # profile IDs */ 2705 u8 prof_cdid_bits; /* # CDID one-hot bits used in key */ 2706 u16 prof_redir; /* # profile redirection entries */ 2707 u16 es; /* # extraction sequence entries */ 2708 u16 fvw; /* # field vector words */ 2709 u8 overwrite; /* overwrite existing entries allowed */ 2710 u8 reverse; /* reverse FV order */ 2711 }; 2712 2713 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = { 2714 /** 2715 * Table Definitions 2716 * XLT1 - Number of entries in XLT1 table 2717 * XLT2 - Number of entries in XLT2 table 2718 * TCAM - Number of entries Profile ID TCAM table 2719 * CDID - Control Domain ID of the hardware block 2720 * PRED - Number of entries in the Profile Redirection Table 2721 * FV - Number of entries in the Field Vector 2722 * FVW - Width (in WORDs) of the Field Vector 2723 * OVR - Overwrite existing table entries 2724 * REV - Reverse FV 2725 */ 2726 /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */ 2727 /* Overwrite , Reverse FV */ 2728 /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48, 2729 false, false }, 2730 /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32, 2731 false, false }, 2732 /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, 2733 false, true }, 2734 /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, 2735 true, true }, 2736 /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24, 2737 false, false }, 2738 }; 2739 2740 enum ice_sid_all { 2741 ICE_SID_XLT1_OFF = 0, 2742 ICE_SID_XLT2_OFF, 2743 ICE_SID_PR_OFF, 2744 ICE_SID_PR_REDIR_OFF, 2745 ICE_SID_ES_OFF, 2746 ICE_SID_OFF_COUNT, 2747 }; 2748 2749 /* Characteristic handling */ 2750 2751 /** 2752 * ice_match_prop_lst - determine if properties of two lists match 2753 * @list1: first properties list 2754 * @list2: second properties list 2755 * 2756 * Count, cookies and the order must match in order to be considered equivalent. 
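 *
 * For example, two lists whose entries carry profile cookies (A, B) and
 * (B, A) are not considered equivalent even though they hold the same
 * cookies, because the order encodes priority.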
2757 */ 2758 static bool 2759 ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2) 2760 { 2761 struct ice_vsig_prof *tmp1; 2762 struct ice_vsig_prof *tmp2; 2763 u16 chk_count = 0; 2764 u16 count = 0; 2765 2766 /* compare counts */ 2767 LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) 2768 count++; 2769 LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) 2770 chk_count++; 2771 if (!count || count != chk_count) 2772 return false; 2773 2774 tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list); 2775 tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list); 2776 2777 /* profile cookies must match, and in the exact same order, to take 2778 * priority into account 2779 */ 2780 while (count--) { 2781 if (tmp2->profile_cookie != tmp1->profile_cookie) 2782 return false; 2783 2784 tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list); 2785 tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list); 2786 } 2787 2788 return true; 2789 } 2790 2791 /* VSIG Management */ 2792 2793 /** 2794 * ice_vsig_update_xlt2_sect - update one section of XLT2 table 2795 * @hw: pointer to the hardware structure 2796 * @blk: HW block 2797 * @vsi: HW VSI number to program 2798 * @vsig: VSIG for the VSI 2799 * 2800 * This function will update the XLT2 hardware table with the input VSI 2801 * group configuration. 2802 */ 2803 static enum ice_status 2804 ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi, 2805 u16 vsig) 2806 { 2807 struct ice_xlt2_section *sect; 2808 struct ice_buf_build *bld; 2809 enum ice_status status; 2810 2811 bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2), 2812 ice_struct_size(sect, value, 1), 2813 (void **)&sect); 2814 if (!bld) 2815 return ICE_ERR_NO_MEMORY; 2816 2817 sect->count = CPU_TO_LE16(1); 2818 sect->offset = CPU_TO_LE16(vsi); 2819 sect->value[0] = CPU_TO_LE16(vsig); 2820 2821 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); 2822 2823 ice_pkg_buf_free(hw, bld); 2824 2825 return status; 2826 } 2827 2828 /** 2829 * ice_vsig_update_xlt2 - update XLT2 table with VSIG configuration 2830 * @hw: pointer to the hardware structure 2831 * @blk: HW block 2832 * 2833 * This function will update the XLT2 hardware table with the input VSI 2834 * group configuration of used VSIs. 2835 */ 2836 enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk) 2837 { 2838 u16 vsi; 2839 2840 for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) { 2841 /* update only vsis that have been changed */ 2842 if (hw->blk[blk].xlt2.vsis[vsi].changed) { 2843 enum ice_status status; 2844 u16 vsig; 2845 2846 vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; 2847 status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig); 2848 if (status) 2849 return status; 2850 2851 hw->blk[blk].xlt2.vsis[vsi].changed = 0; 2852 } 2853 } 2854 2855 return ICE_SUCCESS; 2856 } 2857 2858 /** 2859 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI 2860 * @hw: pointer to the hardware structure 2861 * @blk: HW block 2862 * @vsi: VSI of interest 2863 * @vsig: pointer to receive the VSI group 2864 * 2865 * This function will look up the VSI entry in the XLT2 list and return 2866 * the VSI group it is associated with. 2867 */ 2868 enum ice_status 2869 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) 2870 { 2871 if (!vsig || vsi >= ICE_MAX_VSI) 2872 return ICE_ERR_PARAM; 2873 2874 /* As long as there's a default or valid VSIG associated with the input 2875 * VSI, the function returns success.
Any handling of VSIG will be 2876 * done by the following add, update or remove functions. 2877 */ 2878 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; 2879 2880 return ICE_SUCCESS; 2881 } 2882 2883 /** 2884 * ice_vsig_alloc_val - allocate a new VSIG by value 2885 * @hw: pointer to the hardware structure 2886 * @blk: HW block 2887 * @vsig: the VSIG to allocate 2888 * 2889 * This function will allocate a given VSIG specified by the VSIG parameter. 2890 */ 2891 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig) 2892 { 2893 u16 idx = vsig & ICE_VSIG_IDX_M; 2894 2895 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) { 2896 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); 2897 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true; 2898 } 2899 2900 return ICE_VSIG_VALUE(idx, hw->pf_id); 2901 } 2902 2903 /** 2904 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG 2905 * @hw: pointer to the hardware structure 2906 * @blk: HW block 2907 * 2908 * This function will iterate through the VSIG list and mark the first 2909 * unused entry for the new VSIG entry as used and return that value. 2910 */ 2911 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk) 2912 { 2913 u16 i; 2914 2915 for (i = 1; i < ICE_MAX_VSIGS; i++) 2916 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use) 2917 return ice_vsig_alloc_val(hw, blk, i); 2918 2919 return ICE_DEFAULT_VSIG; 2920 } 2921 2922 /** 2923 * ice_find_dup_props_vsig - find VSI group with a specified set of properties 2924 * @hw: pointer to the hardware structure 2925 * @blk: HW block 2926 * @chs: characteristic list 2927 * @vsig: returns the VSIG with the matching profiles, if found 2928 * 2929 * Each VSIG is associated with a characteristic set; i.e. all VSIs under 2930 * a group have the same characteristic set. To check if there exists a VSIG 2931 * which has the same characteristics as the input characteristics; this 2932 * function will iterate through the XLT2 list and return the VSIG that has a 2933 * matching configuration. In order to make sure that priorities are accounted 2934 * for, the list must match exactly, including the order in which the 2935 * characteristics are listed. 2936 */ 2937 static enum ice_status 2938 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, 2939 struct LIST_HEAD_TYPE *chs, u16 *vsig) 2940 { 2941 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2; 2942 u16 i; 2943 2944 for (i = 0; i < xlt2->count; i++) 2945 if (xlt2->vsig_tbl[i].in_use && 2946 ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) { 2947 *vsig = ICE_VSIG_VALUE(i, hw->pf_id); 2948 return ICE_SUCCESS; 2949 } 2950 2951 return ICE_ERR_DOES_NOT_EXIST; 2952 } 2953 2954 /** 2955 * ice_vsig_free - free VSI group 2956 * @hw: pointer to the hardware structure 2957 * @blk: HW block 2958 * @vsig: VSIG to remove 2959 * 2960 * The function will remove all VSIs associated with the input VSIG and move 2961 * them to the DEFAULT_VSIG and mark the VSIG available. 
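 *
 * After this call the VSIG's characteristic list is freed and re-initialized,
 * so the same index can later be handed out again by ice_vsig_alloc() or
 * ice_vsig_alloc_val().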
2962 */ 2963 static enum ice_status 2964 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) 2965 { 2966 struct ice_vsig_prof *dtmp, *del; 2967 struct ice_vsig_vsi *vsi_cur; 2968 u16 idx; 2969 2970 idx = vsig & ICE_VSIG_IDX_M; 2971 if (idx >= ICE_MAX_VSIGS) 2972 return ICE_ERR_PARAM; 2973 2974 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) 2975 return ICE_ERR_DOES_NOT_EXIST; 2976 2977 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false; 2978 2979 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 2980 /* If the VSIG has at least 1 VSI then iterate through the 2981 * list and remove the VSIs before deleting the group. 2982 */ 2983 if (vsi_cur) { 2984 /* remove all vsis associated with this VSIG XLT2 entry */ 2985 do { 2986 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; 2987 2988 vsi_cur->vsig = ICE_DEFAULT_VSIG; 2989 vsi_cur->changed = 1; 2990 vsi_cur->next_vsi = NULL; 2991 vsi_cur = tmp; 2992 } while (vsi_cur); 2993 2994 /* NULL terminate head of VSI list */ 2995 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL; 2996 } 2997 2998 /* free characteristic list */ 2999 LIST_FOR_EACH_ENTRY_SAFE(del, dtmp, 3000 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 3001 ice_vsig_prof, list) { 3002 LIST_DEL(&del->list); 3003 ice_free(hw, del); 3004 } 3005 3006 /* if VSIG characteristic list was cleared for reset 3007 * re-initialize the list head 3008 */ 3009 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); 3010 3011 return ICE_SUCCESS; 3012 } 3013 3014 /** 3015 * ice_vsig_remove_vsi - remove VSI from VSIG 3016 * @hw: pointer to the hardware structure 3017 * @blk: HW block 3018 * @vsi: VSI to remove 3019 * @vsig: VSI group to remove from 3020 * 3021 * The function will remove the input VSI from its VSI group and move it 3022 * to the DEFAULT_VSIG. 3023 */ 3024 static enum ice_status 3025 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) 3026 { 3027 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; 3028 u16 idx; 3029 3030 idx = vsig & ICE_VSIG_IDX_M; 3031 3032 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) 3033 return ICE_ERR_PARAM; 3034 3035 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) 3036 return ICE_ERR_DOES_NOT_EXIST; 3037 3038 /* entry already in default VSIG, don't have to remove */ 3039 if (idx == ICE_DEFAULT_VSIG) 3040 return ICE_SUCCESS; 3041 3042 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 3043 if (!(*vsi_head)) 3044 return ICE_ERR_CFG; 3045 3046 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; 3047 vsi_cur = (*vsi_head); 3048 3049 /* iterate the VSI list, skip over the entry to be removed */ 3050 while (vsi_cur) { 3051 if (vsi_tgt == vsi_cur) { 3052 (*vsi_head) = vsi_cur->next_vsi; 3053 break; 3054 } 3055 vsi_head = &vsi_cur->next_vsi; 3056 vsi_cur = vsi_cur->next_vsi; 3057 } 3058 3059 /* verify if VSI was removed from group list */ 3060 if (!vsi_cur) 3061 return ICE_ERR_DOES_NOT_EXIST; 3062 3063 vsi_cur->vsig = ICE_DEFAULT_VSIG; 3064 vsi_cur->changed = 1; 3065 vsi_cur->next_vsi = NULL; 3066 3067 return ICE_SUCCESS; 3068 } 3069 3070 /** 3071 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group 3072 * @hw: pointer to the hardware structure 3073 * @blk: HW block 3074 * @vsi: VSI to move 3075 * @vsig: destination VSI group 3076 * 3077 * This function will move or add the input VSI to the target VSIG. 3078 * The function will find the original VSIG the VSI belongs to and 3079 * move the entry to the DEFAULT_VSIG, update the original VSIG and 3080 * then move entry to the new VSIG. 
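 *
 * A minimal sketch of the expected calling pattern (hypothetical caller,
 * error handling omitted):
 *
 *	vsig = ice_vsig_alloc(hw, blk);
 *	if (vsig != ICE_DEFAULT_VSIG)
 *		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
 *
 * Passing ICE_DEFAULT_VSIG as the destination simply parks the VSI in the
 * default group.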
3081 */ 3082 static enum ice_status 3083 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) 3084 { 3085 struct ice_vsig_vsi *tmp; 3086 enum ice_status status; 3087 u16 orig_vsig, idx; 3088 3089 idx = vsig & ICE_VSIG_IDX_M; 3090 3091 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) 3092 return ICE_ERR_PARAM; 3093 3094 /* if VSIG not in use and VSIG is not default type this VSIG 3095 * doesn't exist. 3096 */ 3097 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && 3098 vsig != ICE_DEFAULT_VSIG) 3099 return ICE_ERR_DOES_NOT_EXIST; 3100 3101 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); 3102 if (status) 3103 return status; 3104 3105 /* no update required if vsigs match */ 3106 if (orig_vsig == vsig) 3107 return ICE_SUCCESS; 3108 3109 if (orig_vsig != ICE_DEFAULT_VSIG) { 3110 /* remove entry from orig_vsig and add to default VSIG */ 3111 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig); 3112 if (status) 3113 return status; 3114 } 3115 3116 if (idx == ICE_DEFAULT_VSIG) 3117 return ICE_SUCCESS; 3118 3119 /* Create VSI entry and add VSIG and prop_mask values */ 3120 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig; 3121 hw->blk[blk].xlt2.vsis[vsi].changed = 1; 3122 3123 /* Add new entry to the head of the VSIG list */ 3124 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 3125 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = 3126 &hw->blk[blk].xlt2.vsis[vsi]; 3127 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp; 3128 hw->blk[blk].xlt2.t[vsi] = vsig; 3129 3130 return ICE_SUCCESS; 3131 } 3132 3133 /** 3134 * ice_find_prof_id - find profile ID for a given field vector 3135 * @hw: pointer to the hardware structure 3136 * @blk: HW block 3137 * @fv: field vector to search for 3138 * @prof_id: receives the profile ID 3139 */ 3140 static enum ice_status 3141 ice_find_prof_id(struct ice_hw *hw, enum ice_block blk, 3142 struct ice_fv_word *fv, u8 *prof_id) 3143 { 3144 struct ice_es *es = &hw->blk[blk].es; 3145 u16 off; 3146 u8 i; 3147 3148 for (i = 0; i < (u8)es->count; i++) { 3149 off = i * es->fvw; 3150 3151 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv))) 3152 continue; 3153 3154 *prof_id = i; 3155 return ICE_SUCCESS; 3156 } 3157 3158 return ICE_ERR_DOES_NOT_EXIST; 3159 } 3160 3161 /** 3162 * ice_prof_id_rsrc_type - get profile ID resource type for a block type 3163 * @blk: the block type 3164 * @rsrc_type: pointer to variable to receive the resource type 3165 */ 3166 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type) 3167 { 3168 switch (blk) { 3169 case ICE_BLK_RSS: 3170 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID; 3171 break; 3172 case ICE_BLK_PE: 3173 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID; 3174 break; 3175 default: 3176 return false; 3177 } 3178 return true; 3179 } 3180 3181 /** 3182 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type 3183 * @blk: the block type 3184 * @rsrc_type: pointer to variable to receive the resource type 3185 */ 3186 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) 3187 { 3188 switch (blk) { 3189 case ICE_BLK_RSS: 3190 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM; 3191 break; 3192 case ICE_BLK_PE: 3193 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM; 3194 break; 3195 default: 3196 return false; 3197 } 3198 return true; 3199 } 3200 3201 /** 3202 * ice_alloc_tcam_ent - allocate hardware TCAM entry 3203 * @hw: pointer to the HW struct 3204 * @blk: the block to allocate the TCAM for 3205 * @btm: true to allocate from bottom of table, false to allocate from top 3206 * 
@tcam_idx: pointer to variable to receive the TCAM entry 3207 * 3208 * This function allocates a new entry in a Profile ID TCAM for a specific 3209 * block. 3210 */ 3211 static enum ice_status 3212 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, 3213 u16 *tcam_idx) 3214 { 3215 u16 res_type; 3216 3217 if (!ice_tcam_ent_rsrc_type(blk, &res_type)) 3218 return ICE_ERR_PARAM; 3219 3220 return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx); 3221 } 3222 3223 /** 3224 * ice_free_tcam_ent - free hardware TCAM entry 3225 * @hw: pointer to the HW struct 3226 * @blk: the block from which to free the TCAM entry 3227 * @tcam_idx: the TCAM entry to free 3228 * 3229 * This function frees an entry in a Profile ID TCAM for a specific block. 3230 */ 3231 static enum ice_status 3232 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) 3233 { 3234 u16 res_type; 3235 3236 if (!ice_tcam_ent_rsrc_type(blk, &res_type)) 3237 return ICE_ERR_PARAM; 3238 3239 return ice_free_hw_res(hw, res_type, 1, &tcam_idx); 3240 } 3241 3242 /** 3243 * ice_alloc_prof_id - allocate profile ID 3244 * @hw: pointer to the HW struct 3245 * @blk: the block to allocate the profile ID for 3246 * @prof_id: pointer to variable to receive the profile ID 3247 * 3248 * This function allocates a new profile ID, which also corresponds to a Field 3249 * Vector (Extraction Sequence) entry. 3250 */ 3251 static enum ice_status 3252 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) 3253 { 3254 enum ice_status status; 3255 u16 res_type; 3256 u16 get_prof; 3257 3258 if (!ice_prof_id_rsrc_type(blk, &res_type)) 3259 return ICE_ERR_PARAM; 3260 3261 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof); 3262 if (!status) 3263 *prof_id = (u8)get_prof; 3264 3265 return status; 3266 } 3267 3268 /** 3269 * ice_free_prof_id - free profile ID 3270 * @hw: pointer to the HW struct 3271 * @blk: the block from which to free the profile ID 3272 * @prof_id: the profile ID to free 3273 * 3274 * This function frees a profile ID, which also corresponds to a Field Vector. 
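 *
 * Note: callers that manage reference counts go through ice_prof_dec_ref(),
 * which clears the extraction sequence and calls this helper only once the
 * profile's reference count drops to zero.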
3275 */ 3276 static enum ice_status 3277 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) 3278 { 3279 u16 tmp_prof_id = (u16)prof_id; 3280 u16 res_type; 3281 3282 if (!ice_prof_id_rsrc_type(blk, &res_type)) 3283 return ICE_ERR_PARAM; 3284 3285 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id); 3286 } 3287 3288 /** 3289 * ice_prof_inc_ref - increment reference count for profile 3290 * @hw: pointer to the HW struct 3291 * @blk: the block from which to free the profile ID 3292 * @prof_id: the profile ID for which to increment the reference count 3293 */ 3294 static enum ice_status 3295 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) 3296 { 3297 if (prof_id > hw->blk[blk].es.count) 3298 return ICE_ERR_PARAM; 3299 3300 hw->blk[blk].es.ref_count[prof_id]++; 3301 3302 return ICE_SUCCESS; 3303 } 3304 3305 /** 3306 * ice_write_es - write an extraction sequence to hardware 3307 * @hw: pointer to the HW struct 3308 * @blk: the block in which to write the extraction sequence 3309 * @prof_id: the profile ID to write 3310 * @fv: pointer to the extraction sequence to write - NULL to clear extraction 3311 */ 3312 static void 3313 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, 3314 struct ice_fv_word *fv) 3315 { 3316 u16 off; 3317 3318 off = prof_id * hw->blk[blk].es.fvw; 3319 if (!fv) { 3320 ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw * 3321 sizeof(*fv), ICE_NONDMA_MEM); 3322 hw->blk[blk].es.written[prof_id] = false; 3323 } else { 3324 ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw * 3325 sizeof(*fv), ICE_NONDMA_TO_NONDMA); 3326 } 3327 } 3328 3329 /** 3330 * ice_prof_dec_ref - decrement reference count for profile 3331 * @hw: pointer to the HW struct 3332 * @blk: the block from which to free the profile ID 3333 * @prof_id: the profile ID for which to decrement the reference count 3334 */ 3335 static enum ice_status 3336 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) 3337 { 3338 if (prof_id > hw->blk[blk].es.count) 3339 return ICE_ERR_PARAM; 3340 3341 if (hw->blk[blk].es.ref_count[prof_id] > 0) { 3342 if (!--hw->blk[blk].es.ref_count[prof_id]) { 3343 ice_write_es(hw, blk, prof_id, NULL); 3344 return ice_free_prof_id(hw, blk, prof_id); 3345 } 3346 } 3347 3348 return ICE_SUCCESS; 3349 } 3350 3351 /* Block / table section IDs */ 3352 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = { 3353 /* SWITCH */ 3354 { ICE_SID_XLT1_SW, 3355 ICE_SID_XLT2_SW, 3356 ICE_SID_PROFID_TCAM_SW, 3357 ICE_SID_PROFID_REDIR_SW, 3358 ICE_SID_FLD_VEC_SW 3359 }, 3360 3361 /* ACL */ 3362 { ICE_SID_XLT1_ACL, 3363 ICE_SID_XLT2_ACL, 3364 ICE_SID_PROFID_TCAM_ACL, 3365 ICE_SID_PROFID_REDIR_ACL, 3366 ICE_SID_FLD_VEC_ACL 3367 }, 3368 3369 /* FD */ 3370 { ICE_SID_XLT1_FD, 3371 ICE_SID_XLT2_FD, 3372 ICE_SID_PROFID_TCAM_FD, 3373 ICE_SID_PROFID_REDIR_FD, 3374 ICE_SID_FLD_VEC_FD 3375 }, 3376 3377 /* RSS */ 3378 { ICE_SID_XLT1_RSS, 3379 ICE_SID_XLT2_RSS, 3380 ICE_SID_PROFID_TCAM_RSS, 3381 ICE_SID_PROFID_REDIR_RSS, 3382 ICE_SID_FLD_VEC_RSS 3383 }, 3384 3385 /* PE */ 3386 { ICE_SID_XLT1_PE, 3387 ICE_SID_XLT2_PE, 3388 ICE_SID_PROFID_TCAM_PE, 3389 ICE_SID_PROFID_REDIR_PE, 3390 ICE_SID_FLD_VEC_PE 3391 } 3392 }; 3393 3394 /** 3395 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables 3396 * @hw: pointer to the hardware structure 3397 * @blk: the HW block to initialize 3398 */ 3399 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk) 3400 { 3401 u16 pt; 3402 3403 for (pt = 0; pt < 
hw->blk[blk].xlt1.count; pt++) { 3404 u8 ptg; 3405 3406 ptg = hw->blk[blk].xlt1.t[pt]; 3407 if (ptg != ICE_DEFAULT_PTG) { 3408 ice_ptg_alloc_val(hw, blk, ptg); 3409 ice_ptg_add_mv_ptype(hw, blk, pt, ptg); 3410 } 3411 } 3412 } 3413 3414 /** 3415 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables 3416 * @hw: pointer to the hardware structure 3417 * @blk: the HW block to initialize 3418 */ 3419 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk) 3420 { 3421 u16 vsi; 3422 3423 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) { 3424 u16 vsig; 3425 3426 vsig = hw->blk[blk].xlt2.t[vsi]; 3427 if (vsig) { 3428 ice_vsig_alloc_val(hw, blk, vsig); 3429 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); 3430 /* no changes at this time, since this has been 3431 * initialized from the original package 3432 */ 3433 hw->blk[blk].xlt2.vsis[vsi].changed = 0; 3434 } 3435 } 3436 } 3437 3438 /** 3439 * ice_init_sw_db - init software database from HW tables 3440 * @hw: pointer to the hardware structure 3441 */ 3442 static void ice_init_sw_db(struct ice_hw *hw) 3443 { 3444 u16 i; 3445 3446 for (i = 0; i < ICE_BLK_COUNT; i++) { 3447 ice_init_sw_xlt1_db(hw, (enum ice_block)i); 3448 ice_init_sw_xlt2_db(hw, (enum ice_block)i); 3449 } 3450 } 3451 3452 /** 3453 * ice_fill_tbl - Reads content of a single table type into database 3454 * @hw: pointer to the hardware structure 3455 * @block_id: Block ID of the table to copy 3456 * @sid: Section ID of the table to copy 3457 * 3458 * Will attempt to read the entire content of a given table of a single block 3459 * into the driver database. We assume that the buffer will always 3460 * be as large or larger than the data contained in the package. If 3461 * this condition is not met, there is most likely an error in the package 3462 * contents. 3463 */ 3464 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid) 3465 { 3466 u32 dst_len, sect_len, offset = 0; 3467 struct ice_prof_redir_section *pr; 3468 struct ice_prof_id_section *pid; 3469 struct ice_xlt1_section *xlt1; 3470 struct ice_xlt2_section *xlt2; 3471 struct ice_sw_fv_section *es; 3472 struct ice_pkg_enum state; 3473 u8 *src, *dst; 3474 void *sect; 3475 3476 /* if the HW segment pointer is null then the first iteration of 3477 * ice_pkg_enum_section() will fail. In this case the HW tables will 3478 * not be filled and return success. 
3479 */ 3480 if (!hw->seg) { 3481 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n"); 3482 return; 3483 } 3484 3485 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); 3486 3487 sect = ice_pkg_enum_section(hw->seg, &state, sid); 3488 3489 while (sect) { 3490 switch (sid) { 3491 case ICE_SID_XLT1_SW: 3492 case ICE_SID_XLT1_FD: 3493 case ICE_SID_XLT1_RSS: 3494 case ICE_SID_XLT1_ACL: 3495 case ICE_SID_XLT1_PE: 3496 xlt1 = (struct ice_xlt1_section *)sect; 3497 src = xlt1->value; 3498 sect_len = LE16_TO_CPU(xlt1->count) * 3499 sizeof(*hw->blk[block_id].xlt1.t); 3500 dst = hw->blk[block_id].xlt1.t; 3501 dst_len = hw->blk[block_id].xlt1.count * 3502 sizeof(*hw->blk[block_id].xlt1.t); 3503 break; 3504 case ICE_SID_XLT2_SW: 3505 case ICE_SID_XLT2_FD: 3506 case ICE_SID_XLT2_RSS: 3507 case ICE_SID_XLT2_ACL: 3508 case ICE_SID_XLT2_PE: 3509 xlt2 = (struct ice_xlt2_section *)sect; 3510 src = (_FORCE_ u8 *)xlt2->value; 3511 sect_len = LE16_TO_CPU(xlt2->count) * 3512 sizeof(*hw->blk[block_id].xlt2.t); 3513 dst = (u8 *)hw->blk[block_id].xlt2.t; 3514 dst_len = hw->blk[block_id].xlt2.count * 3515 sizeof(*hw->blk[block_id].xlt2.t); 3516 break; 3517 case ICE_SID_PROFID_TCAM_SW: 3518 case ICE_SID_PROFID_TCAM_FD: 3519 case ICE_SID_PROFID_TCAM_RSS: 3520 case ICE_SID_PROFID_TCAM_ACL: 3521 case ICE_SID_PROFID_TCAM_PE: 3522 pid = (struct ice_prof_id_section *)sect; 3523 src = (u8 *)pid->entry; 3524 sect_len = LE16_TO_CPU(pid->count) * 3525 sizeof(*hw->blk[block_id].prof.t); 3526 dst = (u8 *)hw->blk[block_id].prof.t; 3527 dst_len = hw->blk[block_id].prof.count * 3528 sizeof(*hw->blk[block_id].prof.t); 3529 break; 3530 case ICE_SID_PROFID_REDIR_SW: 3531 case ICE_SID_PROFID_REDIR_FD: 3532 case ICE_SID_PROFID_REDIR_RSS: 3533 case ICE_SID_PROFID_REDIR_ACL: 3534 case ICE_SID_PROFID_REDIR_PE: 3535 pr = (struct ice_prof_redir_section *)sect; 3536 src = pr->redir_value; 3537 sect_len = LE16_TO_CPU(pr->count) * 3538 sizeof(*hw->blk[block_id].prof_redir.t); 3539 dst = hw->blk[block_id].prof_redir.t; 3540 dst_len = hw->blk[block_id].prof_redir.count * 3541 sizeof(*hw->blk[block_id].prof_redir.t); 3542 break; 3543 case ICE_SID_FLD_VEC_SW: 3544 case ICE_SID_FLD_VEC_FD: 3545 case ICE_SID_FLD_VEC_RSS: 3546 case ICE_SID_FLD_VEC_ACL: 3547 case ICE_SID_FLD_VEC_PE: 3548 es = (struct ice_sw_fv_section *)sect; 3549 src = (u8 *)es->fv; 3550 sect_len = (u32)(LE16_TO_CPU(es->count) * 3551 hw->blk[block_id].es.fvw) * 3552 sizeof(*hw->blk[block_id].es.t); 3553 dst = (u8 *)hw->blk[block_id].es.t; 3554 dst_len = (u32)(hw->blk[block_id].es.count * 3555 hw->blk[block_id].es.fvw) * 3556 sizeof(*hw->blk[block_id].es.t); 3557 break; 3558 default: 3559 return; 3560 } 3561 3562 /* if the section offset exceeds destination length, terminate 3563 * table fill. 3564 */ 3565 if (offset > dst_len) 3566 return; 3567 3568 /* if the sum of section size and offset exceed destination size 3569 * then we are out of bounds of the HW table size for that PF. 3570 * Changing section length to fill the remaining table space 3571 * of that PF. 
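 * For example (illustrative numbers only): with a dst_len of 1024, an offset
 * of 1000 and a section length of 64, only the remaining 24 bytes are copied.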
3572 */ 3573 if ((offset + sect_len) > dst_len) 3574 sect_len = dst_len - offset; 3575 3576 ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA); 3577 offset += sect_len; 3578 sect = ice_pkg_enum_section(NULL, &state, sid); 3579 } 3580 } 3581 3582 /** 3583 * ice_fill_blk_tbls - Read package context for tables 3584 * @hw: pointer to the hardware structure 3585 * 3586 * Reads the current package contents and populates the driver 3587 * database with the data iteratively for all advanced feature 3588 * blocks. Assume that the HW tables have been allocated. 3589 */ 3590 void ice_fill_blk_tbls(struct ice_hw *hw) 3591 { 3592 u8 i; 3593 3594 for (i = 0; i < ICE_BLK_COUNT; i++) { 3595 enum ice_block blk_id = (enum ice_block)i; 3596 3597 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid); 3598 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid); 3599 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid); 3600 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid); 3601 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid); 3602 } 3603 3604 ice_init_sw_db(hw); 3605 } 3606 3607 /** 3608 * ice_free_prof_map - free profile map 3609 * @hw: pointer to the hardware structure 3610 * @blk_idx: HW block index 3611 */ 3612 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx) 3613 { 3614 struct ice_es *es = &hw->blk[blk_idx].es; 3615 struct ice_prof_map *del, *tmp; 3616 3617 ice_acquire_lock(&es->prof_map_lock); 3618 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map, 3619 ice_prof_map, list) { 3620 LIST_DEL(&del->list); 3621 ice_free(hw, del); 3622 } 3623 INIT_LIST_HEAD(&es->prof_map); 3624 ice_release_lock(&es->prof_map_lock); 3625 } 3626 3627 /** 3628 * ice_free_flow_profs - free flow profile entries 3629 * @hw: pointer to the hardware structure 3630 * @blk_idx: HW block index 3631 */ 3632 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx) 3633 { 3634 struct ice_flow_prof *p, *tmp; 3635 3636 ice_acquire_lock(&hw->fl_profs_locks[blk_idx]); 3637 LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx], 3638 ice_flow_prof, l_entry) { 3639 LIST_DEL(&p->l_entry); 3640 3641 ice_free(hw, p); 3642 } 3643 ice_release_lock(&hw->fl_profs_locks[blk_idx]); 3644 3645 /* if driver is in reset and tables are being cleared 3646 * re-initialize the flow profile list heads 3647 */ 3648 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); 3649 } 3650 3651 /** 3652 * ice_free_vsig_tbl - free complete VSIG table entries 3653 * @hw: pointer to the hardware structure 3654 * @blk: the HW block on which to free the VSIG table entries 3655 */ 3656 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk) 3657 { 3658 u16 i; 3659 3660 if (!hw->blk[blk].xlt2.vsig_tbl) 3661 return; 3662 3663 for (i = 1; i < ICE_MAX_VSIGS; i++) 3664 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) 3665 ice_vsig_free(hw, blk, i); 3666 } 3667 3668 /** 3669 * ice_free_hw_tbls - free hardware table memory 3670 * @hw: pointer to the hardware structure 3671 */ 3672 void ice_free_hw_tbls(struct ice_hw *hw) 3673 { 3674 struct ice_rss_cfg *r, *rt; 3675 u8 i; 3676 3677 for (i = 0; i < ICE_BLK_COUNT; i++) { 3678 if (hw->blk[i].is_list_init) { 3679 struct ice_es *es = &hw->blk[i].es; 3680 3681 ice_free_prof_map(hw, i); 3682 ice_destroy_lock(&es->prof_map_lock); 3683 3684 ice_free_flow_profs(hw, i); 3685 ice_destroy_lock(&hw->fl_profs_locks[i]); 3686 3687 hw->blk[i].is_list_init = false; 3688 } 3689 ice_free_vsig_tbl(hw, (enum ice_block)i); 3690 ice_free(hw, hw->blk[i].xlt1.ptypes); 3691 ice_free(hw, hw->blk[i].xlt1.ptg_tbl); 3692 ice_free(hw, 
hw->blk[i].xlt1.t); 3693 ice_free(hw, hw->blk[i].xlt2.t); 3694 ice_free(hw, hw->blk[i].xlt2.vsig_tbl); 3695 ice_free(hw, hw->blk[i].xlt2.vsis); 3696 ice_free(hw, hw->blk[i].prof.t); 3697 ice_free(hw, hw->blk[i].prof_redir.t); 3698 ice_free(hw, hw->blk[i].es.t); 3699 ice_free(hw, hw->blk[i].es.ref_count); 3700 ice_free(hw, hw->blk[i].es.written); 3701 } 3702 3703 LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head, 3704 ice_rss_cfg, l_entry) { 3705 LIST_DEL(&r->l_entry); 3706 ice_free(hw, r); 3707 } 3708 ice_destroy_lock(&hw->rss_locks); 3709 ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM); 3710 } 3711 3712 /** 3713 * ice_init_flow_profs - init flow profile locks and list heads 3714 * @hw: pointer to the hardware structure 3715 * @blk_idx: HW block index 3716 */ 3717 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx) 3718 { 3719 ice_init_lock(&hw->fl_profs_locks[blk_idx]); 3720 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); 3721 } 3722 3723 /** 3724 * ice_clear_hw_tbls - clear HW tables and flow profiles 3725 * @hw: pointer to the hardware structure 3726 */ 3727 void ice_clear_hw_tbls(struct ice_hw *hw) 3728 { 3729 u8 i; 3730 3731 for (i = 0; i < ICE_BLK_COUNT; i++) { 3732 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; 3733 struct ice_prof_tcam *prof = &hw->blk[i].prof; 3734 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; 3735 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; 3736 struct ice_es *es = &hw->blk[i].es; 3737 3738 if (hw->blk[i].is_list_init) { 3739 ice_free_prof_map(hw, i); 3740 ice_free_flow_profs(hw, i); 3741 } 3742 3743 ice_free_vsig_tbl(hw, (enum ice_block)i); 3744 3745 ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes), 3746 ICE_NONDMA_MEM); 3747 ice_memset(xlt1->ptg_tbl, 0, 3748 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl), 3749 ICE_NONDMA_MEM); 3750 ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t), 3751 ICE_NONDMA_MEM); 3752 3753 ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis), 3754 ICE_NONDMA_MEM); 3755 ice_memset(xlt2->vsig_tbl, 0, 3756 xlt2->count * sizeof(*xlt2->vsig_tbl), 3757 ICE_NONDMA_MEM); 3758 ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t), 3759 ICE_NONDMA_MEM); 3760 3761 ice_memset(prof->t, 0, prof->count * sizeof(*prof->t), 3762 ICE_NONDMA_MEM); 3763 ice_memset(prof_redir->t, 0, 3764 prof_redir->count * sizeof(*prof_redir->t), 3765 ICE_NONDMA_MEM); 3766 3767 ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw, 3768 ICE_NONDMA_MEM); 3769 ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count), 3770 ICE_NONDMA_MEM); 3771 ice_memset(es->written, 0, es->count * sizeof(*es->written), 3772 ICE_NONDMA_MEM); 3773 } 3774 } 3775 3776 /** 3777 * ice_init_hw_tbls - init hardware table memory 3778 * @hw: pointer to the hardware structure 3779 */ 3780 enum ice_status ice_init_hw_tbls(struct ice_hw *hw) 3781 { 3782 u8 i; 3783 3784 ice_init_lock(&hw->rss_locks); 3785 INIT_LIST_HEAD(&hw->rss_list_head); 3786 for (i = 0; i < ICE_BLK_COUNT; i++) { 3787 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; 3788 struct ice_prof_tcam *prof = &hw->blk[i].prof; 3789 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; 3790 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; 3791 struct ice_es *es = &hw->blk[i].es; 3792 u16 j; 3793 3794 if (hw->blk[i].is_list_init) 3795 continue; 3796 3797 ice_init_flow_profs(hw, i); 3798 ice_init_lock(&es->prof_map_lock); 3799 INIT_LIST_HEAD(&es->prof_map); 3800 hw->blk[i].is_list_init = true; 3801 3802 hw->blk[i].overwrite = blk_sizes[i].overwrite; 3803 es->reverse = blk_sizes[i].reverse; 3804 3805 xlt1->sid 
= ice_blk_sids[i][ICE_SID_XLT1_OFF]; 3806 xlt1->count = blk_sizes[i].xlt1; 3807 3808 xlt1->ptypes = (struct ice_ptg_ptype *) 3809 ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes)); 3810 3811 if (!xlt1->ptypes) 3812 goto err; 3813 3814 xlt1->ptg_tbl = (struct ice_ptg_entry *) 3815 ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl)); 3816 3817 if (!xlt1->ptg_tbl) 3818 goto err; 3819 3820 xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t)); 3821 if (!xlt1->t) 3822 goto err; 3823 3824 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; 3825 xlt2->count = blk_sizes[i].xlt2; 3826 3827 xlt2->vsis = (struct ice_vsig_vsi *) 3828 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis)); 3829 3830 if (!xlt2->vsis) 3831 goto err; 3832 3833 xlt2->vsig_tbl = (struct ice_vsig_entry *) 3834 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl)); 3835 if (!xlt2->vsig_tbl) 3836 goto err; 3837 3838 for (j = 0; j < xlt2->count; j++) 3839 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); 3840 3841 xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t)); 3842 if (!xlt2->t) 3843 goto err; 3844 3845 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; 3846 prof->count = blk_sizes[i].prof_tcam; 3847 prof->max_prof_id = blk_sizes[i].prof_id; 3848 prof->cdid_bits = blk_sizes[i].prof_cdid_bits; 3849 prof->t = (struct ice_prof_tcam_entry *) 3850 ice_calloc(hw, prof->count, sizeof(*prof->t)); 3851 3852 if (!prof->t) 3853 goto err; 3854 3855 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; 3856 prof_redir->count = blk_sizes[i].prof_redir; 3857 prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count, 3858 sizeof(*prof_redir->t)); 3859 3860 if (!prof_redir->t) 3861 goto err; 3862 3863 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; 3864 es->count = blk_sizes[i].es; 3865 es->fvw = blk_sizes[i].fvw; 3866 es->t = (struct ice_fv_word *) 3867 ice_calloc(hw, (u32)(es->count * es->fvw), 3868 sizeof(*es->t)); 3869 if (!es->t) 3870 goto err; 3871 3872 es->ref_count = (u16 *) 3873 ice_calloc(hw, es->count, sizeof(*es->ref_count)); 3874 3875 if (!es->ref_count) 3876 goto err; 3877 3878 es->written = (u8 *) 3879 ice_calloc(hw, es->count, sizeof(*es->written)); 3880 3881 if (!es->written) 3882 goto err; 3883 3884 } 3885 return ICE_SUCCESS; 3886 3887 err: 3888 ice_free_hw_tbls(hw); 3889 return ICE_ERR_NO_MEMORY; 3890 } 3891 3892 /** 3893 * ice_prof_gen_key - generate profile ID key 3894 * @hw: pointer to the HW struct 3895 * @blk: the block in which to write profile ID to 3896 * @ptg: packet type group (PTG) portion of key 3897 * @vsig: VSIG portion of key 3898 * @cdid: CDID portion of key 3899 * @flags: flag portion of key 3900 * @vl_msk: valid mask 3901 * @dc_msk: don't care mask 3902 * @nm_msk: never match mask 3903 * @key: output of profile ID key 3904 */ 3905 static enum ice_status 3906 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, 3907 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], 3908 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ], 3909 u8 key[ICE_TCAM_KEY_SZ]) 3910 { 3911 struct ice_prof_id_key inkey; 3912 3913 inkey.xlt1 = ptg; 3914 inkey.xlt2_cdid = CPU_TO_LE16(vsig); 3915 inkey.flags = CPU_TO_LE16(flags); 3916 3917 switch (hw->blk[blk].prof.cdid_bits) { 3918 case 0: 3919 break; 3920 case 2: 3921 #define ICE_CD_2_M 0xC000U 3922 #define ICE_CD_2_S 14 3923 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M); 3924 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S); 3925 break; 3926 case 4: 3927 #define ICE_CD_4_M 0xF000U 3928 #define ICE_CD_4_S 12 3929 inkey.xlt2_cdid &= 
~CPU_TO_LE16(ICE_CD_4_M); 3930 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S); 3931 break; 3932 case 8: 3933 #define ICE_CD_8_M 0xFF00U 3934 #define ICE_CD_8_S 16 3935 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M); 3936 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S); 3937 break; 3938 default: 3939 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n"); 3940 break; 3941 } 3942 3943 return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk, 3944 nm_msk, 0, ICE_TCAM_KEY_SZ / 2); 3945 } 3946 3947 /** 3948 * ice_tcam_write_entry - write TCAM entry 3949 * @hw: pointer to the HW struct 3950 * @blk: the block in which to write profile ID to 3951 * @idx: the entry index to write to 3952 * @prof_id: profile ID 3953 * @ptg: packet type group (PTG) portion of key 3954 * @vsig: VSIG portion of key 3955 * @cdid: CDID portion of key 3956 * @flags: flag portion of key 3957 * @vl_msk: valid mask 3958 * @dc_msk: don't care mask 3959 * @nm_msk: never match mask 3960 */ 3961 static enum ice_status 3962 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, 3963 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags, 3964 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], 3965 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], 3966 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ]) 3967 { 3968 struct ice_prof_tcam_entry; 3969 enum ice_status status; 3970 3971 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk, 3972 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key); 3973 if (!status) { 3974 hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx); 3975 hw->blk[blk].prof.t[idx].prof_id = prof_id; 3976 } 3977 3978 return status; 3979 } 3980 3981 /** 3982 * ice_vsig_get_ref - returns number of VSIs belong to a VSIG 3983 * @hw: pointer to the hardware structure 3984 * @blk: HW block 3985 * @vsig: VSIG to query 3986 * @refs: pointer to variable to receive the reference count 3987 */ 3988 static enum ice_status 3989 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) 3990 { 3991 u16 idx = vsig & ICE_VSIG_IDX_M; 3992 struct ice_vsig_vsi *ptr; 3993 3994 *refs = 0; 3995 3996 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) 3997 return ICE_ERR_DOES_NOT_EXIST; 3998 3999 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 4000 while (ptr) { 4001 (*refs)++; 4002 ptr = ptr->next_vsi; 4003 } 4004 4005 return ICE_SUCCESS; 4006 } 4007 4008 /** 4009 * ice_has_prof_vsig - check to see if VSIG has a specific profile 4010 * @hw: pointer to the hardware structure 4011 * @blk: HW block 4012 * @vsig: VSIG to check against 4013 * @hdl: profile handle 4014 */ 4015 static bool 4016 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) 4017 { 4018 u16 idx = vsig & ICE_VSIG_IDX_M; 4019 struct ice_vsig_prof *ent; 4020 4021 LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 4022 ice_vsig_prof, list) 4023 if (ent->profile_cookie == hdl) 4024 return true; 4025 4026 ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n", 4027 vsig); 4028 return false; 4029 } 4030 4031 /** 4032 * ice_prof_bld_es - build profile ID extraction sequence changes 4033 * @hw: pointer to the HW struct 4034 * @blk: hardware block 4035 * @bld: the update package buffer build to add to 4036 * @chgs: the list of changes to make in hardware 4037 */ 4038 static enum ice_status 4039 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, 4040 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs) 4041 { 4042 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word); 4043 struct ice_chs_chg *tmp; 4044 4045 
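	/* For each ICE_PTG_ES_ADD change that carries a new profile, emit one
	 * single-entry ICE_VEC_TBL section sized for the block's full field
	 * vector width (es.fvw words) and copy the extraction sequence from
	 * the software ES table at that profile's offset.
	 */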
LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) 4046 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) { 4047 u16 off = tmp->prof_id * hw->blk[blk].es.fvw; 4048 struct ice_pkg_es *p; 4049 u32 id; 4050 4051 id = ice_sect_id(blk, ICE_VEC_TBL); 4052 p = (struct ice_pkg_es *) 4053 ice_pkg_buf_alloc_section(bld, id, 4054 ice_struct_size(p, es, 4055 1) + 4056 vec_size - 4057 sizeof(p->es[0])); 4058 4059 if (!p) 4060 return ICE_ERR_MAX_LIMIT; 4061 4062 p->count = CPU_TO_LE16(1); 4063 p->offset = CPU_TO_LE16(tmp->prof_id); 4064 4065 ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size, 4066 ICE_NONDMA_TO_NONDMA); 4067 } 4068 4069 return ICE_SUCCESS; 4070 } 4071 4072 /** 4073 * ice_prof_bld_tcam - build profile ID TCAM changes 4074 * @hw: pointer to the HW struct 4075 * @blk: hardware block 4076 * @bld: the update package buffer build to add to 4077 * @chgs: the list of changes to make in hardware 4078 */ 4079 static enum ice_status 4080 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, 4081 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs) 4082 { 4083 struct ice_chs_chg *tmp; 4084 4085 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) 4086 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) { 4087 struct ice_prof_id_section *p; 4088 u32 id; 4089 4090 id = ice_sect_id(blk, ICE_PROF_TCAM); 4091 p = (struct ice_prof_id_section *) 4092 ice_pkg_buf_alloc_section(bld, id, 4093 ice_struct_size(p, 4094 entry, 4095 1)); 4096 4097 if (!p) 4098 return ICE_ERR_MAX_LIMIT; 4099 4100 p->count = CPU_TO_LE16(1); 4101 p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx); 4102 p->entry[0].prof_id = tmp->prof_id; 4103 4104 ice_memcpy(p->entry[0].key, 4105 &hw->blk[blk].prof.t[tmp->tcam_idx].key, 4106 sizeof(hw->blk[blk].prof.t->key), 4107 ICE_NONDMA_TO_NONDMA); 4108 } 4109 4110 return ICE_SUCCESS; 4111 } 4112 4113 /** 4114 * ice_prof_bld_xlt1 - build XLT1 changes 4115 * @blk: hardware block 4116 * @bld: the update package buffer build to add to 4117 * @chgs: the list of changes to make in hardware 4118 */ 4119 static enum ice_status 4120 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, 4121 struct LIST_HEAD_TYPE *chgs) 4122 { 4123 struct ice_chs_chg *tmp; 4124 4125 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) 4126 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) { 4127 struct ice_xlt1_section *p; 4128 u32 id; 4129 4130 id = ice_sect_id(blk, ICE_XLT1); 4131 p = (struct ice_xlt1_section *) 4132 ice_pkg_buf_alloc_section(bld, id, 4133 ice_struct_size(p, 4134 value, 4135 1)); 4136 4137 if (!p) 4138 return ICE_ERR_MAX_LIMIT; 4139 4140 p->count = CPU_TO_LE16(1); 4141 p->offset = CPU_TO_LE16(tmp->ptype); 4142 p->value[0] = tmp->ptg; 4143 } 4144 4145 return ICE_SUCCESS; 4146 } 4147 4148 /** 4149 * ice_prof_bld_xlt2 - build XLT2 changes 4150 * @blk: hardware block 4151 * @bld: the update package buffer build to add to 4152 * @chgs: the list of changes to make in hardware 4153 */ 4154 static enum ice_status 4155 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, 4156 struct LIST_HEAD_TYPE *chgs) 4157 { 4158 struct ice_chs_chg *tmp; 4159 4160 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { 4161 struct ice_xlt2_section *p; 4162 u32 id; 4163 4164 switch (tmp->type) { 4165 case ICE_VSIG_ADD: 4166 case ICE_VSI_MOVE: 4167 case ICE_VSIG_REM: 4168 id = ice_sect_id(blk, ICE_XLT2); 4169 p = (struct ice_xlt2_section *) 4170 ice_pkg_buf_alloc_section(bld, id, 4171 ice_struct_size(p, 4172 value, 4173 1)); 4174 4175 if (!p) 4176 return ICE_ERR_MAX_LIMIT; 4177 4178 
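			/* One single-entry XLT2 section per change: the offset
			 * selects the VSI and value[0] rewrites its VSIG.
			 */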
p->count = CPU_TO_LE16(1); 4179 p->offset = CPU_TO_LE16(tmp->vsi); 4180 p->value[0] = CPU_TO_LE16(tmp->vsig); 4181 break; 4182 default: 4183 break; 4184 } 4185 } 4186 4187 return ICE_SUCCESS; 4188 } 4189 4190 /** 4191 * ice_upd_prof_hw - update hardware using the change list 4192 * @hw: pointer to the HW struct 4193 * @blk: hardware block 4194 * @chgs: the list of changes to make in hardware 4195 */ 4196 static enum ice_status 4197 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, 4198 struct LIST_HEAD_TYPE *chgs) 4199 { 4200 struct ice_buf_build *b; 4201 struct ice_chs_chg *tmp; 4202 enum ice_status status; 4203 u16 pkg_sects; 4204 u16 xlt1 = 0; 4205 u16 xlt2 = 0; 4206 u16 tcam = 0; 4207 u16 es = 0; 4208 u16 sects; 4209 4210 /* count number of sections we need */ 4211 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { 4212 switch (tmp->type) { 4213 case ICE_PTG_ES_ADD: 4214 if (tmp->add_ptg) 4215 xlt1++; 4216 if (tmp->add_prof) 4217 es++; 4218 break; 4219 case ICE_TCAM_ADD: 4220 tcam++; 4221 break; 4222 case ICE_VSIG_ADD: 4223 case ICE_VSI_MOVE: 4224 case ICE_VSIG_REM: 4225 xlt2++; 4226 break; 4227 default: 4228 break; 4229 } 4230 } 4231 sects = xlt1 + xlt2 + tcam + es; 4232 4233 if (!sects) 4234 return ICE_SUCCESS; 4235 4236 /* Build update package buffer */ 4237 b = ice_pkg_buf_alloc(hw); 4238 if (!b) 4239 return ICE_ERR_NO_MEMORY; 4240 4241 status = ice_pkg_buf_reserve_section(b, sects); 4242 if (status) 4243 goto error_tmp; 4244 4245 /* Preserve order of table update: ES, TCAM, PTG, VSIG */ 4246 if (es) { 4247 status = ice_prof_bld_es(hw, blk, b, chgs); 4248 if (status) 4249 goto error_tmp; 4250 } 4251 4252 if (tcam) { 4253 status = ice_prof_bld_tcam(hw, blk, b, chgs); 4254 if (status) 4255 goto error_tmp; 4256 } 4257 4258 if (xlt1) { 4259 status = ice_prof_bld_xlt1(blk, b, chgs); 4260 if (status) 4261 goto error_tmp; 4262 } 4263 4264 if (xlt2) { 4265 status = ice_prof_bld_xlt2(blk, b, chgs); 4266 if (status) 4267 goto error_tmp; 4268 } 4269 4270 /* After package buffer build check if the section count in buffer is 4271 * non-zero and matches the number of sections detected for package 4272 * update. 4273 */ 4274 pkg_sects = ice_pkg_buf_get_active_sections(b); 4275 if (!pkg_sects || pkg_sects != sects) { 4276 status = ICE_ERR_INVAL_SIZE; 4277 goto error_tmp; 4278 } 4279 4280 /* update package */ 4281 status = ice_update_pkg(hw, ice_pkg_buf(b), 1); 4282 if (status == ICE_ERR_AQ_ERROR) 4283 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n"); 4284 4285 error_tmp: 4286 ice_pkg_buf_free(hw, b); 4287 return status; 4288 } 4289 4290 /** 4291 * ice_add_prof - add profile 4292 * @hw: pointer to the HW struct 4293 * @blk: hardware block 4294 * @id: profile tracking ID 4295 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits) 4296 * @es: extraction sequence (length of array is determined by the block) 4297 * 4298 * This function registers a profile, which matches a set of PTGs with a 4299 * particular extraction sequence. While the hardware profile is allocated 4300 * it will not be written until the first call to ice_add_flow that specifies 4301 * the ID value used here. 
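 *
 * Illustrative call sequence (a sketch only: the ptype bit, the 48-word
 * extraction sequence length and the 0x1234ULL tracking cookie are
 * placeholder values chosen for the example, not values defined here):
 *
 *	u8 ptypes[DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = { 0 };
 *	struct ice_fv_word es[48] = { 0 };
 *
 *	ice_set_bit(17, (ice_bitmap_t *)ptypes);
 *	status = ice_add_prof(hw, ICE_BLK_RSS, 0x1234ULL, ptypes, es);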
4302 */ 4303 enum ice_status 4304 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], 4305 struct ice_fv_word *es) 4306 { 4307 u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); 4308 ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT); 4309 struct ice_prof_map *prof; 4310 enum ice_status status; 4311 u8 byte = 0; 4312 u8 prof_id; 4313 4314 ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT); 4315 4316 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); 4317 4318 /* search for existing profile */ 4319 status = ice_find_prof_id(hw, blk, es, &prof_id); 4320 if (status) { 4321 /* allocate profile ID */ 4322 status = ice_alloc_prof_id(hw, blk, &prof_id); 4323 if (status) 4324 goto err_ice_add_prof; 4325 4326 /* and write new es */ 4327 ice_write_es(hw, blk, prof_id, es); 4328 } 4329 4330 ice_prof_inc_ref(hw, blk, prof_id); 4331 4332 /* add profile info */ 4333 4334 prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof)); 4335 if (!prof) 4336 goto err_ice_add_prof; 4337 4338 prof->profile_cookie = id; 4339 prof->prof_id = prof_id; 4340 prof->ptg_cnt = 0; 4341 prof->context = 0; 4342 4343 /* build list of ptgs */ 4344 while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) { 4345 u8 bit; 4346 4347 if (!ptypes[byte]) { 4348 bytes--; 4349 byte++; 4350 continue; 4351 } 4352 4353 /* Examine 8 bits per byte */ 4354 ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte], 4355 BITS_PER_BYTE) { 4356 u16 ptype; 4357 u8 ptg; 4358 4359 ptype = byte * BITS_PER_BYTE + bit; 4360 4361 /* The package should place all ptypes in a non-zero 4362 * PTG, so the following call should never fail. 4363 */ 4364 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg)) 4365 continue; 4366 4367 /* If PTG is already added, skip and continue */ 4368 if (ice_is_bit_set(ptgs_used, ptg)) 4369 continue; 4370 4371 ice_set_bit(ptg, ptgs_used); 4372 prof->ptg[prof->ptg_cnt] = ptg; 4373 4374 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) 4375 break; 4376 } 4377 4378 bytes--; 4379 byte++; 4380 } 4381 4382 LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map); 4383 status = ICE_SUCCESS; 4384 4385 err_ice_add_prof: 4386 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 4387 return status; 4388 } 4389 4390 /** 4391 * ice_search_prof_id - Search for a profile tracking ID 4392 * @hw: pointer to the HW struct 4393 * @blk: hardware block 4394 * @id: profile tracking ID 4395 * 4396 * This will search for a profile tracking ID which was previously added. 4397 * The profile map lock should be held before calling this function. 
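 *
 * Typical locked lookup, as done by ice_set_prof_context() below (sketch):
 *
 *	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
 *	entry = ice_search_prof_id(hw, blk, id);
 *	if (entry)
 *		entry->context = cntxt;
 *	ice_release_lock(&hw->blk[blk].es.prof_map_lock);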
4398 */ 4399 struct ice_prof_map * 4400 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) 4401 { 4402 struct ice_prof_map *entry = NULL; 4403 struct ice_prof_map *map; 4404 4405 LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list) 4406 if (map->profile_cookie == id) { 4407 entry = map; 4408 break; 4409 } 4410 4411 return entry; 4412 } 4413 4414 /** 4415 * ice_set_prof_context - Set context for a given profile 4416 * @hw: pointer to the HW struct 4417 * @blk: hardware block 4418 * @id: profile tracking ID 4419 * @cntxt: context 4420 */ 4421 enum ice_status 4422 ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt) 4423 { 4424 enum ice_status status = ICE_ERR_DOES_NOT_EXIST; 4425 struct ice_prof_map *entry; 4426 4427 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); 4428 entry = ice_search_prof_id(hw, blk, id); 4429 if (entry) { 4430 entry->context = cntxt; 4431 status = ICE_SUCCESS; 4432 } 4433 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 4434 return status; 4435 } 4436 4437 /** 4438 * ice_get_prof_context - Get context for a given profile 4439 * @hw: pointer to the HW struct 4440 * @blk: hardware block 4441 * @id: profile tracking ID 4442 * @cntxt: pointer to variable to receive the context 4443 */ 4444 enum ice_status 4445 ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt) 4446 { 4447 enum ice_status status = ICE_ERR_DOES_NOT_EXIST; 4448 struct ice_prof_map *entry; 4449 4450 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); 4451 entry = ice_search_prof_id(hw, blk, id); 4452 if (entry) { 4453 *cntxt = entry->context; 4454 status = ICE_SUCCESS; 4455 } 4456 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 4457 return status; 4458 } 4459 4460 /** 4461 * ice_vsig_prof_id_count - count profiles in a VSIG 4462 * @hw: pointer to the HW struct 4463 * @blk: hardware block 4464 * @vsig: VSIG to remove the profile from 4465 */ 4466 static u16 4467 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig) 4468 { 4469 u16 idx = vsig & ICE_VSIG_IDX_M, count = 0; 4470 struct ice_vsig_prof *p; 4471 4472 LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 4473 ice_vsig_prof, list) 4474 count++; 4475 4476 return count; 4477 } 4478 4479 /** 4480 * ice_rel_tcam_idx - release a TCAM index 4481 * @hw: pointer to the HW struct 4482 * @blk: hardware block 4483 * @idx: the index to release 4484 */ 4485 static enum ice_status 4486 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) 4487 { 4488 /* Masks to invoke a never match entry */ 4489 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 4490 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF }; 4491 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; 4492 enum ice_status status; 4493 4494 /* write the TCAM entry */ 4495 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk, 4496 dc_msk, nm_msk); 4497 if (status) 4498 return status; 4499 4500 /* release the TCAM entry */ 4501 status = ice_free_tcam_ent(hw, blk, idx); 4502 4503 return status; 4504 } 4505 4506 /** 4507 * ice_rem_prof_id - remove one profile from a VSIG 4508 * @hw: pointer to the HW struct 4509 * @blk: hardware block 4510 * @prof: pointer to profile structure to remove 4511 */ 4512 static enum ice_status 4513 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, 4514 struct ice_vsig_prof *prof) 4515 { 4516 enum ice_status status; 4517 u16 i; 4518 4519 for (i = 0; i < prof->tcam_count; i++) 4520 if 
(prof->tcam[i].in_use) { 4521 prof->tcam[i].in_use = false; 4522 status = ice_rel_tcam_idx(hw, blk, 4523 prof->tcam[i].tcam_idx); 4524 if (status) 4525 return ICE_ERR_HW_TABLE; 4526 } 4527 4528 return ICE_SUCCESS; 4529 } 4530 4531 /** 4532 * ice_rem_vsig - remove VSIG 4533 * @hw: pointer to the HW struct 4534 * @blk: hardware block 4535 * @vsig: the VSIG to remove 4536 * @chg: the change list 4537 */ 4538 static enum ice_status 4539 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, 4540 struct LIST_HEAD_TYPE *chg) 4541 { 4542 u16 idx = vsig & ICE_VSIG_IDX_M; 4543 struct ice_vsig_vsi *vsi_cur; 4544 struct ice_vsig_prof *d, *t; 4545 enum ice_status status; 4546 4547 /* remove TCAM entries */ 4548 LIST_FOR_EACH_ENTRY_SAFE(d, t, 4549 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 4550 ice_vsig_prof, list) { 4551 status = ice_rem_prof_id(hw, blk, d); 4552 if (status) 4553 return status; 4554 4555 LIST_DEL(&d->list); 4556 ice_free(hw, d); 4557 } 4558 4559 /* Move all VSIS associated with this VSIG to the default VSIG */ 4560 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; 4561 /* If the VSIG has at least 1 VSI then iterate through the list 4562 * and remove the VSIs before deleting the group. 4563 */ 4564 if (vsi_cur) 4565 do { 4566 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; 4567 struct ice_chs_chg *p; 4568 4569 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); 4570 if (!p) 4571 return ICE_ERR_NO_MEMORY; 4572 4573 p->type = ICE_VSIG_REM; 4574 p->orig_vsig = vsig; 4575 p->vsig = ICE_DEFAULT_VSIG; 4576 p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis; 4577 4578 LIST_ADD(&p->list_entry, chg); 4579 4580 vsi_cur = tmp; 4581 } while (vsi_cur); 4582 4583 return ice_vsig_free(hw, blk, vsig); 4584 } 4585 4586 /** 4587 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG 4588 * @hw: pointer to the HW struct 4589 * @blk: hardware block 4590 * @vsig: VSIG to remove the profile from 4591 * @hdl: profile handle indicating which profile to remove 4592 * @chg: list to receive a record of changes 4593 */ 4594 static enum ice_status 4595 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, 4596 struct LIST_HEAD_TYPE *chg) 4597 { 4598 u16 idx = vsig & ICE_VSIG_IDX_M; 4599 struct ice_vsig_prof *p, *t; 4600 enum ice_status status; 4601 4602 LIST_FOR_EACH_ENTRY_SAFE(p, t, 4603 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 4604 ice_vsig_prof, list) 4605 if (p->profile_cookie == hdl) { 4606 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1) 4607 /* this is the last profile, remove the VSIG */ 4608 return ice_rem_vsig(hw, blk, vsig, chg); 4609 4610 status = ice_rem_prof_id(hw, blk, p); 4611 if (!status) { 4612 LIST_DEL(&p->list); 4613 ice_free(hw, p); 4614 } 4615 return status; 4616 } 4617 4618 return ICE_ERR_DOES_NOT_EXIST; 4619 } 4620 4621 /** 4622 * ice_rem_flow_all - remove all flows with a particular profile 4623 * @hw: pointer to the HW struct 4624 * @blk: hardware block 4625 * @id: profile tracking ID 4626 */ 4627 static enum ice_status 4628 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) 4629 { 4630 struct ice_chs_chg *del, *tmp; 4631 enum ice_status status; 4632 struct LIST_HEAD_TYPE chg; 4633 u16 i; 4634 4635 INIT_LIST_HEAD(&chg); 4636 4637 for (i = 1; i < ICE_MAX_VSIGS; i++) 4638 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) { 4639 if (ice_has_prof_vsig(hw, blk, i, id)) { 4640 status = ice_rem_prof_id_vsig(hw, blk, i, id, 4641 &chg); 4642 if (status) 4643 goto err_ice_rem_flow_all; 4644 } 4645 } 4646 4647 status = ice_upd_prof_hw(hw, blk, &chg); 4648 4649 
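	/* Whether or not the hardware update succeeded, fall through so the
	 * accumulated change list is always drained and freed below.
	 */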
err_ice_rem_flow_all: 4650 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { 4651 LIST_DEL(&del->list_entry); 4652 ice_free(hw, del); 4653 } 4654 4655 return status; 4656 } 4657 4658 /** 4659 * ice_rem_prof - remove profile 4660 * @hw: pointer to the HW struct 4661 * @blk: hardware block 4662 * @id: profile tracking ID 4663 * 4664 * This will remove the profile specified by the ID parameter, which was 4665 * previously created through ice_add_prof. If any existing entries 4666 * are associated with this profile, they will be removed as well. 4667 */ 4668 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) 4669 { 4670 struct ice_prof_map *pmap; 4671 enum ice_status status; 4672 4673 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); 4674 4675 pmap = ice_search_prof_id(hw, blk, id); 4676 if (!pmap) { 4677 status = ICE_ERR_DOES_NOT_EXIST; 4678 goto err_ice_rem_prof; 4679 } 4680 4681 /* remove all flows with this profile */ 4682 status = ice_rem_flow_all(hw, blk, pmap->profile_cookie); 4683 if (status) 4684 goto err_ice_rem_prof; 4685 4686 /* dereference profile, and possibly remove */ 4687 ice_prof_dec_ref(hw, blk, pmap->prof_id); 4688 4689 LIST_DEL(&pmap->list); 4690 ice_free(hw, pmap); 4691 4692 err_ice_rem_prof: 4693 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 4694 return status; 4695 } 4696 4697 /** 4698 * ice_get_prof - get profile 4699 * @hw: pointer to the HW struct 4700 * @blk: hardware block 4701 * @hdl: profile handle 4702 * @chg: change list 4703 */ 4704 static enum ice_status 4705 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, 4706 struct LIST_HEAD_TYPE *chg) 4707 { 4708 enum ice_status status = ICE_SUCCESS; 4709 struct ice_prof_map *map; 4710 struct ice_chs_chg *p; 4711 u16 i; 4712 4713 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); 4714 /* Get the details on the profile specified by the handle ID */ 4715 map = ice_search_prof_id(hw, blk, hdl); 4716 if (!map) { 4717 status = ICE_ERR_DOES_NOT_EXIST; 4718 goto err_ice_get_prof; 4719 } 4720 4721 for (i = 0; i < map->ptg_cnt; i++) 4722 if (!hw->blk[blk].es.written[map->prof_id]) { 4723 /* add ES to change list */ 4724 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); 4725 if (!p) { 4726 status = ICE_ERR_NO_MEMORY; 4727 goto err_ice_get_prof; 4728 } 4729 4730 p->type = ICE_PTG_ES_ADD; 4731 p->ptype = 0; 4732 p->ptg = map->ptg[i]; 4733 p->add_ptg = 0; 4734 4735 p->add_prof = 1; 4736 p->prof_id = map->prof_id; 4737 4738 hw->blk[blk].es.written[map->prof_id] = true; 4739 4740 LIST_ADD(&p->list_entry, chg); 4741 } 4742 4743 err_ice_get_prof: 4744 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 4745 /* let caller clean up the change list */ 4746 return status; 4747 } 4748 4749 /** 4750 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG 4751 * @hw: pointer to the HW struct 4752 * @blk: hardware block 4753 * @vsig: VSIG from which to copy the list 4754 * @lst: output list 4755 * 4756 * This routine makes a copy of the list of profiles in the specified VSIG. 
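 *
 * The copied entries are owned by the caller and are freed once the list
 * is no longer needed, e.g. (sketch mirroring ice_rem_prof_id_flow()):
 *
 *	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) {
 *		LIST_DEL(&del1->list);
 *		ice_free(hw, del1);
 *	}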
4757 */ 4758 static enum ice_status 4759 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, 4760 struct LIST_HEAD_TYPE *lst) 4761 { 4762 struct ice_vsig_prof *ent1, *ent2; 4763 u16 idx = vsig & ICE_VSIG_IDX_M; 4764 4765 LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 4766 ice_vsig_prof, list) { 4767 struct ice_vsig_prof *p; 4768 4769 /* copy to the input list */ 4770 p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p), 4771 ICE_NONDMA_TO_NONDMA); 4772 if (!p) 4773 goto err_ice_get_profs_vsig; 4774 4775 LIST_ADD_TAIL(&p->list, lst); 4776 } 4777 4778 return ICE_SUCCESS; 4779 4780 err_ice_get_profs_vsig: 4781 LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) { 4782 LIST_DEL(&ent1->list); 4783 ice_free(hw, ent1); 4784 } 4785 4786 return ICE_ERR_NO_MEMORY; 4787 } 4788 4789 /** 4790 * ice_add_prof_to_lst - add profile entry to a list 4791 * @hw: pointer to the HW struct 4792 * @blk: hardware block 4793 * @lst: the list to be added to 4794 * @hdl: profile handle of entry to add 4795 */ 4796 static enum ice_status 4797 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, 4798 struct LIST_HEAD_TYPE *lst, u64 hdl) 4799 { 4800 enum ice_status status = ICE_SUCCESS; 4801 struct ice_prof_map *map; 4802 struct ice_vsig_prof *p; 4803 u16 i; 4804 4805 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); 4806 map = ice_search_prof_id(hw, blk, hdl); 4807 if (!map) { 4808 status = ICE_ERR_DOES_NOT_EXIST; 4809 goto err_ice_add_prof_to_lst; 4810 } 4811 4812 p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p)); 4813 if (!p) { 4814 status = ICE_ERR_NO_MEMORY; 4815 goto err_ice_add_prof_to_lst; 4816 } 4817 4818 p->profile_cookie = map->profile_cookie; 4819 p->prof_id = map->prof_id; 4820 p->tcam_count = map->ptg_cnt; 4821 4822 for (i = 0; i < map->ptg_cnt; i++) { 4823 p->tcam[i].prof_id = map->prof_id; 4824 p->tcam[i].tcam_idx = ICE_INVALID_TCAM; 4825 p->tcam[i].ptg = map->ptg[i]; 4826 } 4827 4828 LIST_ADD(&p->list, lst); 4829 4830 err_ice_add_prof_to_lst: 4831 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 4832 return status; 4833 } 4834 4835 /** 4836 * ice_move_vsi - move VSI to another VSIG 4837 * @hw: pointer to the HW struct 4838 * @blk: hardware block 4839 * @vsi: the VSI to move 4840 * @vsig: the VSIG to move the VSI to 4841 * @chg: the change list 4842 */ 4843 static enum ice_status 4844 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, 4845 struct LIST_HEAD_TYPE *chg) 4846 { 4847 enum ice_status status; 4848 struct ice_chs_chg *p; 4849 u16 orig_vsig; 4850 4851 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); 4852 if (!p) 4853 return ICE_ERR_NO_MEMORY; 4854 4855 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); 4856 if (!status) 4857 status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); 4858 4859 if (status) { 4860 ice_free(hw, p); 4861 return status; 4862 } 4863 4864 p->type = ICE_VSI_MOVE; 4865 p->vsi = vsi; 4866 p->orig_vsig = orig_vsig; 4867 p->vsig = vsig; 4868 4869 LIST_ADD(&p->list_entry, chg); 4870 4871 return ICE_SUCCESS; 4872 } 4873 4874 /** 4875 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list 4876 * @hw: pointer to the HW struct 4877 * @idx: the index of the TCAM entry to remove 4878 * @chg: the list of change structures to search 4879 */ 4880 static void 4881 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg) 4882 { 4883 struct ice_chs_chg *pos, *tmp; 4884 4885 LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry) 4886 if (tmp->type == 
ICE_TCAM_ADD && tmp->tcam_idx == idx) { 4887 LIST_DEL(&tmp->list_entry); 4888 ice_free(hw, tmp); 4889 } 4890 } 4891 4892 /** 4893 * ice_prof_tcam_ena_dis - add enable or disable TCAM change 4894 * @hw: pointer to the HW struct 4895 * @blk: hardware block 4896 * @enable: true to enable, false to disable 4897 * @vsig: the VSIG of the TCAM entry 4898 * @tcam: pointer the TCAM info structure of the TCAM to disable 4899 * @chg: the change list 4900 * 4901 * This function appends an enable or disable TCAM entry in the change log 4902 */ 4903 static enum ice_status 4904 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, 4905 u16 vsig, struct ice_tcam_inf *tcam, 4906 struct LIST_HEAD_TYPE *chg) 4907 { 4908 enum ice_status status; 4909 struct ice_chs_chg *p; 4910 4911 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 4912 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; 4913 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; 4914 4915 /* if disabling, free the TCAM */ 4916 if (!enable) { 4917 status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx); 4918 4919 /* if we have already created a change for this TCAM entry, then 4920 * we need to remove that entry, in order to prevent writing to 4921 * a TCAM entry we no longer will have ownership of. 4922 */ 4923 ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg); 4924 tcam->tcam_idx = 0; 4925 tcam->in_use = 0; 4926 return status; 4927 } 4928 4929 /* for re-enabling, reallocate a TCAM */ 4930 status = ice_alloc_tcam_ent(hw, blk, true, &tcam->tcam_idx); 4931 if (status) 4932 return status; 4933 4934 /* add TCAM to change list */ 4935 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); 4936 if (!p) 4937 return ICE_ERR_NO_MEMORY; 4938 4939 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, 4940 tcam->ptg, vsig, 0, 0, vl_msk, dc_msk, 4941 nm_msk); 4942 if (status) 4943 goto err_ice_prof_tcam_ena_dis; 4944 4945 tcam->in_use = 1; 4946 4947 p->type = ICE_TCAM_ADD; 4948 p->add_tcam_idx = true; 4949 p->prof_id = tcam->prof_id; 4950 p->ptg = tcam->ptg; 4951 p->vsig = 0; 4952 p->tcam_idx = tcam->tcam_idx; 4953 4954 /* log change */ 4955 LIST_ADD(&p->list_entry, chg); 4956 4957 return ICE_SUCCESS; 4958 4959 err_ice_prof_tcam_ena_dis: 4960 ice_free(hw, p); 4961 return status; 4962 } 4963 4964 /** 4965 * ice_adj_prof_priorities - adjust profile based on priorities 4966 * @hw: pointer to the HW struct 4967 * @blk: hardware block 4968 * @vsig: the VSIG for which to adjust profile priorities 4969 * @chg: the change list 4970 */ 4971 static enum ice_status 4972 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, 4973 struct LIST_HEAD_TYPE *chg) 4974 { 4975 ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT); 4976 enum ice_status status = ICE_SUCCESS; 4977 struct ice_vsig_prof *t; 4978 u16 idx; 4979 4980 ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT); 4981 idx = vsig & ICE_VSIG_IDX_M; 4982 4983 /* Priority is based on the order in which the profiles are added. The 4984 * newest added profile has highest priority and the oldest added 4985 * profile has the lowest priority. Since the profile property list for 4986 * a VSIG is sorted from newest to oldest, this code traverses the list 4987 * in order and enables the first of each PTG that it finds (that is not 4988 * already enabled); it also disables any duplicate PTGs that it finds 4989 * in the older profiles (that are currently enabled). 
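	 *
	 * For example, if the newest profile and an older profile in the list
	 * both carry a TCAM entry for PTG 5, the newer entry stays (or is
	 * turned) on while the older duplicate is released and rewritten as a
	 * never-match key by ice_prof_tcam_ena_dis().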
4990 */ 4991 4992 LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, 4993 ice_vsig_prof, list) { 4994 u16 i; 4995 4996 for (i = 0; i < t->tcam_count; i++) { 4997 bool used; 4998 4999 /* Scan the priorities from newest to oldest. 5000 * Make sure that the newest profiles take priority. 5001 */ 5002 used = ice_is_bit_set(ptgs_used, t->tcam[i].ptg); 5003 5004 if (used && t->tcam[i].in_use) { 5005 /* need to mark this PTG as never match, as it 5006 * was already in use and therefore duplicate 5007 * (and lower priority) 5008 */ 5009 status = ice_prof_tcam_ena_dis(hw, blk, false, 5010 vsig, 5011 &t->tcam[i], 5012 chg); 5013 if (status) 5014 return status; 5015 } else if (!used && !t->tcam[i].in_use) { 5016 /* need to enable this PTG, as it in not in use 5017 * and not enabled (highest priority) 5018 */ 5019 status = ice_prof_tcam_ena_dis(hw, blk, true, 5020 vsig, 5021 &t->tcam[i], 5022 chg); 5023 if (status) 5024 return status; 5025 } 5026 5027 /* keep track of used ptgs */ 5028 ice_set_bit(t->tcam[i].ptg, ptgs_used); 5029 } 5030 } 5031 5032 return status; 5033 } 5034 5035 /** 5036 * ice_add_prof_id_vsig - add profile to VSIG 5037 * @hw: pointer to the HW struct 5038 * @blk: hardware block 5039 * @vsig: the VSIG to which this profile is to be added 5040 * @hdl: the profile handle indicating the profile to add 5041 * @rev: true to add entries to the end of the list 5042 * @chg: the change list 5043 */ 5044 static enum ice_status 5045 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, 5046 bool rev, struct LIST_HEAD_TYPE *chg) 5047 { 5048 /* Masks that ignore flags */ 5049 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 5050 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; 5051 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; 5052 enum ice_status status = ICE_SUCCESS; 5053 struct ice_prof_map *map; 5054 struct ice_vsig_prof *t; 5055 struct ice_chs_chg *p; 5056 u16 vsig_idx, i; 5057 5058 /* Error, if this VSIG already has this profile */ 5059 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) 5060 return ICE_ERR_ALREADY_EXISTS; 5061 5062 /* new VSIG profile structure */ 5063 t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t)); 5064 if (!t) 5065 return ICE_ERR_NO_MEMORY; 5066 5067 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); 5068 /* Get the details on the profile specified by the handle ID */ 5069 map = ice_search_prof_id(hw, blk, hdl); 5070 if (!map) { 5071 status = ICE_ERR_DOES_NOT_EXIST; 5072 goto err_ice_add_prof_id_vsig; 5073 } 5074 5075 t->profile_cookie = map->profile_cookie; 5076 t->prof_id = map->prof_id; 5077 t->tcam_count = map->ptg_cnt; 5078 5079 /* create TCAM entries */ 5080 for (i = 0; i < map->ptg_cnt; i++) { 5081 u16 tcam_idx; 5082 5083 /* add TCAM to change list */ 5084 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); 5085 if (!p) { 5086 status = ICE_ERR_NO_MEMORY; 5087 goto err_ice_add_prof_id_vsig; 5088 } 5089 5090 /* allocate the TCAM entry index */ 5091 status = ice_alloc_tcam_ent(hw, blk, true, &tcam_idx); 5092 if (status) { 5093 ice_free(hw, p); 5094 goto err_ice_add_prof_id_vsig; 5095 } 5096 5097 t->tcam[i].ptg = map->ptg[i]; 5098 t->tcam[i].prof_id = map->prof_id; 5099 t->tcam[i].tcam_idx = tcam_idx; 5100 t->tcam[i].in_use = true; 5101 5102 p->type = ICE_TCAM_ADD; 5103 p->add_tcam_idx = true; 5104 p->prof_id = t->tcam[i].prof_id; 5105 p->ptg = t->tcam[i].ptg; 5106 p->vsig = vsig; 5107 p->tcam_idx = t->tcam[i].tcam_idx; 5108 5109 /* write the TCAM entry */ 5110 status = 
ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx, 5111 t->tcam[i].prof_id, 5112 t->tcam[i].ptg, vsig, 0, 0, 5113 vl_msk, dc_msk, nm_msk); 5114 if (status) { 5115 ice_free(hw, p); 5116 goto err_ice_add_prof_id_vsig; 5117 } 5118 5119 /* log change */ 5120 LIST_ADD(&p->list_entry, chg); 5121 } 5122 5123 /* add profile to VSIG */ 5124 vsig_idx = vsig & ICE_VSIG_IDX_M; 5125 if (rev) 5126 LIST_ADD_TAIL(&t->list, 5127 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); 5128 else 5129 LIST_ADD(&t->list, 5130 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); 5131 5132 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 5133 return status; 5134 5135 err_ice_add_prof_id_vsig: 5136 ice_release_lock(&hw->blk[blk].es.prof_map_lock); 5137 /* let caller clean up the change list */ 5138 ice_free(hw, t); 5139 return status; 5140 } 5141 5142 /** 5143 * ice_create_prof_id_vsig - add a new VSIG with a single profile 5144 * @hw: pointer to the HW struct 5145 * @blk: hardware block 5146 * @vsi: the initial VSI that will be in VSIG 5147 * @hdl: the profile handle of the profile that will be added to the VSIG 5148 * @chg: the change list 5149 */ 5150 static enum ice_status 5151 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl, 5152 struct LIST_HEAD_TYPE *chg) 5153 { 5154 enum ice_status status; 5155 struct ice_chs_chg *p; 5156 u16 new_vsig; 5157 5158 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); 5159 if (!p) 5160 return ICE_ERR_NO_MEMORY; 5161 5162 new_vsig = ice_vsig_alloc(hw, blk); 5163 if (!new_vsig) { 5164 status = ICE_ERR_HW_TABLE; 5165 goto err_ice_create_prof_id_vsig; 5166 } 5167 5168 status = ice_move_vsi(hw, blk, vsi, new_vsig, chg); 5169 if (status) 5170 goto err_ice_create_prof_id_vsig; 5171 5172 status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg); 5173 if (status) 5174 goto err_ice_create_prof_id_vsig; 5175 5176 p->type = ICE_VSIG_ADD; 5177 p->vsi = vsi; 5178 p->orig_vsig = ICE_DEFAULT_VSIG; 5179 p->vsig = new_vsig; 5180 5181 LIST_ADD(&p->list_entry, chg); 5182 5183 return ICE_SUCCESS; 5184 5185 err_ice_create_prof_id_vsig: 5186 /* let caller clean up the change list */ 5187 ice_free(hw, p); 5188 return status; 5189 } 5190 5191 /** 5192 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles 5193 * @hw: pointer to the HW struct 5194 * @blk: hardware block 5195 * @vsi: the initial VSI that will be in VSIG 5196 * @lst: the list of profile that will be added to the VSIG 5197 * @new_vsig: return of new VSIG 5198 * @chg: the change list 5199 */ 5200 static enum ice_status 5201 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi, 5202 struct LIST_HEAD_TYPE *lst, u16 *new_vsig, 5203 struct LIST_HEAD_TYPE *chg) 5204 { 5205 struct ice_vsig_prof *t; 5206 enum ice_status status; 5207 u16 vsig; 5208 5209 vsig = ice_vsig_alloc(hw, blk); 5210 if (!vsig) 5211 return ICE_ERR_HW_TABLE; 5212 5213 status = ice_move_vsi(hw, blk, vsi, vsig, chg); 5214 if (status) 5215 return status; 5216 5217 LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) { 5218 /* Reverse the order here since we are copying the list */ 5219 status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie, 5220 true, chg); 5221 if (status) 5222 return status; 5223 } 5224 5225 *new_vsig = vsig; 5226 5227 return ICE_SUCCESS; 5228 } 5229 5230 /** 5231 * ice_find_prof_vsig - find a VSIG with a specific profile handle 5232 * @hw: pointer to the HW struct 5233 * @blk: hardware block 5234 * @hdl: the profile handle of the profile to search for 5235 * @vsig: returns the VSIG with 
the matching profile 5236 */ 5237 static bool 5238 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig) 5239 { 5240 struct ice_vsig_prof *t; 5241 enum ice_status status; 5242 struct LIST_HEAD_TYPE lst; 5243 5244 INIT_LIST_HEAD(&lst); 5245 5246 t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t)); 5247 if (!t) 5248 return false; 5249 5250 t->profile_cookie = hdl; 5251 LIST_ADD(&t->list, &lst); 5252 5253 status = ice_find_dup_props_vsig(hw, blk, &lst, vsig); 5254 5255 LIST_DEL(&t->list); 5256 ice_free(hw, t); 5257 5258 return status == ICE_SUCCESS; 5259 } 5260 5261 /** 5262 * ice_add_vsi_flow - add VSI flow 5263 * @hw: pointer to the HW struct 5264 * @blk: hardware block 5265 * @vsi: input VSI 5266 * @vsig: target VSIG to include the input VSI 5267 * 5268 * Calling this function will add the VSI to a given VSIG and 5269 * update the HW tables accordingly. This call can be used to 5270 * add multiple VSIs to a VSIG if we know beforehand that those 5271 * VSIs have the same characteristics of the VSIG. This will 5272 * save time in generating a new VSIG and TCAMs till a match is 5273 * found and subsequent rollback when a matching VSIG is found. 5274 */ 5275 enum ice_status 5276 ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) 5277 { 5278 struct ice_chs_chg *tmp, *del; 5279 struct LIST_HEAD_TYPE chg; 5280 enum ice_status status; 5281 5282 /* if target VSIG is default the move is invalid */ 5283 if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG) 5284 return ICE_ERR_PARAM; 5285 5286 INIT_LIST_HEAD(&chg); 5287 5288 /* move VSI to the VSIG that matches */ 5289 status = ice_move_vsi(hw, blk, vsi, vsig, &chg); 5290 /* update hardware if success */ 5291 if (!status) 5292 status = ice_upd_prof_hw(hw, blk, &chg); 5293 5294 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { 5295 LIST_DEL(&del->list_entry); 5296 ice_free(hw, del); 5297 } 5298 5299 return status; 5300 } 5301 5302 /** 5303 * ice_add_prof_id_flow - add profile flow 5304 * @hw: pointer to the HW struct 5305 * @blk: hardware block 5306 * @vsi: the VSI to enable with the profile specified by ID 5307 * @hdl: profile handle 5308 * 5309 * Calling this function will update the hardware tables to enable the 5310 * profile indicated by the ID parameter for the VSIs specified in the VSI 5311 * array. Once successfully called, the flow will be enabled. 5312 */ 5313 enum ice_status 5314 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) 5315 { 5316 struct ice_vsig_prof *tmp1, *del1; 5317 struct ice_chs_chg *tmp, *del; 5318 struct LIST_HEAD_TYPE union_lst; 5319 enum ice_status status; 5320 struct LIST_HEAD_TYPE chg; 5321 u16 vsig; 5322 5323 INIT_LIST_HEAD(&union_lst); 5324 INIT_LIST_HEAD(&chg); 5325 5326 /* Get profile */ 5327 status = ice_get_prof(hw, blk, hdl, &chg); 5328 if (status) 5329 return status; 5330 5331 /* determine if VSI is already part of a VSIG */ 5332 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig); 5333 if (!status && vsig) { 5334 bool only_vsi; 5335 u16 or_vsig; 5336 u16 ref; 5337 5338 /* found in VSIG */ 5339 or_vsig = vsig; 5340 5341 /* make sure that there is no overlap/conflict between the new 5342 * characteristics and the existing ones; we don't support that 5343 * scenario 5344 */ 5345 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) { 5346 status = ICE_ERR_ALREADY_EXISTS; 5347 goto err_ice_add_prof_id_flow; 5348 } 5349 5350 /* last VSI in the VSIG? 
*/ 5351 status = ice_vsig_get_ref(hw, blk, vsig, &ref); 5352 if (status) 5353 goto err_ice_add_prof_id_flow; 5354 only_vsi = (ref == 1); 5355 5356 /* create a union of the current profiles and the one being 5357 * added 5358 */ 5359 status = ice_get_profs_vsig(hw, blk, vsig, &union_lst); 5360 if (status) 5361 goto err_ice_add_prof_id_flow; 5362 5363 status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl); 5364 if (status) 5365 goto err_ice_add_prof_id_flow; 5366 5367 /* search for an existing VSIG with an exact charc match */ 5368 status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig); 5369 if (!status) { 5370 /* move VSI to the VSIG that matches */ 5371 status = ice_move_vsi(hw, blk, vsi, vsig, &chg); 5372 if (status) 5373 goto err_ice_add_prof_id_flow; 5374 5375 /* VSI has been moved out of or_vsig. If the or_vsig had 5376 * only that VSI it is now empty and can be removed. 5377 */ 5378 if (only_vsi) { 5379 status = ice_rem_vsig(hw, blk, or_vsig, &chg); 5380 if (status) 5381 goto err_ice_add_prof_id_flow; 5382 } 5383 } else if (only_vsi) { 5384 /* If the original VSIG only contains one VSI, then it 5385 * will be the requesting VSI. In this case the VSI is 5386 * not sharing entries and we can simply add the new 5387 * profile to the VSIG. 5388 */ 5389 status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false, 5390 &chg); 5391 if (status) 5392 goto err_ice_add_prof_id_flow; 5393 5394 /* Adjust priorities */ 5395 status = ice_adj_prof_priorities(hw, blk, vsig, &chg); 5396 if (status) 5397 goto err_ice_add_prof_id_flow; 5398 } else { 5399 /* No match, so we need a new VSIG */ 5400 status = ice_create_vsig_from_lst(hw, blk, vsi, 5401 &union_lst, &vsig, 5402 &chg); 5403 if (status) 5404 goto err_ice_add_prof_id_flow; 5405 5406 /* Adjust priorities */ 5407 status = ice_adj_prof_priorities(hw, blk, vsig, &chg); 5408 if (status) 5409 goto err_ice_add_prof_id_flow; 5410 } 5411 } else { 5412 /* need to find or add a VSIG */ 5413 /* search for an existing VSIG with an exact charc match */ 5414 if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) { 5415 /* found an exact match */ 5416 /* add or move VSI to the VSIG that matches */ 5417 status = ice_move_vsi(hw, blk, vsi, vsig, &chg); 5418 if (status) 5419 goto err_ice_add_prof_id_flow; 5420 } else { 5421 /* we did not find an exact match */ 5422 /* we need to add a VSIG */ 5423 status = ice_create_prof_id_vsig(hw, blk, vsi, hdl, 5424 &chg); 5425 if (status) 5426 goto err_ice_add_prof_id_flow; 5427 } 5428 } 5429 5430 /* update hardware */ 5431 if (!status) 5432 status = ice_upd_prof_hw(hw, blk, &chg); 5433 5434 err_ice_add_prof_id_flow: 5435 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { 5436 LIST_DEL(&del->list_entry); 5437 ice_free(hw, del); 5438 } 5439 5440 LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) { 5441 LIST_DEL(&del1->list); 5442 ice_free(hw, del1); 5443 } 5444 5445 return status; 5446 } 5447 5448 /** 5449 * ice_add_flow - add flow 5450 * @hw: pointer to the HW struct 5451 * @blk: hardware block 5452 * @vsi: array of VSIs to enable with the profile specified by ID 5453 * @count: number of elements in the VSI array 5454 * @id: profile tracking ID 5455 * 5456 * Calling this function will update the hardware tables to enable the 5457 * profile indicated by the ID parameter for the VSIs specified in the VSI 5458 * array. Once successfully called, the flow will be enabled. 
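 *
 * Illustrative usage (sketch; the VSI numbers, the count of 2 and the
 * 0x1234ULL tracking ID are placeholders, not values defined here):
 *
 *	u16 vsi[2] = { 3, 5 };
 *
 *	status = ice_add_flow(hw, ICE_BLK_RSS, vsi, 2, 0x1234ULL);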
5459 */ 5460 enum ice_status 5461 ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, 5462 u64 id) 5463 { 5464 enum ice_status status; 5465 u16 i; 5466 5467 for (i = 0; i < count; i++) { 5468 status = ice_add_prof_id_flow(hw, blk, vsi[i], id); 5469 if (status) 5470 return status; 5471 } 5472 5473 return ICE_SUCCESS; 5474 } 5475 5476 /** 5477 * ice_rem_prof_from_list - remove a profile from list 5478 * @hw: pointer to the HW struct 5479 * @lst: list to remove the profile from 5480 * @hdl: the profile handle indicating the profile to remove 5481 */ 5482 static enum ice_status 5483 ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl) 5484 { 5485 struct ice_vsig_prof *ent, *tmp; 5486 5487 LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list) 5488 if (ent->profile_cookie == hdl) { 5489 LIST_DEL(&ent->list); 5490 ice_free(hw, ent); 5491 return ICE_SUCCESS; 5492 } 5493 5494 return ICE_ERR_DOES_NOT_EXIST; 5495 } 5496 5497 /** 5498 * ice_rem_prof_id_flow - remove flow 5499 * @hw: pointer to the HW struct 5500 * @blk: hardware block 5501 * @vsi: the VSI from which to remove the profile specified by ID 5502 * @hdl: profile tracking handle 5503 * 5504 * Calling this function will update the hardware tables to remove the 5505 * profile indicated by the ID parameter for the VSIs specified in the VSI 5506 * array. Once successfully called, the flow will be disabled. 5507 */ 5508 enum ice_status 5509 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) 5510 { 5511 struct ice_vsig_prof *tmp1, *del1; 5512 struct ice_chs_chg *tmp, *del; 5513 struct LIST_HEAD_TYPE chg, copy; 5514 enum ice_status status; 5515 u16 vsig; 5516 5517 INIT_LIST_HEAD(&copy); 5518 INIT_LIST_HEAD(&chg); 5519 5520 /* determine if VSI is already part of a VSIG */ 5521 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig); 5522 if (!status && vsig) { 5523 bool last_profile; 5524 bool only_vsi; 5525 u16 ref; 5526 5527 /* found in VSIG */ 5528 last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1; 5529 status = ice_vsig_get_ref(hw, blk, vsig, &ref); 5530 if (status) 5531 goto err_ice_rem_prof_id_flow; 5532 only_vsi = (ref == 1); 5533 5534 if (only_vsi) { 5535 /* If the original VSIG only contains one reference, 5536 * which will be the requesting VSI, then the VSI is not 5537 * sharing entries and we can simply remove the specific 5538 * characteristics from the VSIG. 5539 */ 5540 5541 if (last_profile) { 5542 /* If there are no profiles left for this VSIG, 5543 * then simply remove the VSIG.
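				 * Removing the VSIG also queues ICE_VSIG_REM
				 * changes that move its remaining VSI back to
				 * the default VSIG.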
5544 */ 5545 status = ice_rem_vsig(hw, blk, vsig, &chg); 5546 if (status) 5547 goto err_ice_rem_prof_id_flow; 5548 } else { 5549 status = ice_rem_prof_id_vsig(hw, blk, vsig, 5550 hdl, &chg); 5551 if (status) 5552 goto err_ice_rem_prof_id_flow; 5553 5554 /* Adjust priorities */ 5555 status = ice_adj_prof_priorities(hw, blk, vsig, 5556 &chg); 5557 if (status) 5558 goto err_ice_rem_prof_id_flow; 5559 } 5560 5561 } else { 5562 /* Make a copy of the VSIG's list of Profiles */ 5563 status = ice_get_profs_vsig(hw, blk, vsig, &copy); 5564 if (status) 5565 goto err_ice_rem_prof_id_flow; 5566 5567 /* Remove specified profile entry from the list */ 5568 status = ice_rem_prof_from_list(hw, &copy, hdl); 5569 if (status) 5570 goto err_ice_rem_prof_id_flow; 5571 5572 if (LIST_EMPTY(&copy)) { 5573 status = ice_move_vsi(hw, blk, vsi, 5574 ICE_DEFAULT_VSIG, &chg); 5575 if (status) 5576 goto err_ice_rem_prof_id_flow; 5577 5578 } else if (!ice_find_dup_props_vsig(hw, blk, &copy, 5579 &vsig)) { 5580 /* found an exact match */ 5581 /* add or move VSI to the VSIG that matches */ 5582 /* Search for a VSIG with a matching profile 5583 * list 5584 */ 5585 5586 /* Found match, move VSI to the matching VSIG */ 5587 status = ice_move_vsi(hw, blk, vsi, vsig, &chg); 5588 if (status) 5589 goto err_ice_rem_prof_id_flow; 5590 } else { 5591 /* since no existing VSIG supports this 5592 * characteristic pattern, we need to create a 5593 * new VSIG and TCAM entries 5594 */ 5595 status = ice_create_vsig_from_lst(hw, blk, vsi, 5596 &copy, &vsig, 5597 &chg); 5598 if (status) 5599 goto err_ice_rem_prof_id_flow; 5600 5601 /* Adjust priorities */ 5602 status = ice_adj_prof_priorities(hw, blk, vsig, 5603 &chg); 5604 if (status) 5605 goto err_ice_rem_prof_id_flow; 5606 } 5607 } 5608 } else { 5609 status = ICE_ERR_DOES_NOT_EXIST; 5610 } 5611 5612 /* update hardware tables */ 5613 if (!status) 5614 status = ice_upd_prof_hw(hw, blk, &chg); 5615 5616 err_ice_rem_prof_id_flow: 5617 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { 5618 LIST_DEL(&del->list_entry); 5619 ice_free(hw, del); 5620 } 5621 5622 LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) { 5623 LIST_DEL(&del1->list); 5624 ice_free(hw, del1); 5625 } 5626 5627 return status; 5628 } 5629 5630 /** 5631 * ice_rem_flow - remove flow 5632 * @hw: pointer to the HW struct 5633 * @blk: hardware block 5634 * @vsi: array of VSIs from which to remove the profile specified by ID 5635 * @count: number of elements in the VSI array 5636 * @id: profile tracking ID 5637 * 5638 * The function will remove flows from the specified VSIs that were enabled 5639 * using ice_add_flow. The ID value will indicate which profile will be 5640 * removed. Once successfully called, the flow will be disabled. 5641 */ 5642 enum ice_status 5643 ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, 5644 u64 id) 5645 { 5646 enum ice_status status; 5647 u16 i; 5648 5649 for (i = 0; i < count; i++) { 5650 status = ice_rem_prof_id_flow(hw, blk, vsi[i], id); 5651 if (status) 5652 return status; 5653 } 5654 5655 return ICE_SUCCESS; 5656 } 5657
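
/* End-to-end usage sketch (illustrative only; the 0x1234ULL tracking cookie,
 * the VSI number and the ptypes/es arrays are placeholders supplied by the
 * caller, not values defined in this file). ice_rem_prof() also tears down
 * any flows still referencing the profile, so the explicit ice_rem_flow()
 * call below is shown only for symmetry:
 *
 *	u16 vsi[1] = { 3 };
 *
 *	status = ice_add_prof(hw, ICE_BLK_RSS, 0x1234ULL, ptypes, es);
 *	if (!status)
 *		status = ice_add_flow(hw, ICE_BLK_RSS, vsi, 1, 0x1234ULL);
 *
 *	status = ice_rem_flow(hw, ICE_BLK_RSS, vsi, 1, 0x1234ULL);
 *	if (!status)
 *		status = ice_rem_prof(hw, ICE_BLK_RSS, 0x1234ULL);
 */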