1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2022, Intel Corporation. */ 3 4 #include "ice_common.h" 5 #include "ice.h" 6 #include "ice_ddp.h" 7 #include "ice_sched.h" 8 9 /* For supporting double VLAN mode, it is necessary to enable or disable certain 10 * boost tcam entries. The metadata labels names that match the following 11 * prefixes will be saved to allow enabling double VLAN mode. 12 */ 13 #define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */ 14 #define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */ 15 16 /* To support tunneling entries by PF, the package will append the PF number to 17 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. 18 */ 19 #define ICE_TNL_PRE "TNL_" 20 static const struct ice_tunnel_type_scan tnls[] = { 21 { TNL_VXLAN, "TNL_VXLAN_PF" }, 22 { TNL_GENEVE, "TNL_GENEVE_PF" }, 23 { TNL_LAST, "" } 24 }; 25 26 /** 27 * ice_verify_pkg - verify package 28 * @pkg: pointer to the package buffer 29 * @len: size of the package buffer 30 * 31 * Verifies various attributes of the package file, including length, format 32 * version, and the requirement of at least one segment. 
33 */ 34 static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) 35 { 36 u32 seg_count; 37 u32 i; 38 39 if (len < struct_size(pkg, seg_offset, 1)) 40 return ICE_DDP_PKG_INVALID_FILE; 41 42 if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || 43 pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || 44 pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || 45 pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) 46 return ICE_DDP_PKG_INVALID_FILE; 47 48 /* pkg must have at least one segment */ 49 seg_count = le32_to_cpu(pkg->seg_count); 50 if (seg_count < 1) 51 return ICE_DDP_PKG_INVALID_FILE; 52 53 /* make sure segment array fits in package length */ 54 if (len < struct_size(pkg, seg_offset, seg_count)) 55 return ICE_DDP_PKG_INVALID_FILE; 56 57 /* all segments must fit within length */ 58 for (i = 0; i < seg_count; i++) { 59 u32 off = le32_to_cpu(pkg->seg_offset[i]); 60 struct ice_generic_seg_hdr *seg; 61 62 /* segment header must fit */ 63 if (len < off + sizeof(*seg)) 64 return ICE_DDP_PKG_INVALID_FILE; 65 66 seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); 67 68 /* segment body must fit */ 69 if (len < off + le32_to_cpu(seg->seg_size)) 70 return ICE_DDP_PKG_INVALID_FILE; 71 } 72 73 return ICE_DDP_PKG_SUCCESS; 74 } 75 76 /** 77 * ice_free_seg - free package segment pointer 78 * @hw: pointer to the hardware structure 79 * 80 * Frees the package segment pointer in the proper manner, depending on if the 81 * segment was allocated or just the passed in pointer was stored. 82 */ 83 void ice_free_seg(struct ice_hw *hw) 84 { 85 if (hw->pkg_copy) { 86 devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); 87 hw->pkg_copy = NULL; 88 hw->pkg_size = 0; 89 } 90 hw->seg = NULL; 91 } 92 93 /** 94 * ice_chk_pkg_version - check package version for compatibility with driver 95 * @pkg_ver: pointer to a version structure to check 96 * 97 * Check to make sure that the package about to be downloaded is compatible with 98 * the driver. 
 * To be compatible, the major and minor components of the package
 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
 * definitions.
 */
static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
	/* strictly newer than supported: refuse to parse an unknown format */
	if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
	    (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
	     pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
		return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
	/* strictly older than supported */
	else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
		 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
		  pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
		return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;

	return ICE_DDP_PKG_SUCCESS;
}

/**
 * ice_pkg_val_buf
 * @buf: pointer to the ice buffer
 *
 * This helper function validates a buffer's header.
 *
 * Return: the buffer reinterpreted as a struct ice_buf_hdr on success, or
 * NULL when the section count or data end offset is outside its legal range.
 */
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
	struct ice_buf_hdr *hdr;
	u16 section_count;
	u16 data_end;

	hdr = (struct ice_buf_hdr *)buf->buf;
	/* verify data; header fields are little-endian in the package file */
	section_count = le16_to_cpu(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
		return NULL;

	data_end = le16_to_cpu(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
		return NULL;

	return hdr;
}

/**
 * ice_find_buf_table
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table within the ice segment.
 *
 * The buffer table follows two variable-length tables (device table, then
 * NVM table), so the address is computed by stepping past both flexible
 * arrays rather than via a fixed offset.
 */
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
	struct ice_nvm_table *nvms = (struct ice_nvm_table *)
		(ice_seg->device_table +
		 le32_to_cpu(ice_seg->device_table_count));

	return (__force struct ice_buf_table *)(nvms->vers +
						le32_to_cpu(nvms->table_count));
}

/**
 * ice_pkg_enum_buf
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 * ice_seg is set to NULL which continues the enumeration. When the function
 * returns a NULL pointer, then the end of the buffers has been reached, or an
 * unexpected value has been detected (for example an invalid section count or
 * an invalid buffer end value).
 */
static struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg,
					    struct ice_pkg_enum *state)
{
	if (ice_seg) {
		/* first call: locate the buffer table and start at index 0 */
		state->buf_table = ice_find_buf_table(ice_seg);
		if (!state->buf_table)
			return NULL;

		state->buf_idx = 0;
		return ice_pkg_val_buf(state->buf_table->buf_array);
	}

	/* continuation: advance to the next buffer, if any remain */
	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +
				       state->buf_idx);
	else
		return NULL;
}

/**
 * ice_pkg_advance_sect
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This helper function will advance the section within the ice segment,
 * also advancing the buffer if needed.
 */
static bool ice_pkg_advance_sect(struct ice_seg *ice_seg,
				 struct ice_pkg_enum *state)
{
	/* continuation call before any buffer was found: nothing to walk */
	if (!ice_seg && !state->buf)
		return false;

	/* continuation: try the next section of the current buffer first */
	if (!ice_seg && state->buf)
		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
			return true;

	/* first call, or the current buffer is exhausted: fetch next buffer */
	state->buf = ice_pkg_enum_buf(ice_seg, state);
	if (!state->buf)
		return false;

	/* start of new buffer, reset section index */
	state->sect_idx = 0;
	return true;
}

/**
 * ice_pkg_enum_section
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the matching
 * sections has been reached.
 */
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
			   u32 sect_type)
{
	u16 offset, size;

	/* remember the requested type on the first call only */
	if (ice_seg)
		state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))
		return NULL;

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       cpu_to_le32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))
			return NULL;

	/* validate section */
	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
		return NULL;

	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
		return NULL;

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)
		return NULL;

	state->sect_type =
		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect =
		((u8 *)state->buf) +
		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);

	return state->sect;
}

/**
 * ice_pkg_enum_entry
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that handles access to the entries into the section type
 *
 * This function will enumerate all the entries in particular section type in
 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the entries has
 * been reached.
 *
 * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location entries in each
 * section.
 *
 * The offset parameter is optional, but should be used for sections that
 * contain an offset for each section table. For such cases, the section handler
 * function must return the appropriate offset + index to give the absolution
 * offset for each entry. For example, if the base for a section's header
 * indicates a base offset of 10, and the index for the entry is 2, then
 * section handler function should set the offset to 10 + 2 = 12.
 */
static void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
				struct ice_pkg_enum *state, u32 sect_type,
				u32 *offset,
				void *(*handler)(u32 sect_type, void *section,
						 u32 index, u32 *offset))
{
	void *entry;

	if (ice_seg) {
		/* first call: a handler is mandatory */
		if (!handler)
			return NULL;

		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
			return NULL;

		state->entry_idx = 0;
		state->handler = handler;
	} else {
		state->entry_idx++;
	}

	if (!state->handler)
		return NULL;

	/* get entry */
	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
			       offset);
	if (!entry) {
		/* end of a section, look for another section of this type */
		if (!ice_pkg_enum_section(NULL, state, 0))
			return NULL;

		state->entry_idx = 0;
		entry = state->handler(state->sect_type, state->sect,
				       state->entry_idx, offset);
	}

	return entry;
}

/**
 * ice_sw_fv_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the field vector entry to be returned
 * @offset: ptr to variable that receives the offset in the field vector table
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * This function treats the given section as of type ice_sw_fv_section and
 * enumerates offset field. "offset" is an index into the field vector table.
 */
static void *ice_sw_fv_handler(u32 sect_type, void *section, u32 index,
			       u32 *offset)
{
	struct ice_sw_fv_section *fv_section = section;

	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
		return NULL;
	if (index >= le16_to_cpu(fv_section->count))
		return NULL;
	if (offset)
		/* "index" passed in to this function is relative to a given
		 * 4k block. To get to the true index into the field vector
		 * table need to add the relative index to the base_offset
		 * field of this section
		 */
		*offset = le16_to_cpu(fv_section->base_offset) + index;
	return fv_section->fv + index;
}

/**
 * ice_get_prof_index_max - get the max profile index for used profile
 * @hw: pointer to the HW struct
 *
 * Calling this function will get the max profile index for used profile
 * and store the index number in struct ice_switch_info *switch_info
 * in HW for following use.
 *
 * Return: 0 on success, -EINVAL when no package segment is loaded.
 */
static int ice_get_prof_index_max(struct ice_hw *hw)
{
	u16 prof_index = 0, j, max_prof_index = 0;
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	bool flag = false;
	struct ice_fv *fv;
	u32 offset;

	memset(&state, 0, sizeof(state));

	if (!hw->seg)
		return -EINVAL;

	ice_seg = hw->seg;

	do {
		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					&offset, ice_sw_fv_handler);
		if (!fv)
			break;
		ice_seg = NULL;

		/* an unused profile has prot_id set to 0xff and off set to
		 * 0x1ff for all of its field vector words; any other value
		 * marks the profile as in use
		 */
		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
				flag = true;
		if (flag && prof_index > max_prof_index)
			max_prof_index = prof_index;

		prof_index++;
		flag = false;
	} while (fv);

	hw->switch_info->max_used_prof_index = max_prof_index;

	return 0;
}

/**
 * ice_get_ddp_pkg_state - get DDP pkg state after download
 * @hw: pointer to the HW struct
 * @already_loaded: indicates if pkg was already loaded onto the device
 */
static enum ice_ddp_state ice_get_ddp_pkg_state(struct ice_hw *hw,
						bool already_loaded)
{
	/* exact match of version and name against the active package */
	if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
	    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
	    hw->pkg_ver.update == hw->active_pkg_ver.update &&
	    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
	    !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
		if (already_loaded)
			return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
		else
			return ICE_DDP_PKG_SUCCESS;
	} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
		   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
		return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
	} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
		   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
		return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
	} else {
		return ICE_DDP_PKG_ERR;
	}
}

/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX 0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
}

/**
 * ice_marker_ptype_tcam_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the Marker PType TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual Marker PType TCAM entries.
 */
static void *ice_marker_ptype_tcam_handler(u32 sect_type, void *section,
					   u32 index, u32 *offset)
{
	struct ice_marker_ptype_tcam_section *marker_ptype;

	if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
		return NULL;

	/* NOTE(review): sanity bound uses '>' (matches the sibling label and
	 * boost handlers); the per-section count check below is the real
	 * upper bound
	 */
	if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	marker_ptype = section;
	if (index >= le16_to_cpu(marker_ptype->count))
		return NULL;

	return marker_ptype->tcam + index;
}

/**
 * ice_add_dvm_hint
 * @hw: pointer to the HW structure
 * @val: value of the boost entry
 * @enable: true if entry needs to be enabled, or false if needs to be disabled
 */
static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
{
	/* silently drop hints beyond the fixed-size table */
	if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
		hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
		hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
		hw->dvm_upd.count++;
	}
}

/**
 * ice_add_tunnel_hint
 * @hw: pointer to the HW structure
 * @label_name: label text
 * @val: value of the tunnel port boost entry
 */
static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
{
	if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
		u16 i;

		for (i = 0; tnls[i].type != TNL_LAST; i++) {
			size_t len = strlen(tnls[i].label_prefix);

			/* Look for matching label start, before continuing */
			if (strncmp(label_name, tnls[i].label_prefix, len))
				continue;

			/* Make sure this label matches our PF. Note that the PF
			 * character ('0' - '7') will be located where our
			 * prefix string's null terminator is located.
			 */
			if ((label_name[len] - '0') == hw->pf_id) {
				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
				hw->tnl.tbl[hw->tnl.count].valid = false;
				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
				hw->tnl.tbl[hw->tnl.count].port = 0;
				hw->tnl.count++;
				break;
			}
		}
	}
}

/**
 * ice_label_enum_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the label entry to be returned
 * @offset: pointer to receive absolute offset, always zero for label sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual label entries.
 */
static void *ice_label_enum_handler(u32 __always_unused sect_type,
				    void *section, u32 index, u32 *offset)
{
	struct ice_label_section *labels;

	if (!section)
		return NULL;

	if (index > ICE_MAX_LABELS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	labels = section;
	if (index >= le16_to_cpu(labels->count))
		return NULL;

	return labels->label + index;
}

/**
 * ice_enum_labels
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @type: the section type that will contain the label (0 on subsequent calls)
 * @state: ice_pkg_enum structure that will hold the state of the enumeration
 * @value: pointer to a value that will return the label's value if found
 *
 * Enumerates a list of labels in the package. The caller will call
 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
 * the end of the list has been reached.
 */
static char *ice_enum_labels(struct ice_seg *ice_seg, u32 type,
			     struct ice_pkg_enum *state, u16 *value)
{
	struct ice_label *label;

	/* Check for valid label section on first call */
	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
		return NULL;

	label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
				   ice_label_enum_handler);
	if (!label)
		return NULL;

	/* return both the value (out-param) and the name (return value) */
	*value = le16_to_cpu(label->value);
	return label->name;
}

/**
 * ice_boost_tcam_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the boost TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual boost TCAM entries.
 */
static void *ice_boost_tcam_handler(u32 sect_type, void *section, u32 index,
				    u32 *offset)
{
	struct ice_boost_tcam_section *boost;

	if (!section)
		return NULL;

	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
		return NULL;

	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	boost = section;
	if (index >= le16_to_cpu(boost->count))
		return NULL;

	return boost->tcam + index;
}

/**
 * ice_find_boost_entry
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @addr: Boost TCAM address of entry to search for
 * @entry: returns pointer to the entry
 *
 * Finds a particular Boost TCAM entry and returns a pointer to that entry
 * if it is found. The ice_seg parameter must not be NULL since the first call
 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
 *
 * Return: 0 and *entry set on success, -EINVAL for a NULL segment, -EIO and
 * *entry = NULL when the address is not present in the package.
 */
static int ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
				struct ice_boost_tcam_entry **entry)
{
	struct ice_boost_tcam_entry *tcam;
	struct ice_pkg_enum state;

	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return -EINVAL;

	do {
		tcam = ice_pkg_enum_entry(ice_seg, &state,
					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
					  ice_boost_tcam_handler);
		if (tcam && le16_to_cpu(tcam->addr) == addr) {
			*entry = tcam;
			return 0;
		}

		/* continuation calls pass NULL for the segment */
		ice_seg = NULL;
	} while (tcam);

	*entry = NULL;
	return -EIO;
}

/**
 * ice_is_init_pkg_successful - check if DDP init was successful
 * @state: state of the DDP pkg after download
 */
bool ice_is_init_pkg_successful(enum ice_ddp_state state)
{
	switch (state) {
	case ICE_DDP_PKG_SUCCESS:
	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
		return true;
	default:
		return false;
	}
}

/**
 * ice_pkg_buf_alloc
 * @hw: pointer to the HW structure
 *
 * Allocates a package buffer and returns a pointer to the buffer header.
 * Note: all package contents must be in Little Endian form.
689 */ 690 struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) 691 { 692 struct ice_buf_build *bld; 693 struct ice_buf_hdr *buf; 694 695 bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL); 696 if (!bld) 697 return NULL; 698 699 buf = (struct ice_buf_hdr *)bld; 700 buf->data_end = 701 cpu_to_le16(offsetof(struct ice_buf_hdr, section_entry)); 702 return bld; 703 } 704 705 static bool ice_is_gtp_u_profile(u16 prof_idx) 706 { 707 return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID && 708 prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) || 709 prof_idx == ICE_PROFID_IPV4_GTPU_TEID; 710 } 711 712 static bool ice_is_gtp_c_profile(u16 prof_idx) 713 { 714 switch (prof_idx) { 715 case ICE_PROFID_IPV4_GTPC_TEID: 716 case ICE_PROFID_IPV4_GTPC_NO_TEID: 717 case ICE_PROFID_IPV6_GTPC_TEID: 718 case ICE_PROFID_IPV6_GTPC_NO_TEID: 719 return true; 720 default: 721 return false; 722 } 723 } 724 725 static bool ice_is_pfcp_profile(u16 prof_idx) 726 { 727 return prof_idx >= ICE_PROFID_IPV4_PFCP_NODE && 728 prof_idx <= ICE_PROFID_IPV6_PFCP_SESSION; 729 } 730 731 /** 732 * ice_get_sw_prof_type - determine switch profile type 733 * @hw: pointer to the HW structure 734 * @fv: pointer to the switch field vector 735 * @prof_idx: profile index to check 736 */ 737 static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw, 738 struct ice_fv *fv, u32 prof_idx) 739 { 740 u16 i; 741 742 if (ice_is_gtp_c_profile(prof_idx)) 743 return ICE_PROF_TUN_GTPC; 744 745 if (ice_is_gtp_u_profile(prof_idx)) 746 return ICE_PROF_TUN_GTPU; 747 748 if (ice_is_pfcp_profile(prof_idx)) 749 return ICE_PROF_TUN_PFCP; 750 751 for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { 752 /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ 753 if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && 754 fv->ew[i].off == ICE_VNI_OFFSET) 755 return ICE_PROF_TUN_UDP; 756 757 /* GRE tunnel will have GRE protocol */ 758 if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) 759 return ICE_PROF_TUN_GRE; 760 } 761 762 
return ICE_PROF_NON_TUN; 763 } 764 765 /** 766 * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type 767 * @hw: pointer to hardware structure 768 * @req_profs: type of profiles requested 769 * @bm: pointer to memory for returning the bitmap of field vectors 770 */ 771 void ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, 772 unsigned long *bm) 773 { 774 struct ice_pkg_enum state; 775 struct ice_seg *ice_seg; 776 struct ice_fv *fv; 777 778 if (req_profs == ICE_PROF_ALL) { 779 bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); 780 return; 781 } 782 783 memset(&state, 0, sizeof(state)); 784 bitmap_zero(bm, ICE_MAX_NUM_PROFILES); 785 ice_seg = hw->seg; 786 do { 787 enum ice_prof_type prof_type; 788 u32 offset; 789 790 fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, 791 &offset, ice_sw_fv_handler); 792 ice_seg = NULL; 793 794 if (fv) { 795 /* Determine field vector type */ 796 prof_type = ice_get_sw_prof_type(hw, fv, offset); 797 798 if (req_profs & prof_type) 799 set_bit((u16)offset, bm); 800 } 801 } while (fv); 802 } 803 804 /** 805 * ice_get_sw_fv_list 806 * @hw: pointer to the HW structure 807 * @lkups: list of protocol types 808 * @bm: bitmap of field vectors to consider 809 * @fv_list: Head of a list 810 * 811 * Finds all the field vector entries from switch block that contain 812 * a given protocol ID and offset and returns a list of structures of type 813 * "ice_sw_fv_list_entry". Every structure in the list has a field vector 814 * definition and profile ID information 815 * NOTE: The caller of the function is responsible for freeing the memory 816 * allocated for every list entry. 
 */
int ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
		       unsigned long *bm, struct list_head *fv_list)
{
	struct ice_sw_fv_list_entry *fvl;
	struct ice_sw_fv_list_entry *tmp;
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	struct ice_fv *fv;
	u32 offset;

	memset(&state, 0, sizeof(state));

	if (!lkups->n_val_words || !hw->seg)
		return -EINVAL;

	ice_seg = hw->seg;
	do {
		u16 i;

		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					&offset, ice_sw_fv_handler);
		if (!fv)
			break;
		ice_seg = NULL;

		/* If field vector is not in the bitmap list, then skip this
		 * profile.
		 */
		if (!test_bit((u16)offset, bm))
			continue;

		/* the profile matches only when every lookup word appears
		 * somewhere in this field vector; the entry is added after
		 * the last word is confirmed
		 */
		for (i = 0; i < lkups->n_val_words; i++) {
			int j;

			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv->ew[j].prot_id ==
					    lkups->fv_words[i].prot_id &&
				    fv->ew[j].off == lkups->fv_words[i].off)
					break;
			/* word i not found: this profile cannot match */
			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
				break;
			if (i + 1 == lkups->n_val_words) {
				fvl = devm_kzalloc(ice_hw_to_dev(hw),
						   sizeof(*fvl), GFP_KERNEL);
				if (!fvl)
					goto err;
				fvl->fv_ptr = fv;
				fvl->profile_id = offset;
				list_add(&fvl->list_entry, fv_list);
				break;
			}
		}
	} while (fv);
	if (list_empty(fv_list)) {
		dev_warn(ice_hw_to_dev(hw),
			 "Required profiles not found in currently loaded DDP package");
		return -EIO;
	}

	return 0;

err:
	/* unwind every entry already linked onto the caller's list */
	list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
		list_del(&fvl->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fvl);
	}

	return -ENOMEM;
}

/**
 * ice_init_prof_result_bm - Initialize the profile result index bitmap
 * @hw: pointer to hardware structure
 */
void ice_init_prof_result_bm(struct ice_hw *hw)
{
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	struct ice_fv *fv;

	memset(&state, 0, sizeof(state));

	if (!hw->seg)
		return;

	ice_seg = hw->seg;
	do {
		u32 off;
		u16 i;

		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					&off, ice_sw_fv_handler);
		ice_seg = NULL;
		if (!fv)
			break;

		bitmap_zero(hw->switch_info->prof_res_bm[off],
			    ICE_MAX_FV_WORDS);

		/* Determine empty field vector indices, these can be
		 * used for recipe results. Skip index 0, since it is
		 * always used for Switch ID.
		 */
		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
				set_bit(i, hw->switch_info->prof_res_bm[off]);
	} while (fv);
}

/**
 * ice_pkg_buf_free
 * @hw: pointer to the HW structure
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Frees a package buffer
 */
void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
	devm_kfree(ice_hw_to_dev(hw), bld);
}

/**
 * ice_pkg_buf_reserve_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @count: the number of sections to reserve
 *
 * Reserves one or more section table entries in a package buffer. This routine
 * can be called multiple times as long as they are made before calling
 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
 * is called once, the number of sections that can be allocated will not be able
 * to be increased; not using all reserved sections is fine, but this will
 * result in some wasted space in the buffer.
 * Note: all package contents must be in Little Endian form.
952 */ 953 int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) 954 { 955 struct ice_buf_hdr *buf; 956 u16 section_count; 957 u16 data_end; 958 959 if (!bld) 960 return -EINVAL; 961 962 buf = (struct ice_buf_hdr *)&bld->buf; 963 964 /* already an active section, can't increase table size */ 965 section_count = le16_to_cpu(buf->section_count); 966 if (section_count > 0) 967 return -EIO; 968 969 if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) 970 return -EIO; 971 bld->reserved_section_table_entries += count; 972 973 data_end = le16_to_cpu(buf->data_end) + 974 flex_array_size(buf, section_entry, count); 975 buf->data_end = cpu_to_le16(data_end); 976 977 return 0; 978 } 979 980 /** 981 * ice_pkg_buf_alloc_section 982 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 983 * @type: the section type value 984 * @size: the size of the section to reserve (in bytes) 985 * 986 * Reserves memory in the buffer for a section's content and updates the 987 * buffers' status accordingly. This routine returns a pointer to the first 988 * byte of the section start within the buffer, which is used to fill in the 989 * section contents. 990 * Note: all package contents must be in Little Endian form. 
 */
void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
{
	struct ice_buf_hdr *buf;
	u16 sect_count;
	u16 data_end;

	/* a zero type or size is never a valid section */
	if (!bld || !type || !size)
		return NULL;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* check for enough space left in buffer */
	data_end = le16_to_cpu(buf->data_end);

	/* section start must align on 4 byte boundary */
	data_end = ALIGN(data_end, 4);

	if ((data_end + size) > ICE_MAX_S_DATA_END)
		return NULL;

	/* check for more available section table entries */
	sect_count = le16_to_cpu(buf->section_count);
	if (sect_count < bld->reserved_section_table_entries) {
		void *section_ptr = ((u8 *)buf) + data_end;

		/* record the section in the table, then bump data_end and
		 * the active section count
		 */
		buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
		buf->section_entry[sect_count].size = cpu_to_le16(size);
		buf->section_entry[sect_count].type = cpu_to_le32(type);

		data_end += size;
		buf->data_end = cpu_to_le16(data_end);

		buf->section_count = cpu_to_le16(sect_count + 1);
		return section_ptr;
	}

	/* no free section table entries */
	return NULL;
}

/**
 * ice_pkg_buf_alloc_single_section
 * @hw: pointer to the HW structure
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 * @section: returns pointer to the section
 *
 * Allocates a package buffer with a single section.
 * Note: all package contents must be in Little Endian form.
1041 */ 1042 struct ice_buf_build *ice_pkg_buf_alloc_single_section(struct ice_hw *hw, 1043 u32 type, u16 size, 1044 void **section) 1045 { 1046 struct ice_buf_build *buf; 1047 1048 if (!section) 1049 return NULL; 1050 1051 buf = ice_pkg_buf_alloc(hw); 1052 if (!buf) 1053 return NULL; 1054 1055 if (ice_pkg_buf_reserve_section(buf, 1)) 1056 goto ice_pkg_buf_alloc_single_section_err; 1057 1058 *section = ice_pkg_buf_alloc_section(buf, type, size); 1059 if (!*section) 1060 goto ice_pkg_buf_alloc_single_section_err; 1061 1062 return buf; 1063 1064 ice_pkg_buf_alloc_single_section_err: 1065 ice_pkg_buf_free(hw, buf); 1066 return NULL; 1067 } 1068 1069 /** 1070 * ice_pkg_buf_get_active_sections 1071 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 1072 * 1073 * Returns the number of active sections. Before using the package buffer 1074 * in an update package command, the caller should make sure that there is at 1075 * least one active section - otherwise, the buffer is not legal and should 1076 * not be used. 1077 * Note: all package contents must be in Little Endian form. 
1078 */ 1079 u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) 1080 { 1081 struct ice_buf_hdr *buf; 1082 1083 if (!bld) 1084 return 0; 1085 1086 buf = (struct ice_buf_hdr *)&bld->buf; 1087 return le16_to_cpu(buf->section_count); 1088 } 1089 1090 /** 1091 * ice_pkg_buf 1092 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) 1093 * 1094 * Return a pointer to the buffer's header 1095 */ 1096 struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) 1097 { 1098 if (!bld) 1099 return NULL; 1100 1101 return &bld->buf; 1102 } 1103 1104 static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err) 1105 { 1106 switch (aq_err) { 1107 case ICE_AQ_RC_ENOSEC: 1108 case ICE_AQ_RC_EBADSIG: 1109 return ICE_DDP_PKG_FILE_SIGNATURE_INVALID; 1110 case ICE_AQ_RC_ESVN: 1111 return ICE_DDP_PKG_FILE_REVISION_TOO_LOW; 1112 case ICE_AQ_RC_EBADMAN: 1113 case ICE_AQ_RC_EBADBUF: 1114 return ICE_DDP_PKG_LOAD_ERROR; 1115 default: 1116 return ICE_DDP_PKG_ERR; 1117 } 1118 } 1119 1120 /** 1121 * ice_acquire_global_cfg_lock 1122 * @hw: pointer to the HW structure 1123 * @access: access type (read or write) 1124 * 1125 * This function will request ownership of the global config lock for reading 1126 * or writing of the package. When attempting to obtain write access, the 1127 * caller must check for the following two return values: 1128 * 1129 * 0 - Means the caller has acquired the global config lock 1130 * and can perform writing of the package. 1131 * -EALREADY - Indicates another driver has already written the 1132 * package or has found that no update was necessary; in 1133 * this case, the caller can just skip performing any 1134 * update of the package. 
 */
static int ice_acquire_global_cfg_lock(struct ice_hw *hw,
				       enum ice_aq_res_access_type access)
{
	int status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);

	/* The software mutex is only taken when the firmware resource was
	 * actually granted; it is paired with ice_release_global_cfg_lock().
	 */
	if (!status)
		mutex_lock(&ice_global_cfg_lock_sw);
	else if (status == -EALREADY)
		ice_debug(hw, ICE_DBG_PKG,
			  "Global config lock: No work to do\n");

	return status;
}

/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock (software mutex first,
 * then the firmware-owned resource).
 */
static void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	mutex_unlock(&ice_global_cfg_lock_sw);
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}

/**
 * ice_aq_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package (0x0C40)
 */
static int
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	int status;

	/* default the outputs so callers see zeroes unless FW reports an
	 * error below
	 */
	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	/* RD flag: this command carries a data buffer down to the FW */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == -EIO) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * ice_get_pkg_seg_by_idx
 * @pkg_hdr: pointer to the package header to be searched
 * @idx: index of segment
 *
 * Returns the generic segment header at @idx, or NULL when @idx is out of
 * range for this package.
 */
static struct ice_generic_seg_hdr *
ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
{
	if (idx < le32_to_cpu(pkg_hdr->seg_count))
		return (struct ice_generic_seg_hdr *)
			((u8 *)pkg_hdr +
			 le32_to_cpu(pkg_hdr->seg_offset[idx]));

	return NULL;
}

/**
 * ice_is_signing_seg_at_idx - determine if segment is a signing segment
 * @pkg_hdr: pointer to package header
 * @idx: segment index
 */
static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
{
	struct ice_generic_seg_hdr *seg;

	seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx);
	if (!seg)
		return false;

	return le32_to_cpu(seg->seg_type) == SEGMENT_TYPE_SIGNING;
}

/**
 * ice_is_signing_seg_type_at_idx
 * @pkg_hdr: pointer to package header
 * @idx: segment index
 * @seg_id: segment id that is expected
 * @sign_type: signing type
 *
 * Determine if a segment is a signing segment of the correct type
 */
static bool
ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
			       u32 seg_id, u32 sign_type)
{
	struct ice_sign_seg *seg;

	/* first make sure the segment at @idx is a signing segment at all */
	if (!ice_is_signing_seg_at_idx(pkg_hdr, idx))
		return false;

	seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);

	if (seg && le32_to_cpu(seg->seg_id) == seg_id &&
	    le32_to_cpu(seg->sign_type) == sign_type)
		return true;

	return false;
}

/**
 * ice_is_buffer_metadata - determine if
package buffer is a metadata buffer 1274 * @buf: pointer to buffer header 1275 */ 1276 static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf) 1277 { 1278 if (le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF) 1279 return true; 1280 1281 return false; 1282 } 1283 1284 /** 1285 * ice_is_last_download_buffer 1286 * @buf: pointer to current buffer header 1287 * @idx: index of the buffer in the current sequence 1288 * @count: the buffer count in the current sequence 1289 * 1290 * Note: this routine should only be called if the buffer is not the last buffer 1291 */ 1292 static bool 1293 ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count) 1294 { 1295 struct ice_buf *next_buf; 1296 1297 if ((idx + 1) == count) 1298 return true; 1299 1300 /* A set metadata flag in the next buffer will signal that the current 1301 * buffer will be the last buffer downloaded 1302 */ 1303 next_buf = ((struct ice_buf *)buf) + 1; 1304 1305 return ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf); 1306 } 1307 1308 /** 1309 * ice_dwnld_cfg_bufs_no_lock 1310 * @hw: pointer to the hardware structure 1311 * @bufs: pointer to an array of buffers 1312 * @start: buffer index of first buffer to download 1313 * @count: the number of buffers to download 1314 * @indicate_last: if true, then set last buffer flag on last buffer download 1315 * 1316 * Downloads package configuration buffers to the firmware. Metadata buffers 1317 * are skipped, and the first metadata buffer found indicates that the rest 1318 * of the buffers are all metadata buffers. 
1319 */ 1320 static enum ice_ddp_state 1321 ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start, 1322 u32 count, bool indicate_last) 1323 { 1324 enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; 1325 struct ice_buf_hdr *bh; 1326 enum ice_aq_err err; 1327 u32 offset, info, i; 1328 1329 if (!bufs || !count) 1330 return ICE_DDP_PKG_ERR; 1331 1332 /* If the first buffer's first section has its metadata bit set 1333 * then there are no buffers to be downloaded, and the operation is 1334 * considered a success. 1335 */ 1336 bh = (struct ice_buf_hdr *)(bufs + start); 1337 if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) 1338 return ICE_DDP_PKG_SUCCESS; 1339 1340 for (i = 0; i < count; i++) { 1341 bool last = false; 1342 int status; 1343 1344 bh = (struct ice_buf_hdr *)(bufs + start + i); 1345 1346 if (indicate_last) 1347 last = ice_is_last_download_buffer(bh, i, count); 1348 1349 status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, 1350 &offset, &info, NULL); 1351 1352 /* Save AQ status from download package */ 1353 if (status) { 1354 ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", 1355 status, offset, info); 1356 err = hw->adminq.sq_last_status; 1357 state = ice_map_aq_err_to_ddp_state(err); 1358 break; 1359 } 1360 1361 if (last) 1362 break; 1363 } 1364 1365 return state; 1366 } 1367 1368 /** 1369 * ice_download_pkg_sig_seg - download a signature segment 1370 * @hw: pointer to the hardware structure 1371 * @seg: pointer to signature segment 1372 */ 1373 static enum ice_ddp_state 1374 ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg) 1375 { 1376 return ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0, 1377 le32_to_cpu(seg->buf_tbl.buf_count), 1378 false); 1379 } 1380 1381 /** 1382 * ice_download_pkg_config_seg - download a config segment 1383 * @hw: pointer to the hardware structure 1384 * @pkg_hdr: pointer to package header 1385 * @idx: segment index 1386 * @start: 
starting buffer 1387 * @count: buffer count 1388 * 1389 * Note: idx must reference a ICE segment 1390 */ 1391 static enum ice_ddp_state 1392 ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, 1393 u32 idx, u32 start, u32 count) 1394 { 1395 struct ice_buf_table *bufs; 1396 struct ice_seg *seg; 1397 u32 buf_count; 1398 1399 seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); 1400 if (!seg) 1401 return ICE_DDP_PKG_ERR; 1402 1403 bufs = ice_find_buf_table(seg); 1404 buf_count = le32_to_cpu(bufs->buf_count); 1405 1406 if (start >= buf_count || start + count > buf_count) 1407 return ICE_DDP_PKG_ERR; 1408 1409 return ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count, 1410 true); 1411 } 1412 1413 /** 1414 * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment 1415 * @hw: pointer to the hardware structure 1416 * @pkg_hdr: pointer to package header 1417 * @idx: segment index (must be a signature segment) 1418 * 1419 * Note: idx must reference a signature segment 1420 */ 1421 static enum ice_ddp_state 1422 ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, 1423 u32 idx) 1424 { 1425 enum ice_ddp_state state; 1426 struct ice_sign_seg *seg; 1427 u32 conf_idx; 1428 u32 start; 1429 u32 count; 1430 1431 seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); 1432 if (!seg) { 1433 state = ICE_DDP_PKG_ERR; 1434 goto exit; 1435 } 1436 1437 count = le32_to_cpu(seg->signed_buf_count); 1438 state = ice_download_pkg_sig_seg(hw, seg); 1439 if (state || !count) 1440 goto exit; 1441 1442 conf_idx = le32_to_cpu(seg->signed_seg_idx); 1443 start = le32_to_cpu(seg->signed_buf_start); 1444 1445 state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start, 1446 count); 1447 1448 exit: 1449 return state; 1450 } 1451 1452 /** 1453 * ice_match_signing_seg - determine if a matching signing segment exists 1454 * @pkg_hdr: pointer to package header 1455 * @seg_id: segment id that is expected 1456 
* @sign_type: signing type 1457 */ 1458 static bool 1459 ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type) 1460 { 1461 u32 i; 1462 1463 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { 1464 if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id, 1465 sign_type)) 1466 return true; 1467 } 1468 1469 return false; 1470 } 1471 1472 /** 1473 * ice_post_dwnld_pkg_actions - perform post download package actions 1474 * @hw: pointer to the hardware structure 1475 */ 1476 static enum ice_ddp_state 1477 ice_post_dwnld_pkg_actions(struct ice_hw *hw) 1478 { 1479 int status; 1480 1481 status = ice_set_vlan_mode(hw); 1482 if (status) { 1483 ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", 1484 status); 1485 return ICE_DDP_PKG_ERR; 1486 } 1487 1488 return ICE_DDP_PKG_SUCCESS; 1489 } 1490 1491 /** 1492 * ice_download_pkg_with_sig_seg 1493 * @hw: pointer to the hardware structure 1494 * @pkg_hdr: pointer to package header 1495 * 1496 * Handles the download of a complete package. 
1497 */ 1498 static enum ice_ddp_state 1499 ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) 1500 { 1501 enum ice_aq_err aq_err = hw->adminq.sq_last_status; 1502 enum ice_ddp_state state = ICE_DDP_PKG_ERR; 1503 int status; 1504 u32 i; 1505 1506 ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id); 1507 ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type); 1508 1509 status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); 1510 if (status) { 1511 if (status == -EALREADY) 1512 state = ICE_DDP_PKG_ALREADY_LOADED; 1513 else 1514 state = ice_map_aq_err_to_ddp_state(aq_err); 1515 return state; 1516 } 1517 1518 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { 1519 if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id, 1520 hw->pkg_sign_type)) 1521 continue; 1522 1523 state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i); 1524 if (state) 1525 break; 1526 } 1527 1528 if (!state) 1529 state = ice_post_dwnld_pkg_actions(hw); 1530 1531 ice_release_global_cfg_lock(hw); 1532 1533 return state; 1534 } 1535 1536 /** 1537 * ice_dwnld_cfg_bufs 1538 * @hw: pointer to the hardware structure 1539 * @bufs: pointer to an array of buffers 1540 * @count: the number of buffers in the array 1541 * 1542 * Obtains global config lock and downloads the package configuration buffers 1543 * to the firmware. 1544 */ 1545 static enum ice_ddp_state 1546 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) 1547 { 1548 enum ice_ddp_state state; 1549 struct ice_buf_hdr *bh; 1550 int status; 1551 1552 if (!bufs || !count) 1553 return ICE_DDP_PKG_ERR; 1554 1555 /* If the first buffer's first section has its metadata bit set 1556 * then there are no buffers to be downloaded, and the operation is 1557 * considered a success. 
1558 */ 1559 bh = (struct ice_buf_hdr *)bufs; 1560 if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) 1561 return ICE_DDP_PKG_SUCCESS; 1562 1563 status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); 1564 if (status) { 1565 if (status == -EALREADY) 1566 return ICE_DDP_PKG_ALREADY_LOADED; 1567 return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); 1568 } 1569 1570 state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true); 1571 if (!state) 1572 state = ice_post_dwnld_pkg_actions(hw); 1573 1574 ice_release_global_cfg_lock(hw); 1575 1576 return state; 1577 } 1578 1579 /** 1580 * ice_download_pkg_without_sig_seg 1581 * @hw: pointer to the hardware structure 1582 * @ice_seg: pointer to the segment of the package to be downloaded 1583 * 1584 * Handles the download of a complete package without signature segment. 1585 */ 1586 static enum ice_ddp_state 1587 ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg) 1588 { 1589 struct ice_buf_table *ice_buf_tbl; 1590 1591 ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", 1592 ice_seg->hdr.seg_format_ver.major, 1593 ice_seg->hdr.seg_format_ver.minor, 1594 ice_seg->hdr.seg_format_ver.update, 1595 ice_seg->hdr.seg_format_ver.draft); 1596 1597 ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", 1598 le32_to_cpu(ice_seg->hdr.seg_type), 1599 le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); 1600 1601 ice_buf_tbl = ice_find_buf_table(ice_seg); 1602 1603 ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", 1604 le32_to_cpu(ice_buf_tbl->buf_count)); 1605 1606 return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, 1607 le32_to_cpu(ice_buf_tbl->buf_count)); 1608 } 1609 1610 /** 1611 * ice_download_pkg 1612 * @hw: pointer to the hardware structure 1613 * @pkg_hdr: pointer to package header 1614 * @ice_seg: pointer to the segment of the package to be downloaded 1615 * 1616 * Handles the download of a complete package. 
 */
static enum ice_ddp_state
ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
		 struct ice_seg *ice_seg)
{
	enum ice_ddp_state state;

	/* dispatch on the signing-segment flag cached by ice_init_pkg_info() */
	if (hw->pkg_has_signing_seg)
		state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
	else
		state = ice_download_pkg_without_sig_seg(hw, ice_seg);

	/* VLAN mode post-configuration runs regardless of download outcome */
	ice_post_pkg_dwnld_vlan_mode_cfg(hw);

	return state;
}

/**
 * ice_aq_get_pkg_info_list
 * @hw: pointer to the hardware structure
 * @pkg_info: the buffer which will receive the information list
 * @buf_size: the size of the pkg_info information buffer
 * @cd: pointer to command details structure or NULL
 *
 * Get Package Info List (0x0C43)
 */
static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
				    struct ice_aqc_get_pkg_info_resp *pkg_info,
				    u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);

	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}

/**
 * ice_aq_update_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Update Package (0x0C42)
 */
static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
			     u16 buf_size, bool last_buf, u32 *error_offset,
			     u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	int status;

	/* default the outputs so callers see zeroes unless FW reports an
	 * error below
	 */
	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	/* update shares the download command layout, only the opcode differs */
	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == -EIO) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * ice_aq_upload_section
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer which will receive the section
 * @buf_size: the size of the package buffer
 * @cd: pointer to command details structure or NULL
 *
 * Upload Section (0x0C41)
 */
int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
			  u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
}

/**
 * ice_update_pkg_no_lock
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Sends each buffer to the FW via the Update Package command; stops at the
 * first failure. Caller must hold the change lock (see ice_update_pkg()).
 */
int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	int status = 0;
	u32 i;

	for (i = 0; i < count; i++) {
		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
		bool last = ((i + 1) == count);
		u32 offset, info;

		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
					   last, &offset, &info, NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_PKG,
				  "Update pkg failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}
	}

	return status;
}

/**
 * ice_update_pkg
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains change lock and updates package.
 */
int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	int status;

	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		return status;

	status = ice_update_pkg_no_lock(hw, bufs, count);

	ice_release_change_lock(hw);

	return status;
}

/**
 * ice_find_seg_in_pkg
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */
static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    struct ice_pkg_hdr *pkg_hdr)
{
	u32 i;

	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
		  pkg_hdr->pkg_format_ver.update,
		  pkg_hdr->pkg_format_ver.draft);

	/* Search all package segments for the requested segment type */
	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
		struct ice_generic_seg_hdr *seg;

		seg = (struct ice_generic_seg_hdr
			       *)((u8 *)pkg_hdr +
				  le32_to_cpu(pkg_hdr->seg_offset[i]));

		if (le32_to_cpu(seg->seg_type) == seg_type)
			return seg;
	}

	return NULL;
}

/**
 * ice_has_signing_seg - determine if package has a signing segment
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 */
static bool
ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) 1816 { 1817 struct ice_generic_seg_hdr *seg_hdr; 1818 1819 seg_hdr = (struct ice_generic_seg_hdr *) 1820 ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr); 1821 1822 return seg_hdr ? true : false; 1823 } 1824 1825 /** 1826 * ice_get_pkg_segment_id - get correct package segment id, based on device 1827 * @mac_type: MAC type of the device 1828 */ 1829 static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type) 1830 { 1831 u32 seg_id; 1832 1833 switch (mac_type) { 1834 case ICE_MAC_E830: 1835 seg_id = SEGMENT_TYPE_ICE_E830; 1836 break; 1837 case ICE_MAC_GENERIC: 1838 case ICE_MAC_GENERIC_3K_E825: 1839 default: 1840 seg_id = SEGMENT_TYPE_ICE_E810; 1841 break; 1842 } 1843 1844 return seg_id; 1845 } 1846 1847 /** 1848 * ice_get_pkg_sign_type - get package segment sign type, based on device 1849 * @mac_type: MAC type of the device 1850 */ 1851 static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type) 1852 { 1853 u32 sign_type; 1854 1855 switch (mac_type) { 1856 case ICE_MAC_E830: 1857 sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB; 1858 break; 1859 case ICE_MAC_GENERIC_3K_E825: 1860 sign_type = SEGMENT_SIGN_TYPE_RSA3K_E825; 1861 break; 1862 case ICE_MAC_GENERIC: 1863 default: 1864 sign_type = SEGMENT_SIGN_TYPE_RSA2K; 1865 break; 1866 } 1867 1868 return sign_type; 1869 } 1870 1871 /** 1872 * ice_get_signing_req - get correct package requirements, based on device 1873 * @hw: pointer to the hardware structure 1874 */ 1875 static void ice_get_signing_req(struct ice_hw *hw) 1876 { 1877 hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type); 1878 hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type); 1879 } 1880 1881 /** 1882 * ice_init_pkg_info 1883 * @hw: pointer to the hardware structure 1884 * @pkg_hdr: pointer to the driver's package hdr 1885 * 1886 * Saves off the package details into the HW structure. 
 */
static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
					    struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_generic_seg_hdr *seg_hdr;

	if (!pkg_hdr)
		return ICE_DDP_PKG_ERR;

	/* cache whether the package carries a signing segment and which
	 * segment id / signing type this device requires
	 */
	hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr);
	ice_get_signing_req(hw);

	ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
		  hw->pkg_seg_id);

	seg_hdr = (struct ice_generic_seg_hdr *)
		ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
	if (seg_hdr) {
		struct ice_meta_sect *meta;
		struct ice_pkg_enum state;

		memset(&state, 0, sizeof(state));

		/* Get package information from the Metadata Section */
		meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
					    ICE_SID_METADATA);
		if (!meta) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Did not find ice metadata section in package\n");
			return ICE_DDP_PKG_INVALID_FILE;
		}

		/* save package version and name from the metadata section */
		hw->pkg_ver = meta->ver;
		memcpy(hw->pkg_name, meta->name, sizeof(meta->name));

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta->ver.major, meta->ver.minor, meta->ver.update,
			  meta->ver.draft, meta->name);

		/* save segment format version and id from the segment header */
		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
		memcpy(hw->ice_seg_id, seg_hdr->seg_id, sizeof(hw->ice_seg_id));

		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_format_ver.major,
			  seg_hdr->seg_format_ver.minor,
			  seg_hdr->seg_format_ver.update,
			  seg_hdr->seg_format_ver.draft, seg_hdr->seg_id);
	} else {
		ice_debug(hw, ICE_DBG_INIT,
			  "Did not find ice segment in driver package\n");
		return ICE_DDP_PKG_INVALID_FILE;
	}

	return ICE_DDP_PKG_SUCCESS;
}

/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 */
static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
	DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
			ICE_PKG_CNT);
	u16 size = __struct_size(pkg_info);
	u32 i;

	if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL))
		return ICE_DDP_PKG_ERR;

	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
/* flags string: one character per possible attribute, plus NUL */
#define ICE_PKG_FLAG_COUNT 4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		/* the active package is the one whose details are cached */
		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			hw->active_track_id =
				le32_to_cpu(pkg_info->pkg_info[i].track_id);
			memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name,
			       sizeof(pkg_info->pkg_info[i].name));
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", i,
			  pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

	return ICE_DDP_PKG_SUCCESS;
}

/**
 * ice_chk_pkg_compat
 * @hw: pointer to the hardware structure
 * @ospkg: pointer to the package hdr
 * @seg: pointer to the package segment hdr
 *
 * This function checks the package version compatibility with driver and NVM
 */
static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
					     struct ice_pkg_hdr *ospkg,
					     struct ice_seg **seg)
{
	DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
			ICE_PKG_CNT);
	u16 size = __struct_size(pkg);
	enum ice_ddp_state state;
	u32 i;

	/* Check package version compatibility */
	state = ice_chk_pkg_version(&hw->pkg_ver);
	if (state) {
		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
		return state;
	}

	/* find ICE segment in given package */
	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
						     ospkg);
	if (!*seg) {
		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
		return ICE_DDP_PKG_INVALID_FILE;
	}

	/* Check if FW is compatible with the OS package */
	if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL))
		return ICE_DDP_PKG_LOAD_ERROR;

	for (i = 0; i < le32_to_cpu(pkg->count); i++) {
		/* loop till we find the NVM package */
		if (!pkg->pkg_info[i].is_in_nvm)
			continue;
		/* major must match exactly; OS minor must not be newer than
		 * the NVM package's minor
		 */
		if ((*seg)->hdr.seg_format_ver.major !=
			    pkg->pkg_info[i].ver.major ||
		    (*seg)->hdr.seg_format_ver.minor >
			    pkg->pkg_info[i].ver.minor) {
			state = ICE_DDP_PKG_FW_MISMATCH;
			ice_debug(hw, ICE_DBG_INIT,
				  "OS package is not compatible with NVM.\n");
		}
		/* done processing NVM package so break */
		break;
	}

	return state;
}

/**
 * ice_init_pkg_hints
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the segment of the package scan (non-NULL)
 *
 * This function will scan the package and save off relevant information
 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
 * since the first call to ice_enum_labels requires a pointer to an actual
 * ice_seg structure.
2056 */ 2057 static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) 2058 { 2059 struct ice_pkg_enum state; 2060 char *label_name; 2061 u16 val; 2062 int i; 2063 2064 memset(&hw->tnl, 0, sizeof(hw->tnl)); 2065 memset(&state, 0, sizeof(state)); 2066 2067 if (!ice_seg) 2068 return; 2069 2070 label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, 2071 &val); 2072 2073 while (label_name) { 2074 if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) 2075 /* check for a tunnel entry */ 2076 ice_add_tunnel_hint(hw, label_name, val); 2077 2078 /* check for a dvm mode entry */ 2079 else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE))) 2080 ice_add_dvm_hint(hw, val, true); 2081 2082 /* check for a svm mode entry */ 2083 else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE))) 2084 ice_add_dvm_hint(hw, val, false); 2085 2086 label_name = ice_enum_labels(NULL, 0, &state, &val); 2087 } 2088 2089 /* Cache the appropriate boost TCAM entry pointers for tunnels */ 2090 for (i = 0; i < hw->tnl.count; i++) { 2091 ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, 2092 &hw->tnl.tbl[i].boost_entry); 2093 if (hw->tnl.tbl[i].boost_entry) { 2094 hw->tnl.tbl[i].valid = true; 2095 if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT) 2096 hw->tnl.valid_count[hw->tnl.tbl[i].type]++; 2097 } 2098 } 2099 2100 /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */ 2101 for (i = 0; i < hw->dvm_upd.count; i++) 2102 ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr, 2103 &hw->dvm_upd.tbl[i].boost_entry); 2104 } 2105 2106 /** 2107 * ice_fill_hw_ptype - fill the enabled PTYPE bit information 2108 * @hw: pointer to the HW structure 2109 */ 2110 static void ice_fill_hw_ptype(struct ice_hw *hw) 2111 { 2112 struct ice_marker_ptype_tcam_entry *tcam; 2113 struct ice_seg *seg = hw->seg; 2114 struct ice_pkg_enum state; 2115 2116 bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); 2117 if (!seg) 2118 return; 2119 2120 memset(&state, 0, 
sizeof(state)); 2121 2122 do { 2123 tcam = ice_pkg_enum_entry(seg, &state, 2124 ICE_SID_RXPARSER_MARKER_PTYPE, NULL, 2125 ice_marker_ptype_tcam_handler); 2126 if (tcam && 2127 le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX && 2128 le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX) 2129 set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype); 2130 2131 seg = NULL; 2132 } while (tcam); 2133 } 2134 2135 /** 2136 * ice_init_pkg - initialize/download package 2137 * @hw: pointer to the hardware structure 2138 * @buf: pointer to the package buffer 2139 * @len: size of the package buffer 2140 * 2141 * This function initializes a package. The package contains HW tables 2142 * required to do packet processing. First, the function extracts package 2143 * information such as version. Then it finds the ice configuration segment 2144 * within the package; this function then saves a copy of the segment pointer 2145 * within the supplied package buffer. Next, the function will cache any hints 2146 * from the package, followed by downloading the package itself. Note, that if 2147 * a previous PF driver has already downloaded the package successfully, then 2148 * the current driver will not have to download the package again. 2149 * 2150 * The local package contents will be used to query default behavior and to 2151 * update specific sections of the HW's version of the package (e.g. to update 2152 * the parse graph to understand new protocols). 2153 * 2154 * This function stores a pointer to the package buffer memory, and it is 2155 * expected that the supplied buffer will not be freed immediately. If the 2156 * package buffer needs to be freed, such as when read from a file, use 2157 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this 2158 * case. 
2159 */ 2160 enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) 2161 { 2162 bool already_loaded = false; 2163 enum ice_ddp_state state; 2164 struct ice_pkg_hdr *pkg; 2165 struct ice_seg *seg; 2166 2167 if (!buf || !len) 2168 return ICE_DDP_PKG_ERR; 2169 2170 pkg = (struct ice_pkg_hdr *)buf; 2171 state = ice_verify_pkg(pkg, len); 2172 if (state) { 2173 ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", 2174 state); 2175 return state; 2176 } 2177 2178 /* initialize package info */ 2179 state = ice_init_pkg_info(hw, pkg); 2180 if (state) 2181 return state; 2182 2183 /* must be a matching segment */ 2184 if (hw->pkg_has_signing_seg && 2185 !ice_match_signing_seg(pkg, hw->pkg_seg_id, hw->pkg_sign_type)) 2186 return ICE_DDP_PKG_ERR; 2187 2188 /* before downloading the package, check package version for 2189 * compatibility with driver 2190 */ 2191 state = ice_chk_pkg_compat(hw, pkg, &seg); 2192 if (state) 2193 return state; 2194 2195 /* initialize package hints and then download package */ 2196 ice_init_pkg_hints(hw, seg); 2197 state = ice_download_pkg(hw, pkg, seg); 2198 if (state == ICE_DDP_PKG_ALREADY_LOADED) { 2199 ice_debug(hw, ICE_DBG_INIT, 2200 "package previously loaded - no work.\n"); 2201 already_loaded = true; 2202 } 2203 2204 /* Get information on the package currently loaded in HW, then make sure 2205 * the driver is compatible with this version. 2206 */ 2207 if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) { 2208 state = ice_get_pkg_info(hw); 2209 if (!state) 2210 state = ice_get_ddp_pkg_state(hw, already_loaded); 2211 } 2212 2213 if (ice_is_init_pkg_successful(state)) { 2214 hw->seg = seg; 2215 /* on successful package download update other required 2216 * registers to support the package and fill HW tables 2217 * with package content. 
2218 */ 2219 ice_init_pkg_regs(hw); 2220 ice_fill_blk_tbls(hw); 2221 ice_fill_hw_ptype(hw); 2222 ice_get_prof_index_max(hw); 2223 } else { 2224 ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", state); 2225 } 2226 2227 return state; 2228 } 2229 2230 /** 2231 * ice_copy_and_init_pkg - initialize/download a copy of the package 2232 * @hw: pointer to the hardware structure 2233 * @buf: pointer to the package buffer 2234 * @len: size of the package buffer 2235 * 2236 * This function copies the package buffer, and then calls ice_init_pkg() to 2237 * initialize the copied package contents. 2238 * 2239 * The copying is necessary if the package buffer supplied is constant, or if 2240 * the memory may disappear shortly after calling this function. 2241 * 2242 * If the package buffer resides in the data segment and can be modified, the 2243 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). 2244 * 2245 * However, if the package buffer needs to be copied first, such as when being 2246 * read from a file, the caller should use ice_copy_and_init_pkg(). 2247 * 2248 * This function will first copy the package buffer, before calling 2249 * ice_init_pkg(). The caller is free to immediately destroy the original 2250 * package buffer, as the new copy will be managed by this function and 2251 * related routines. 
2252 */ 2253 enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, 2254 u32 len) 2255 { 2256 enum ice_ddp_state state; 2257 u8 *buf_copy; 2258 2259 if (!buf || !len) 2260 return ICE_DDP_PKG_ERR; 2261 2262 buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); 2263 2264 state = ice_init_pkg(hw, buf_copy, len); 2265 if (!ice_is_init_pkg_successful(state)) { 2266 /* Free the copy, since we failed to initialize the package */ 2267 devm_kfree(ice_hw_to_dev(hw), buf_copy); 2268 } else { 2269 /* Track the copied pkg so we can free it later */ 2270 hw->pkg_copy = buf_copy; 2271 hw->pkg_size = len; 2272 } 2273 2274 return state; 2275 } 2276 2277 /** 2278 * ice_get_set_tx_topo - get or set Tx topology 2279 * @hw: pointer to the HW struct 2280 * @buf: pointer to Tx topology buffer 2281 * @buf_size: buffer size 2282 * @cd: pointer to command details structure or NULL 2283 * @flags: pointer to descriptor flags 2284 * @set: 0-get, 1-set topology 2285 * 2286 * The function will get or set Tx topology 2287 * 2288 * Return: zero when set was successful, negative values otherwise. 
2289 */ 2290 static int 2291 ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size, 2292 struct ice_sq_cd *cd, u8 *flags, bool set) 2293 { 2294 struct ice_aqc_get_set_tx_topo *cmd; 2295 struct ice_aq_desc desc; 2296 int status; 2297 2298 cmd = &desc.params.get_set_tx_topo; 2299 if (set) { 2300 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo); 2301 cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED; 2302 /* requested to update a new topology, not a default topology */ 2303 if (buf) 2304 cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM | 2305 ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW; 2306 2307 if (ice_is_e825c(hw)) 2308 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2309 } else { 2310 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo); 2311 cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM; 2312 } 2313 2314 if (!ice_is_e825c(hw)) 2315 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2316 2317 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2318 if (status) 2319 return status; 2320 /* read the return flag values (first byte) for get operation */ 2321 if (!set && flags) 2322 *flags = desc.params.get_set_tx_topo.set_flags; 2323 2324 return 0; 2325 } 2326 2327 /** 2328 * ice_cfg_tx_topo - Initialize new Tx topology if available 2329 * @hw: pointer to the HW struct 2330 * @buf: pointer to Tx topology buffer 2331 * @len: buffer size 2332 * 2333 * The function will apply the new Tx topology from the package buffer 2334 * if available. 2335 * 2336 * Return: zero when update was successful, negative values otherwise. 2337 */ 2338 int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len) 2339 { 2340 u8 *current_topo, *new_topo = NULL; 2341 struct ice_run_time_cfg_seg *seg; 2342 struct ice_buf_hdr *section; 2343 struct ice_pkg_hdr *pkg_hdr; 2344 enum ice_ddp_state state; 2345 u16 offset, size = 0; 2346 u32 reg = 0; 2347 int status; 2348 u8 flags; 2349 2350 if (!buf || !len) 2351 return -EINVAL; 2352 2353 /* Does FW support new Tx topology mode ? 
*/ 2354 if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) { 2355 ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n"); 2356 return -EOPNOTSUPP; 2357 } 2358 2359 current_topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2360 if (!current_topo) 2361 return -ENOMEM; 2362 2363 /* Get the current Tx topology */ 2364 status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL, 2365 &flags, false); 2366 2367 kfree(current_topo); 2368 2369 if (status) { 2370 ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n"); 2371 return status; 2372 } 2373 2374 /* Is default topology already applied ? */ 2375 if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && 2376 hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) { 2377 ice_debug(hw, ICE_DBG_INIT, "Default topology already applied\n"); 2378 return -EEXIST; 2379 } 2380 2381 /* Is new topology already applied ? */ 2382 if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && 2383 hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) { 2384 ice_debug(hw, ICE_DBG_INIT, "New topology already applied\n"); 2385 return -EEXIST; 2386 } 2387 2388 /* Setting topology already issued? 
*/ 2389 if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) { 2390 ice_debug(hw, ICE_DBG_INIT, "Update Tx topology was done by another PF\n"); 2391 /* Add a small delay before exiting */ 2392 msleep(2000); 2393 return -EEXIST; 2394 } 2395 2396 /* Change the topology from new to default (5 to 9) */ 2397 if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && 2398 hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) { 2399 ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n"); 2400 goto update_topo; 2401 } 2402 2403 pkg_hdr = (struct ice_pkg_hdr *)buf; 2404 state = ice_verify_pkg(pkg_hdr, len); 2405 if (state) { 2406 ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n", 2407 state); 2408 return -EIO; 2409 } 2410 2411 /* Find runtime configuration segment */ 2412 seg = (struct ice_run_time_cfg_seg *) 2413 ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr); 2414 if (!seg) { 2415 ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n"); 2416 return -EIO; 2417 } 2418 2419 if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) { 2420 ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n", 2421 seg->buf_table.buf_count); 2422 return -EIO; 2423 } 2424 2425 section = ice_pkg_val_buf(seg->buf_table.buf_array); 2426 if (!section || le32_to_cpu(section->section_entry[0].type) != 2427 ICE_SID_TX_5_LAYER_TOPO) { 2428 ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n"); 2429 return -EIO; 2430 } 2431 2432 size = le16_to_cpu(section->section_entry[0].size); 2433 offset = le16_to_cpu(section->section_entry[0].offset); 2434 if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) { 2435 ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n"); 2436 return -EIO; 2437 } 2438 2439 /* Make sure the section fits in the buffer */ 2440 if (offset + size > ICE_PKG_BUF_SIZE) { 2441 ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n"); 2442 return -EIO; 2443 } 2444 2445 /* Get the new topology buffer */ 2446 
new_topo = ((u8 *)section) + offset; 2447 2448 update_topo: 2449 /* Acquire global lock to make sure that set topology issued 2450 * by one PF. 2451 */ 2452 status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE, 2453 ICE_GLOBAL_CFG_LOCK_TIMEOUT); 2454 if (status) { 2455 ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n"); 2456 return status; 2457 } 2458 2459 /* Check if reset was triggered already. */ 2460 reg = rd32(hw, GLGEN_RSTAT); 2461 if (reg & GLGEN_RSTAT_DEVSTATE_M) { 2462 /* Reset is in progress, re-init the HW again */ 2463 ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n"); 2464 ice_check_reset(hw); 2465 return 0; 2466 } 2467 2468 /* Set new topology */ 2469 status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true); 2470 if (status) { 2471 ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n"); 2472 return status; 2473 } 2474 2475 /* New topology is updated, delay 1 second before issuing the CORER */ 2476 msleep(1000); 2477 ice_reset(hw, ICE_RESET_CORER); 2478 /* CORER will clear the global lock, so no explicit call 2479 * required for release. 2480 */ 2481 2482 return 0; 2483 } 2484