// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Intel Corporation. */

#include "ice_common.h"
#include "ice.h"
#include "ice_ddp.h"

/* For supporting double VLAN mode, it is necessary to enable or disable certain
 * boost tcam entries. The metadata label names that match the following
 * prefixes will be saved to allow enabling double VLAN mode.
 */
#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */

/* To support tunneling entries by PF, the package will append the PF number to
 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
 */
#define ICE_TNL_PRE "TNL_"
static const struct ice_tunnel_type_scan tnls[] = {
	{ TNL_VXLAN, "TNL_VXLAN_PF" },
	{ TNL_GENEVE, "TNL_GENEVE_PF" },
	{ TNL_LAST, "" }
};

/**
 * ice_verify_pkg - verify package
 * @pkg: pointer to the package buffer
 * @len: size of the package buffer
 *
 * Verifies various attributes of the package file, including length, format
 * version, and the requirement of at least one segment.
 */
static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
	u32 seg_count;
	u32 i;

	if (len < struct_size(pkg, seg_offset, 1))
		return ICE_DDP_PKG_INVALID_FILE;

	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
		return ICE_DDP_PKG_INVALID_FILE;

	/* pkg must have at least one segment */
	seg_count = le32_to_cpu(pkg->seg_count);
	if (seg_count < 1)
		return ICE_DDP_PKG_INVALID_FILE;

	/* make sure segment array fits in package length */
	if (len < struct_size(pkg, seg_offset, seg_count))
		return ICE_DDP_PKG_INVALID_FILE;

	/* all segments must fit within length */
	for (i = 0; i < seg_count; i++) {
		u32 off = le32_to_cpu(pkg->seg_offset[i]);
		struct ice_generic_seg_hdr *seg;

		/* segment header must fit */
		if (len < off + sizeof(*seg))
			return ICE_DDP_PKG_INVALID_FILE;

		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);

		/* segment body must fit */
		if (len < off + le32_to_cpu(seg->seg_size))
			return ICE_DDP_PKG_INVALID_FILE;
	}

	return ICE_DDP_PKG_SUCCESS;
}

/**
 * ice_free_seg - free package segment pointer
 * @hw: pointer to the hardware structure
 *
 * Frees the package segment pointer in the proper manner, depending on whether
 * the segment was allocated or only the passed-in pointer was stored.
 */
void ice_free_seg(struct ice_hw *hw)
{
	if (hw->pkg_copy) {
		devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
		hw->pkg_copy = NULL;
		hw->pkg_size = 0;
	}
	hw->seg = NULL;
}

/**
 * ice_chk_pkg_version - check package version for compatibility with driver
 * @pkg_ver: pointer to a version structure to check
 *
 * Check to make sure that the package about to be downloaded is compatible with
 * the driver. To be compatible, the major and minor components of the package
 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
 * definitions.
 */
static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
	if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
	    (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
	     pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
		return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
	else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
		 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
		  pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
		return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;

	return ICE_DDP_PKG_SUCCESS;
}

/**
 * ice_pkg_val_buf
 * @buf: pointer to the ice buffer
 *
 * This helper function validates a buffer's header.
 */
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
	struct ice_buf_hdr *hdr;
	u16 section_count;
	u16 data_end;

	hdr = (struct ice_buf_hdr *)buf->buf;
	/* verify data */
	section_count = le16_to_cpu(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
		return NULL;

	data_end = le16_to_cpu(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
		return NULL;

	return hdr;
}

/**
 * ice_find_buf_table
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table within the ice segment.
 */
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
	struct ice_nvm_table *nvms = (struct ice_nvm_table *)
		(ice_seg->device_table + le32_to_cpu(ice_seg->device_table_count));

	return (__force struct ice_buf_table *)(nvms->vers +
						le32_to_cpu(nvms->table_count));
}

/**
 * ice_pkg_enum_buf
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 * ice_seg is set to NULL which continues the enumeration. When the function
 * returns a NULL pointer, then the end of the buffers has been reached, or an
 * unexpected value has been detected (for example an invalid section count or
 * an invalid buffer end value).
 */
static struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg,
					    struct ice_pkg_enum *state)
{
	if (ice_seg) {
		state->buf_table = ice_find_buf_table(ice_seg);
		if (!state->buf_table)
			return NULL;

		state->buf_idx = 0;
		return ice_pkg_val_buf(state->buf_table->buf_array);
	}

	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +
				       state->buf_idx);
	else
		return NULL;
}

/**
 * ice_pkg_advance_sect
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This helper function will advance the section within the ice segment,
 * also advancing the buffer if needed.
 */
static bool ice_pkg_advance_sect(struct ice_seg *ice_seg,
				 struct ice_pkg_enum *state)
{
	if (!ice_seg && !state->buf)
		return false;

	if (!ice_seg && state->buf)
		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
			return true;

	state->buf = ice_pkg_enum_buf(ice_seg, state);
	if (!state->buf)
		return false;

	/* start of new buffer, reset section index */
	state->sect_idx = 0;
	return true;
}

/**
 * ice_pkg_enum_section
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the matching
 * sections has been reached.
 */
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
			   u32 sect_type)
{
	u16 offset, size;

	if (ice_seg)
		state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))
		return NULL;

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       cpu_to_le32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))
			return NULL;

	/* validate section */
	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
		return NULL;

	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
		return NULL;

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)
		return NULL;

	state->sect_type =
		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect =
		((u8 *)state->buf) +
		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);

	return state->sect;
}

/**
 * ice_pkg_enum_entry
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that handles access to the entries into the section type
 *
 * This function will enumerate all the entries in a particular section type in
 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the entries has
 * been reached.
 *
 * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location of entries in each
 * section.
 *
 * The offset parameter is optional, but should be used for sections that
 * contain an offset for each section table. For such cases, the section handler
 * function must return the appropriate offset + index to give the absolute
 * offset for each entry.
 * For example, if the base offset indicated in a section's header is 10, and
 * the index for the entry is 2, then the section handler function should set
 * the offset to 10 + 2 = 12.
 */
static void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
				struct ice_pkg_enum *state, u32 sect_type,
				u32 *offset,
				void *(*handler)(u32 sect_type, void *section,
						 u32 index, u32 *offset))
{
	void *entry;

	if (ice_seg) {
		if (!handler)
			return NULL;

		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
			return NULL;

		state->entry_idx = 0;
		state->handler = handler;
	} else {
		state->entry_idx++;
	}

	if (!state->handler)
		return NULL;

	/* get entry */
	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
			       offset);
	if (!entry) {
		/* end of a section, look for another section of this type */
		if (!ice_pkg_enum_section(NULL, state, 0))
			return NULL;

		state->entry_idx = 0;
		entry = state->handler(state->sect_type, state->sect,
				       state->entry_idx, offset);
	}

	return entry;
}

/**
 * ice_sw_fv_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the field vector entry to be returned
 * @offset: ptr to variable that receives the offset in the field vector table
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * This function treats the given section as of type ice_sw_fv_section and
 * enumerates the offset field. "offset" is an index into the field vector
 * table.
 */
static void *ice_sw_fv_handler(u32 sect_type, void *section, u32 index,
			       u32 *offset)
{
	struct ice_sw_fv_section *fv_section = section;

	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
		return NULL;
	if (index >= le16_to_cpu(fv_section->count))
		return NULL;
	if (offset)
		/* "index" passed in to this function is relative to a given
		 * 4k block. To get to the true index into the field vector
		 * table, we need to add the relative index to the base_offset
		 * field of this section.
		 */
		*offset = le16_to_cpu(fv_section->base_offset) + index;
	return fv_section->fv + index;
}

/**
 * ice_get_prof_index_max - get the max profile index for used profile
 * @hw: pointer to the HW struct
 *
 * Calling this function will get the max profile index of the profiles in use
 * and store the index number in struct ice_switch_info *switch_info in HW for
 * later use.
 */
static int ice_get_prof_index_max(struct ice_hw *hw)
{
	u16 prof_index = 0, j, max_prof_index = 0;
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	bool flag = false;
	struct ice_fv *fv;
	u32 offset;

	memset(&state, 0, sizeof(state));

	if (!hw->seg)
		return -EINVAL;

	ice_seg = hw->seg;

	do {
		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					&offset, ice_sw_fv_handler);
		if (!fv)
			break;
		ice_seg = NULL;

		/* in a profile that is not used, the prot_id is set to 0xff
		 * and the off is set to 0x1ff for all the field vectors.
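		 * Those values correspond to ICE_PROT_INVALID and
		 * ICE_FV_OFFSET_INVAL, which the loop below tests for.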
		 */
		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
				flag = true;
		if (flag && prof_index > max_prof_index)
			max_prof_index = prof_index;

		prof_index++;
		flag = false;
	} while (fv);

	hw->switch_info->max_used_prof_index = max_prof_index;

	return 0;
}

/**
 * ice_get_ddp_pkg_state - get DDP pkg state after download
 * @hw: pointer to the HW struct
 * @already_loaded: indicates if pkg was already loaded onto the device
 */
static enum ice_ddp_state ice_get_ddp_pkg_state(struct ice_hw *hw,
						bool already_loaded)
{
	if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
	    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
	    hw->pkg_ver.update == hw->active_pkg_ver.update &&
	    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
	    !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
		if (already_loaded)
			return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
		else
			return ICE_DDP_PKG_SUCCESS;
	} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
		   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
		return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
	} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
		   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
		return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
	} else {
		return ICE_DDP_PKG_ERR;
	}
}

/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX 0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
}

/**
 * ice_marker_ptype_tcam_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the Marker PType TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual Marker PType TCAM entries.
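 *
 * It is typically handed to ice_pkg_enum_entry(); a sketch of that usage
 * (mirroring how ice_fill_hw_ptype() later in this file drives it):
 *
 *	tcam = ice_pkg_enum_entry(seg, &state, ICE_SID_RXPARSER_MARKER_PTYPE,
 *				  NULL, ice_marker_ptype_tcam_handler);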
 */
static void *ice_marker_ptype_tcam_handler(u32 sect_type, void *section,
					   u32 index, u32 *offset)
{
	struct ice_marker_ptype_tcam_section *marker_ptype;

	if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
		return NULL;

	if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	marker_ptype = section;
	if (index >= le16_to_cpu(marker_ptype->count))
		return NULL;

	return marker_ptype->tcam + index;
}

/**
 * ice_add_dvm_hint
 * @hw: pointer to the HW structure
 * @val: value of the boost entry
 * @enable: true if entry needs to be enabled, or false if needs to be disabled
 */
static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
{
	if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
		hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
		hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
		hw->dvm_upd.count++;
	}
}

/**
 * ice_add_tunnel_hint
 * @hw: pointer to the HW structure
 * @label_name: label text
 * @val: value of the tunnel port boost entry
 */
static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
{
	if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
		u16 i;

		for (i = 0; tnls[i].type != TNL_LAST; i++) {
			size_t len = strlen(tnls[i].label_prefix);

			/* Look for matching label start, before continuing */
			if (strncmp(label_name, tnls[i].label_prefix, len))
				continue;

			/* Make sure this label matches our PF. Note that the
			 * PF character ('0' - '7') will be located where our
			 * prefix string's null terminator is located.
			 */
			if ((label_name[len] - '0') == hw->pf_id) {
				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
				hw->tnl.tbl[hw->tnl.count].valid = false;
				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
				hw->tnl.tbl[hw->tnl.count].port = 0;
				hw->tnl.count++;
				break;
			}
		}
	}
}

/**
 * ice_label_enum_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the label entry to be returned
 * @offset: pointer to receive absolute offset, always zero for label sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual label entries.
 */
static void *ice_label_enum_handler(u32 __always_unused sect_type,
				    void *section, u32 index, u32 *offset)
{
	struct ice_label_section *labels;

	if (!section)
		return NULL;

	if (index > ICE_MAX_LABELS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	labels = section;
	if (index >= le16_to_cpu(labels->count))
		return NULL;

	return labels->label + index;
}

/**
 * ice_enum_labels
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @type: the section type that will contain the label (0 on subsequent calls)
 * @state: ice_pkg_enum structure that will hold the state of the enumeration
 * @value: pointer to a value that will return the label's value if found
 *
 * Enumerates a list of labels in the package. The caller will call
 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
 * the end of the list has been reached.
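 *
 * Illustrative call pattern (a sketch of how ice_init_pkg_hints() below drives
 * this helper; it uses only functions already defined in this file):
 *
 *	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM,
 *				     &state, &val);
 *	while (label_name) {
 *		// inspect label_name / val here
 *		label_name = ice_enum_labels(NULL, 0, &state, &val);
 *	}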
 */
static char *ice_enum_labels(struct ice_seg *ice_seg, u32 type,
			     struct ice_pkg_enum *state, u16 *value)
{
	struct ice_label *label;

	/* Check for valid label section on first call */
	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
		return NULL;

	label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
				   ice_label_enum_handler);
	if (!label)
		return NULL;

	*value = le16_to_cpu(label->value);
	return label->name;
}

/**
 * ice_boost_tcam_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the boost TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual boost TCAM entries.
 */
static void *ice_boost_tcam_handler(u32 sect_type, void *section, u32 index,
				    u32 *offset)
{
	struct ice_boost_tcam_section *boost;

	if (!section)
		return NULL;

	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
		return NULL;

	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	boost = section;
	if (index >= le16_to_cpu(boost->count))
		return NULL;

	return boost->tcam + index;
}

/**
 * ice_find_boost_entry
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @addr: Boost TCAM address of entry to search for
 * @entry: returns pointer to the entry
 *
 * Finds a particular Boost TCAM entry and returns a pointer to that entry
 * if it is found. The ice_seg parameter must not be NULL since the first call
 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
 */
static int ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
				struct ice_boost_tcam_entry **entry)
{
	struct ice_boost_tcam_entry *tcam;
	struct ice_pkg_enum state;

	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return -EINVAL;

	do {
		tcam = ice_pkg_enum_entry(ice_seg, &state,
					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
					  ice_boost_tcam_handler);
		if (tcam && le16_to_cpu(tcam->addr) == addr) {
			*entry = tcam;
			return 0;
		}

		ice_seg = NULL;
	} while (tcam);

	*entry = NULL;
	return -EIO;
}

/**
 * ice_is_init_pkg_successful - check if DDP init was successful
 * @state: state of the DDP pkg after download
 */
bool ice_is_init_pkg_successful(enum ice_ddp_state state)
{
	switch (state) {
	case ICE_DDP_PKG_SUCCESS:
	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
		return true;
	default:
		return false;
	}
}

/**
 * ice_pkg_buf_alloc
 * @hw: pointer to the HW structure
 *
 * Allocates a package buffer and returns a pointer to the buffer header.
 * Note: all package contents must be in Little Endian form.
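 *
 * Typical build-and-update flow, as a sketch only (sect_type/sect_size stand
 * for caller-chosen values and are not part of this API):
 *
 *	bld = ice_pkg_buf_alloc(hw);
 *	if (!bld)
 *		return -ENOMEM;
 *	if (ice_pkg_buf_reserve_section(bld, 1))
 *		goto out;
 *	sect = ice_pkg_buf_alloc_section(bld, sect_type, sect_size);
 *	// ... fill in the section contents (Little Endian) ...
 *	ice_update_pkg(hw, ice_pkg_buf(bld), 1);
 * out:
 *	ice_pkg_buf_free(hw, bld);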
 */
struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
{
	struct ice_buf_build *bld;
	struct ice_buf_hdr *buf;

	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
	if (!bld)
		return NULL;

	buf = (struct ice_buf_hdr *)bld;
	buf->data_end =
		cpu_to_le16(offsetof(struct ice_buf_hdr, section_entry));
	return bld;
}

static bool ice_is_gtp_u_profile(u16 prof_idx)
{
	return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
		prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) ||
	       prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
}

static bool ice_is_gtp_c_profile(u16 prof_idx)
{
	switch (prof_idx) {
	case ICE_PROFID_IPV4_GTPC_TEID:
	case ICE_PROFID_IPV4_GTPC_NO_TEID:
	case ICE_PROFID_IPV6_GTPC_TEID:
	case ICE_PROFID_IPV6_GTPC_NO_TEID:
		return true;
	default:
		return false;
	}
}

static bool ice_is_pfcp_profile(u16 prof_idx)
{
	return prof_idx >= ICE_PROFID_IPV4_PFCP_NODE &&
	       prof_idx <= ICE_PROFID_IPV6_PFCP_SESSION;
}

/**
 * ice_get_sw_prof_type - determine switch profile type
 * @hw: pointer to the HW structure
 * @fv: pointer to the switch field vector
 * @prof_idx: profile index to check
 */
static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw,
					       struct ice_fv *fv, u32 prof_idx)
{
	u16 i;

	if (ice_is_gtp_c_profile(prof_idx))
		return ICE_PROF_TUN_GTPC;

	if (ice_is_gtp_u_profile(prof_idx))
		return ICE_PROF_TUN_GTPU;

	if (ice_is_pfcp_profile(prof_idx))
		return ICE_PROF_TUN_PFCP;

	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
		    fv->ew[i].off == ICE_VNI_OFFSET)
			return ICE_PROF_TUN_UDP;

		/* GRE tunnel will have GRE protocol */
		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
			return ICE_PROF_TUN_GRE;
	}

	return ICE_PROF_NON_TUN;
}

/**
 * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
 * @hw: pointer to hardware structure
 * @req_profs: type of profiles requested
 * @bm: pointer to memory for returning the bitmap of field vectors
 */
void ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
			  unsigned long *bm)
{
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	struct ice_fv *fv;

	if (req_profs == ICE_PROF_ALL) {
		bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
		return;
	}

	memset(&state, 0, sizeof(state));
	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
	ice_seg = hw->seg;
	do {
		enum ice_prof_type prof_type;
		u32 offset;

		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					&offset, ice_sw_fv_handler);
		ice_seg = NULL;

		if (fv) {
			/* Determine field vector type */
			prof_type = ice_get_sw_prof_type(hw, fv, offset);

			if (req_profs & prof_type)
				set_bit((u16)offset, bm);
		}
	} while (fv);
}

/**
 * ice_get_sw_fv_list
 * @hw: pointer to the HW structure
 * @lkups: list of protocol types
 * @bm: bitmap of field vectors to consider
 * @fv_list: Head of a list
 *
 * Finds all the field vector entries from switch block that contain
 * a given protocol ID and offset and returns a list of structures of type
 * "ice_sw_fv_list_entry".
 * Every structure in the list has a field vector definition and profile ID
 * information.
 * NOTE: The caller of the function is responsible for freeing the memory
 * allocated for every list entry.
 */
int ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
		       unsigned long *bm, struct list_head *fv_list)
{
	struct ice_sw_fv_list_entry *fvl;
	struct ice_sw_fv_list_entry *tmp;
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	struct ice_fv *fv;
	u32 offset;

	memset(&state, 0, sizeof(state));

	if (!lkups->n_val_words || !hw->seg)
		return -EINVAL;

	ice_seg = hw->seg;
	do {
		u16 i;

		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					&offset, ice_sw_fv_handler);
		if (!fv)
			break;
		ice_seg = NULL;

		/* If field vector is not in the bitmap list, then skip this
		 * profile.
		 */
		if (!test_bit((u16)offset, bm))
			continue;

		for (i = 0; i < lkups->n_val_words; i++) {
			int j;

			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv->ew[j].prot_id ==
				    lkups->fv_words[i].prot_id &&
				    fv->ew[j].off == lkups->fv_words[i].off)
					break;
			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
				break;
			if (i + 1 == lkups->n_val_words) {
				fvl = devm_kzalloc(ice_hw_to_dev(hw),
						   sizeof(*fvl), GFP_KERNEL);
				if (!fvl)
					goto err;
				fvl->fv_ptr = fv;
				fvl->profile_id = offset;
				list_add(&fvl->list_entry, fv_list);
				break;
			}
		}
	} while (fv);
	if (list_empty(fv_list)) {
		dev_warn(ice_hw_to_dev(hw),
			 "Required profiles not found in currently loaded DDP package");
		return -EIO;
	}

	return 0;

err:
	list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
		list_del(&fvl->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fvl);
	}

	return -ENOMEM;
}

/**
 * ice_init_prof_result_bm - Initialize the profile result index bitmap
 * @hw: pointer to hardware structure
 */
void ice_init_prof_result_bm(struct ice_hw *hw)
{
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	struct ice_fv *fv;

	memset(&state, 0, sizeof(state));

	if (!hw->seg)
		return;

	ice_seg = hw->seg;
	do {
		u32 off;
		u16 i;

		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					&off, ice_sw_fv_handler);
		ice_seg = NULL;
		if (!fv)
			break;

		bitmap_zero(hw->switch_info->prof_res_bm[off],
			    ICE_MAX_FV_WORDS);

		/* Determine empty field vector indices, these can be
		 * used for recipe results. Skip index 0, since it is
		 * always used for Switch ID.
		 */
		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
				set_bit(i, hw->switch_info->prof_res_bm[off]);
	} while (fv);
}

/**
 * ice_pkg_buf_free
 * @hw: pointer to the HW structure
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Frees a package buffer
 */
void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
	devm_kfree(ice_hw_to_dev(hw), bld);
}

/**
 * ice_pkg_buf_reserve_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @count: the number of sections to reserve
 *
 * Reserves one or more section table entries in a package buffer. This routine
 * can be called multiple times as long as the calls are made before calling
 * ice_pkg_buf_alloc_section().
 * Once ice_pkg_buf_alloc_section() has been called, the number of sections
 * that can be allocated can no longer be increased; not using all reserved
 * sections is fine, but this will result in some wasted space in the buffer.
 * Note: all package contents must be in Little Endian form.
 */
int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
	struct ice_buf_hdr *buf;
	u16 section_count;
	u16 data_end;

	if (!bld)
		return -EINVAL;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* already an active section, can't increase table size */
	section_count = le16_to_cpu(buf->section_count);
	if (section_count > 0)
		return -EIO;

	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
		return -EIO;
	bld->reserved_section_table_entries += count;

	data_end = le16_to_cpu(buf->data_end) +
		   flex_array_size(buf, section_entry, count);
	buf->data_end = cpu_to_le16(data_end);

	return 0;
}

/**
 * ice_pkg_buf_alloc_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 *
 * Reserves memory in the buffer for a section's content and updates the
 * buffer's status accordingly. This routine returns a pointer to the first
 * byte of the section start within the buffer, which is used to fill in the
 * section contents.
 * Note: all package contents must be in Little Endian form.
 */
void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
{
	struct ice_buf_hdr *buf;
	u16 sect_count;
	u16 data_end;

	if (!bld || !type || !size)
		return NULL;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* check for enough space left in buffer */
	data_end = le16_to_cpu(buf->data_end);

	/* section start must align on 4 byte boundary */
	data_end = ALIGN(data_end, 4);

	if ((data_end + size) > ICE_MAX_S_DATA_END)
		return NULL;

	/* check for more available section table entries */
	sect_count = le16_to_cpu(buf->section_count);
	if (sect_count < bld->reserved_section_table_entries) {
		void *section_ptr = ((u8 *)buf) + data_end;

		buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
		buf->section_entry[sect_count].size = cpu_to_le16(size);
		buf->section_entry[sect_count].type = cpu_to_le32(type);

		data_end += size;
		buf->data_end = cpu_to_le16(data_end);

		buf->section_count = cpu_to_le16(sect_count + 1);
		return section_ptr;
	}

	/* no free section table entries */
	return NULL;
}

/**
 * ice_pkg_buf_alloc_single_section
 * @hw: pointer to the HW structure
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 * @section: returns pointer to the section
 *
 * Allocates a package buffer with a single section.
 * Note: all package contents must be in Little Endian form.
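 *
 * Rough usage sketch (type/size/sect stand for caller-chosen values and are
 * not defined by this API):
 *
 *	bld = ice_pkg_buf_alloc_single_section(hw, type, size, &sect);
 *	if (!bld)
 *		return -ENOMEM;
 *	// fill *sect, then hand ice_pkg_buf(bld) to an update/upload helper
 *	ice_pkg_buf_free(hw, bld);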
 */
struct ice_buf_build *ice_pkg_buf_alloc_single_section(struct ice_hw *hw,
							u32 type, u16 size,
							void **section)
{
	struct ice_buf_build *buf;

	if (!section)
		return NULL;

	buf = ice_pkg_buf_alloc(hw);
	if (!buf)
		return NULL;

	if (ice_pkg_buf_reserve_section(buf, 1))
		goto ice_pkg_buf_alloc_single_section_err;

	*section = ice_pkg_buf_alloc_section(buf, type, size);
	if (!*section)
		goto ice_pkg_buf_alloc_single_section_err;

	return buf;

ice_pkg_buf_alloc_single_section_err:
	ice_pkg_buf_free(hw, buf);
	return NULL;
}

/**
 * ice_pkg_buf_get_active_sections
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Returns the number of active sections. Before using the package buffer
 * in an update package command, the caller should make sure that there is at
 * least one active section - otherwise, the buffer is not legal and should
 * not be used.
 * Note: all package contents must be in Little Endian form.
 */
u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
{
	struct ice_buf_hdr *buf;

	if (!bld)
		return 0;

	buf = (struct ice_buf_hdr *)&bld->buf;
	return le16_to_cpu(buf->section_count);
}

/**
 * ice_pkg_buf
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Return a pointer to the buffer's header
 */
struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
	if (!bld)
		return NULL;

	return &bld->buf;
}

static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
{
	switch (aq_err) {
	case ICE_AQ_RC_ENOSEC:
	case ICE_AQ_RC_EBADSIG:
		return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
	case ICE_AQ_RC_ESVN:
		return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
	case ICE_AQ_RC_EBADMAN:
	case ICE_AQ_RC_EBADBUF:
		return ICE_DDP_PKG_LOAD_ERROR;
	default:
		return ICE_DDP_PKG_ERR;
	}
}

/**
 * ice_acquire_global_cfg_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the global config lock for reading
 * or writing of the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * 0          - Means the caller has acquired the global config lock
 *              and can perform writing of the package.
 * -EALREADY  - Indicates another driver has already written the
 *              package or has found that no update was necessary; in
 *              this case, the caller can just skip performing any
 *              update of the package.
 */
static int ice_acquire_global_cfg_lock(struct ice_hw *hw,
				       enum ice_aq_res_access_type access)
{
	int status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);

	if (!status)
		mutex_lock(&ice_global_cfg_lock_sw);
	else if (status == -EALREADY)
		ice_debug(hw, ICE_DBG_PKG,
			  "Global config lock: No work to do\n");

	return status;
}

/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock.
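 *
 * Pairs with ice_acquire_global_cfg_lock(); a sketch of the write-path shape
 * used by ice_dwnld_cfg_bufs() below:
 *
 *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 *	if (status == -EALREADY)
 *		return ICE_DDP_PKG_ALREADY_LOADED;
 *	if (status)
 *		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
 *	// ... download the configuration buffers ...
 *	ice_release_global_cfg_lock(hw);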
 */
static void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	mutex_unlock(&ice_global_cfg_lock_sw);
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}

/**
 * ice_aq_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package (0x0C40)
 */
static int
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	int status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == -EIO) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * ice_get_pkg_seg_by_idx
 * @pkg_hdr: pointer to the package header to be searched
 * @idx: index of segment
 */
static struct ice_generic_seg_hdr *
ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
{
	if (idx < le32_to_cpu(pkg_hdr->seg_count))
		return (struct ice_generic_seg_hdr *)
			((u8 *)pkg_hdr +
			 le32_to_cpu(pkg_hdr->seg_offset[idx]));

	return NULL;
}

/**
 * ice_is_signing_seg_at_idx - determine if segment is a signing segment
 * @pkg_hdr: pointer to package header
 * @idx: segment index
 */
static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
{
	struct ice_generic_seg_hdr *seg;

	seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx);
	if (!seg)
		return false;

	return le32_to_cpu(seg->seg_type) == SEGMENT_TYPE_SIGNING;
}

/**
 * ice_is_signing_seg_type_at_idx
 * @pkg_hdr: pointer to package header
 * @idx: segment index
 * @seg_id: segment id that is expected
 * @sign_type: signing type
 *
 * Determine if a segment is a signing segment of the correct type
 */
static bool
ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
			       u32 seg_id, u32 sign_type)
{
	struct ice_sign_seg *seg;

	if (!ice_is_signing_seg_at_idx(pkg_hdr, idx))
		return false;

	seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);

	if (seg && le32_to_cpu(seg->seg_id) == seg_id &&
	    le32_to_cpu(seg->sign_type) == sign_type)
		return true;

	return false;
}

/**
 * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
 * @buf: pointer to buffer header
 */
static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
{
	if (le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF)
		return true;

	return false;
}

/**
 * ice_is_last_download_buffer
 * @buf: pointer to current buffer header
 * @idx: index of the buffer in the current sequence
 * @count: the buffer count in the current sequence
 *
 * Note: this routine should only be called if the buffer is not the last buffer
 */
static bool
ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count)
{
	struct ice_buf *next_buf;

	if ((idx + 1) == count)
		return true;

	/* A set metadata flag in the next buffer will signal that the current
	 * buffer will be the last buffer downloaded
	 */
	next_buf = ((struct ice_buf *)buf) + 1;

	return ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf);
}

/**
 * ice_dwnld_cfg_bufs_no_lock
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @start: buffer index of first buffer to download
 * @count: the number of buffers to download
 * @indicate_last: if true, then set last buffer flag on last buffer download
 *
 * Downloads package configuration buffers to the firmware. Metadata buffers
 * are skipped, and the first metadata buffer found indicates that the rest
 * of the buffers are all metadata buffers.
 */
static enum ice_ddp_state
ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
			   u32 count, bool indicate_last)
{
	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
	struct ice_buf_hdr *bh;
	enum ice_aq_err err;
	u32 offset, info, i;

	if (!bufs || !count)
		return ICE_DDP_PKG_ERR;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)(bufs + start);
	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return ICE_DDP_PKG_SUCCESS;

	for (i = 0; i < count; i++) {
		bool last = false;
		int status;

		bh = (struct ice_buf_hdr *)(bufs + start + i);

		if (indicate_last)
			last = ice_is_last_download_buffer(bh, i, count);

		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
					     &offset, &info, NULL);

		/* Save AQ status from download package */
		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
				  status, offset, info);
			err = hw->adminq.sq_last_status;
			state = ice_map_aq_err_to_ddp_state(err);
			break;
		}

		if (last)
			break;
	}

	return state;
}

/**
 * ice_download_pkg_sig_seg - download a signature segment
 * @hw: pointer to the hardware structure
 * @seg: pointer to signature segment
 */
static enum ice_ddp_state
ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
{
	return ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0,
					  le32_to_cpu(seg->buf_tbl.buf_count),
					  false);
}

/**
 * ice_download_pkg_config_seg - download a config segment
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to package header
 * @idx: segment index
 * @start: starting buffer
 * @count: buffer count
 *
 * Note: idx must reference an ICE segment
 */
static enum ice_ddp_state
ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
			    u32 idx, u32 start, u32 count)
{
	struct ice_buf_table *bufs;
	struct ice_seg *seg;
	u32 buf_count;

	seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
	if (!seg)
		return ICE_DDP_PKG_ERR;

	bufs = ice_find_buf_table(seg);
	buf_count = le32_to_cpu(bufs->buf_count);

	if (start >= buf_count || start + count > buf_count)
		return ICE_DDP_PKG_ERR;

	return ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
					  true);
}

/**
 * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to package header
 * @idx: segment index (must be a signature segment)
 *
 * Note: idx must reference a signature segment
 */
static enum ice_ddp_state
ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
			    u32 idx)
{
	enum ice_ddp_state state;
	struct ice_sign_seg *seg;
	u32 conf_idx;
	u32 start;
	u32 count;

	seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
	if (!seg) {
		state = ICE_DDP_PKG_ERR;
		goto exit;
	}

	conf_idx = le32_to_cpu(seg->signed_seg_idx);
	start = le32_to_cpu(seg->signed_buf_start);
	count = le32_to_cpu(seg->signed_buf_count);

	state = ice_download_pkg_sig_seg(hw, seg);
	if (state)
		goto exit;

	state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
					    count);

exit:
	return state;
}

/**
 * ice_match_signing_seg - determine if a matching signing segment exists
 * @pkg_hdr: pointer to package header
 * @seg_id: segment id that is expected
 * @sign_type: signing type
 */
static bool
ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr,
		      u32 seg_id, u32 sign_type)
{
	u32 i;

	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
		if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id,
						   sign_type))
			return true;
	}

	return false;
}

/**
 * ice_post_dwnld_pkg_actions - perform post download package actions
 * @hw: pointer to the hardware structure
 */
static enum ice_ddp_state
ice_post_dwnld_pkg_actions(struct ice_hw *hw)
{
	int status;

	status = ice_set_vlan_mode(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
			  status);
		return ICE_DDP_PKG_ERR;
	}

	return ICE_DDP_PKG_SUCCESS;
}

/**
 * ice_download_pkg_with_sig_seg
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to package header
 *
 * Handles the download of a complete package.
 */
static enum ice_ddp_state
ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	enum ice_aq_err aq_err = hw->adminq.sq_last_status;
	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
	int status;
	u32 i;

	ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id);
	ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type);

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status) {
		if (status == -EALREADY)
			state = ICE_DDP_PKG_ALREADY_LOADED;
		else
			state = ice_map_aq_err_to_ddp_state(aq_err);
		return state;
	}

	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
		if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id,
						    hw->pkg_sign_type))
			continue;

		state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i);
		if (state)
			break;
	}

	if (!state)
		state = ice_post_dwnld_pkg_actions(hw);

	ice_release_global_cfg_lock(hw);

	return state;
}

/**
 * ice_dwnld_cfg_bufs
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains global config lock and downloads the package configuration buffers
 * to the firmware.
 */
static enum ice_ddp_state
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_ddp_state state;
	struct ice_buf_hdr *bh;
	int status;

	if (!bufs || !count)
		return ICE_DDP_PKG_ERR;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)bufs;
	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return ICE_DDP_PKG_SUCCESS;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status) {
		if (status == -EALREADY)
			return ICE_DDP_PKG_ALREADY_LOADED;
		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
	}

	state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
	if (!state)
		state = ice_post_dwnld_pkg_actions(hw);

	ice_release_global_cfg_lock(hw);

	return state;
}

/**
 * ice_download_pkg_without_sig_seg
 * @hw: pointer to the hardware structure
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package without a signature segment.
 */
static enum ice_ddp_state
ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_buf_table *ice_buf_tbl;

	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
		  ice_seg->hdr.seg_format_ver.major,
		  ice_seg->hdr.seg_format_ver.minor,
		  ice_seg->hdr.seg_format_ver.update,
		  ice_seg->hdr.seg_format_ver.draft);

	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
		  le32_to_cpu(ice_seg->hdr.seg_type),
		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);

	ice_buf_tbl = ice_find_buf_table(ice_seg);

	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
		  le32_to_cpu(ice_buf_tbl->buf_count));

	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
				  le32_to_cpu(ice_buf_tbl->buf_count));
}

/**
 * ice_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to package header
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package.
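 *
 * Summary of the body below: packages that carry a signature segment
 * (hw->pkg_has_signing_seg) are routed to ice_download_pkg_with_sig_seg(),
 * all others go through ice_download_pkg_without_sig_seg(), and the VLAN mode
 * configuration hook runs afterwards in either case.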
 */
static enum ice_ddp_state
ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
		 struct ice_seg *ice_seg)
{
	enum ice_ddp_state state;

	if (hw->pkg_has_signing_seg)
		state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
	else
		state = ice_download_pkg_without_sig_seg(hw, ice_seg);

	ice_post_pkg_dwnld_vlan_mode_cfg(hw);

	return state;
}

/**
 * ice_aq_get_pkg_info_list
 * @hw: pointer to the hardware structure
 * @pkg_info: the buffer which will receive the information list
 * @buf_size: the size of the pkg_info information buffer
 * @cd: pointer to command details structure or NULL
 *
 * Get Package Info List (0x0C43)
 */
static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
				    struct ice_aqc_get_pkg_info_resp *pkg_info,
				    u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);

	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}

/**
 * ice_aq_update_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Update Package (0x0C42)
 */
static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
			     u16 buf_size, bool last_buf, u32 *error_offset,
			     u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	int status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == -EIO) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * ice_aq_upload_section
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer which will receive the section
 * @buf_size: the size of the package buffer
 * @cd: pointer to command details structure or NULL
 *
 * Upload Section (0x0C41)
 */
int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
			  u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
}

/**
 * ice_update_pkg_no_lock
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 */
int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	int status = 0;
	u32 i;

	for (i = 0; i < count; i++) {
		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
		bool last = ((i + 1) == count);
		u32 offset, info;

		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
					   last, &offset, &info, NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_PKG,
				  "Update pkg failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}
	}

	return status;
}

/**
 * ice_update_pkg
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains change lock and updates package.
 */
int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	int status;

	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		return status;

	status = ice_update_pkg_no_lock(hw, bufs, count);

	ice_release_change_lock(hw);

	return status;
}

/**
 * ice_find_seg_in_pkg
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */
static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    struct ice_pkg_hdr *pkg_hdr)
{
	u32 i;

	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
		  pkg_hdr->pkg_format_ver.update,
		  pkg_hdr->pkg_format_ver.draft);

	/* Search all package segments for the requested segment type */
	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
		struct ice_generic_seg_hdr *seg;

		seg = (struct ice_generic_seg_hdr *)
			((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));

		if (le32_to_cpu(seg->seg_type) == seg_type)
			return seg;
	}

	return NULL;
}

/**
 * ice_has_signing_seg - determine if package has a signing segment
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 */
static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_generic_seg_hdr *seg_hdr;

	seg_hdr = (struct ice_generic_seg_hdr *)
		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr);

	return seg_hdr ? true : false;
}

/**
 * ice_get_pkg_segment_id - get correct package segment id, based on device
 * @mac_type: MAC type of the device
 */
static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
{
	u32 seg_id;

	switch (mac_type) {
	case ICE_MAC_E830:
		seg_id = SEGMENT_TYPE_ICE_E830;
		break;
	case ICE_MAC_GENERIC:
	case ICE_MAC_GENERIC_3K_E825:
	default:
		seg_id = SEGMENT_TYPE_ICE_E810;
		break;
	}

	return seg_id;
}

/**
 * ice_get_pkg_sign_type - get package segment sign type, based on device
 * @mac_type: MAC type of the device
 */
static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
{
	u32 sign_type;

	switch (mac_type) {
	case ICE_MAC_E830:
		sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB;
		break;
	case ICE_MAC_GENERIC_3K_E825:
		sign_type = SEGMENT_SIGN_TYPE_RSA3K_E825;
		break;
	case ICE_MAC_GENERIC:
	default:
		sign_type = SEGMENT_SIGN_TYPE_RSA2K;
		break;
	}

	return sign_type;
}

/**
 * ice_get_signing_req - get correct package requirements, based on device
 * @hw: pointer to the hardware structure
 */
static void ice_get_signing_req(struct ice_hw *hw)
{
	hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type);
	hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type);
}

/**
 * ice_init_pkg_info
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Saves off the package details into the HW structure.
 */
static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
					    struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_generic_seg_hdr *seg_hdr;

	if (!pkg_hdr)
		return ICE_DDP_PKG_ERR;

	hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr);
	ice_get_signing_req(hw);

	ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
		  hw->pkg_seg_id);

	seg_hdr = (struct ice_generic_seg_hdr *)
		ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
	if (seg_hdr) {
		struct ice_meta_sect *meta;
		struct ice_pkg_enum state;

		memset(&state, 0, sizeof(state));

		/* Get package information from the Metadata Section */
		meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
					    ICE_SID_METADATA);
		if (!meta) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Did not find ice metadata section in package\n");
			return ICE_DDP_PKG_INVALID_FILE;
		}

		hw->pkg_ver = meta->ver;
		memcpy(hw->pkg_name, meta->name, sizeof(meta->name));

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta->ver.major, meta->ver.minor, meta->ver.update,
			  meta->ver.draft, meta->name);

		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
		memcpy(hw->ice_seg_id, seg_hdr->seg_id, sizeof(hw->ice_seg_id));

		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_format_ver.major,
			  seg_hdr->seg_format_ver.minor,
			  seg_hdr->seg_format_ver.update,
			  seg_hdr->seg_format_ver.draft, seg_hdr->seg_id);
	} else {
		ice_debug(hw, ICE_DBG_INIT,
			  "Did not find ice segment in driver package\n");
		return ICE_DDP_PKG_INVALID_FILE;
	}

	return ICE_DDP_PKG_SUCCESS;
}

/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW
/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 */
static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
	DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
			ICE_PKG_CNT);
	u16 size = __struct_size(pkg_info);
	u32 i;

	if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL))
		return ICE_DDP_PKG_ERR;

	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT 4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			hw->active_track_id =
				le32_to_cpu(pkg_info->pkg_info[i].track_id);
			memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name,
			       sizeof(pkg_info->pkg_info[i].name));
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", i,
			  pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

	return ICE_DDP_PKG_SUCCESS;
}

/**
 * ice_chk_pkg_compat
 * @hw: pointer to the hardware structure
 * @ospkg: pointer to the package hdr
 * @seg: pointer to the package segment hdr
 *
 * This function checks the package version compatibility with the driver
 * and NVM.
 */
static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
					     struct ice_pkg_hdr *ospkg,
					     struct ice_seg **seg)
{
	DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
			ICE_PKG_CNT);
	u16 size = __struct_size(pkg);
	enum ice_ddp_state state;
	u32 i;

	/* Check package version compatibility */
	state = ice_chk_pkg_version(&hw->pkg_ver);
	if (state) {
		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
		return state;
	}

	/* find ICE segment in given package */
	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
						     ospkg);
	if (!*seg) {
		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
		return ICE_DDP_PKG_INVALID_FILE;
	}

	/* Check if FW is compatible with the OS package */
	if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL))
		return ICE_DDP_PKG_LOAD_ERROR;

	for (i = 0; i < le32_to_cpu(pkg->count); i++) {
		/* loop till we find the NVM package */
		if (!pkg->pkg_info[i].is_in_nvm)
			continue;
		if ((*seg)->hdr.seg_format_ver.major !=
			    pkg->pkg_info[i].ver.major ||
		    (*seg)->hdr.seg_format_ver.minor >
			    pkg->pkg_info[i].ver.minor) {
			state = ICE_DDP_PKG_FW_MISMATCH;
			ice_debug(hw, ICE_DBG_INIT,
				  "OS package is not compatible with NVM.\n");
		}
		/* done processing NVM package so break */
		break;
	}

	return state;
}
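/* Worked example of the NVM-compatibility rule above (hypothetical version
 * numbers): an OS package whose ice segment reports format version 1.3 is
 * accepted against an NVM package of 1.5 (same major, segment minor not
 * newer), but yields ICE_DDP_PKG_FW_MISMATCH against 1.2 (segment minor newer
 * than the NVM's) or against 2.0 (major version differs). The helper below
 * merely restates that check for a single version pair and is not part of the
 * driver.
 */
static bool __maybe_unused
ice_example_seg_compat_with_nvm(const struct ice_pkg_ver *seg_ver,
				const struct ice_pkg_ver *nvm_ver)
{
	return seg_ver->major == nvm_ver->major &&
	       seg_ver->minor <= nvm_ver->minor;
}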
/**
 * ice_init_pkg_hints
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the segment of the package scan (non-NULL)
 *
 * This function will scan the package and save off relevant information
 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
 * since the first call to ice_enum_labels requires a pointer to an actual
 * ice_seg structure.
 */
static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;
	int i;

	memset(&hw->tnl, 0, sizeof(hw->tnl));
	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return;

	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
				     &val);

	while (label_name) {
		if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
			/* check for a tunnel entry */
			ice_add_tunnel_hint(hw, label_name, val);

		/* check for a dvm mode entry */
		else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
			ice_add_dvm_hint(hw, val, true);

		/* check for a svm mode entry */
		else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
			ice_add_dvm_hint(hw, val, false);

		label_name = ice_enum_labels(NULL, 0, &state, &val);
	}

	/* Cache the appropriate boost TCAM entry pointers for tunnels */
	for (i = 0; i < hw->tnl.count; i++) {
		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
				     &hw->tnl.tbl[i].boost_entry);
		if (hw->tnl.tbl[i].boost_entry) {
			hw->tnl.tbl[i].valid = true;
			if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
				hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
		}
	}

	/* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
	for (i = 0; i < hw->dvm_upd.count; i++)
		ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
				     &hw->dvm_upd.tbl[i].boost_entry);
}

/**
 * ice_fill_hw_ptype - fill the enabled PTYPE bit information
 * @hw: pointer to the HW structure
 */
static void ice_fill_hw_ptype(struct ice_hw *hw)
{
	struct ice_marker_ptype_tcam_entry *tcam;
	struct ice_seg *seg = hw->seg;
	struct ice_pkg_enum state;

	bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
	if (!seg)
		return;

	memset(&state, 0, sizeof(state));

	do {
		tcam = ice_pkg_enum_entry(seg, &state,
					  ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
					  ice_marker_ptype_tcam_handler);
		if (tcam &&
		    le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
		    le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
			set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype);

		seg = NULL;
	} while (tcam);
}
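/* Illustrative check (hypothetical helper, not part of the driver): once
 * ice_fill_hw_ptype() has populated hw->hw_ptype, later code can test whether
 * the loaded package enables a given packet type with a plain bitmap lookup.
 */
static bool __maybe_unused ice_example_ptype_enabled(struct ice_hw *hw,
						     u16 ptype)
{
	return ptype < ICE_FLOW_PTYPE_MAX && test_bit(ptype, hw->hw_ptype);
}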
/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version. Then it finds the ice configuration segment
 * within the package; this function then saves a copy of the segment pointer
 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note that if
 * a previous PF driver has already downloaded the package successfully, then
 * the current driver will not have to download the package again.
 *
 * The local package contents will be used to query default behavior and to
 * update specific sections of the HW's version of the package (e.g. to update
 * the parse graph to understand new protocols).
 *
 * This function stores a pointer to the package buffer memory, and it is
 * expected that the supplied buffer will not be freed immediately. If the
 * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
 * case.
 */
enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
	bool already_loaded = false;
	enum ice_ddp_state state;
	struct ice_pkg_hdr *pkg;
	struct ice_seg *seg;

	if (!buf || !len)
		return ICE_DDP_PKG_ERR;

	pkg = (struct ice_pkg_hdr *)buf;
	state = ice_verify_pkg(pkg, len);
	if (state) {
		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
			  state);
		return state;
	}

	/* initialize package info */
	state = ice_init_pkg_info(hw, pkg);
	if (state)
		return state;

	/* must be a matching segment */
	if (hw->pkg_has_signing_seg &&
	    !ice_match_signing_seg(pkg, hw->pkg_seg_id, hw->pkg_sign_type))
		return ICE_DDP_PKG_ERR;

	/* before downloading the package, check package version for
	 * compatibility with driver
	 */
	state = ice_chk_pkg_compat(hw, pkg, &seg);
	if (state)
		return state;

	/* initialize package hints and then download package */
	ice_init_pkg_hints(hw, seg);
	state = ice_download_pkg(hw, pkg, seg);
	if (state == ICE_DDP_PKG_ALREADY_LOADED) {
		ice_debug(hw, ICE_DBG_INIT,
			  "package previously loaded - no work.\n");
		already_loaded = true;
	}

	/* Get information on the package currently loaded in HW, then make sure
	 * the driver is compatible with this version.
	 */
	if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
		state = ice_get_pkg_info(hw);
		if (!state)
			state = ice_get_ddp_pkg_state(hw, already_loaded);
	}

	if (ice_is_init_pkg_successful(state)) {
		hw->seg = seg;
		/* on successful package download update other required
		 * registers to support the package and fill HW tables
		 * with package content.
		 */
		ice_init_pkg_regs(hw);
		ice_fill_blk_tbls(hw);
		ice_fill_hw_ptype(hw);
		ice_get_prof_index_max(hw);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", state);
	}

	return state;
}
/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer and then calls ice_init_pkg() to
 * initialize the copied package contents.
 *
 * The copy is necessary when the supplied package buffer is constant, or when
 * the memory backing it may disappear shortly after this function returns,
 * such as a buffer read from a file.
 *
 * If the package buffer resides in writable memory that will remain valid,
 * the caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
 *
 * The caller may destroy the original package buffer as soon as this function
 * returns, since the new copy is managed by this function and related
 * routines.
 */
enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
					 u32 len)
{
	enum ice_ddp_state state;
	u8 *buf_copy;

	if (!buf || !len)
		return ICE_DDP_PKG_ERR;

	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);

	state = ice_init_pkg(hw, buf_copy, len);
	if (!ice_is_init_pkg_successful(state)) {
		/* Free the copy, since we failed to initialize the package */
		devm_kfree(ice_hw_to_dev(hw), buf_copy);
	} else {
		/* Track the copied pkg so we can free it later */
		hw->pkg_copy = buf_copy;
		hw->pkg_size = len;
	}

	return state;
}
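/* Illustrative sketch (hypothetical, not part of this file): a probe-time
 * caller that reads the DDP package from the filesystem holds only a const,
 * short-lived firmware buffer, so it uses ice_copy_and_init_pkg() rather than
 * ice_init_pkg(). The firmware name below is a placeholder, and the caller is
 * assumed to include <linux/firmware.h>.
 */
static enum ice_ddp_state __maybe_unused ice_example_load_ddp(struct ice_hw *hw)
{
	const struct firmware *fw;
	enum ice_ddp_state state;

	if (request_firmware(&fw, "intel/ice/ddp/example.pkg",
			     ice_hw_to_dev(hw)))
		return ICE_DDP_PKG_ERR;

	state = ice_copy_and_init_pkg(hw, fw->data, fw->size);

	/* the driver now owns its own copy, so the firmware can be released */
	release_firmware(fw);

	return state;
}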