// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Intel Corporation. */

#include "ice_common.h"
#include "ice.h"
#include "ice_ddp.h"
#include "ice_sched.h"

/* For supporting double VLAN mode, it is necessary to enable or disable certain
 * boost TCAM entries. The metadata label names that match the following
 * prefixes will be saved to allow enabling double VLAN mode.
 */
#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */

/* To support tunneling entries by PF, the package will append the PF number to
 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
 */
#define ICE_TNL_PRE "TNL_"
/* Tunnel label prefixes to scan package labels for; terminated by TNL_LAST. */
static const struct ice_tunnel_type_scan tnls[] = {
	{ TNL_VXLAN, "TNL_VXLAN_PF" },
	{ TNL_GENEVE, "TNL_GENEVE_PF" },
	{ TNL_LAST, "" }
};

/**
 * ice_verify_pkg - verify package
 * @pkg: pointer to the package buffer
 * @len: size of the package buffer
 *
 * Verifies various attributes of the package file, including length, format
 * version, and the requirement of at least one segment.
33 */ 34 static enum ice_ddp_state ice_verify_pkg(const struct ice_pkg_hdr *pkg, u32 len) 35 { 36 u32 seg_count; 37 u32 i; 38 39 if (len < struct_size(pkg, seg_offset, 1)) 40 return ICE_DDP_PKG_INVALID_FILE; 41 42 if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || 43 pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || 44 pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || 45 pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) 46 return ICE_DDP_PKG_INVALID_FILE; 47 48 /* pkg must have at least one segment */ 49 seg_count = le32_to_cpu(pkg->seg_count); 50 if (seg_count < 1) 51 return ICE_DDP_PKG_INVALID_FILE; 52 53 /* make sure segment array fits in package length */ 54 if (len < struct_size(pkg, seg_offset, seg_count)) 55 return ICE_DDP_PKG_INVALID_FILE; 56 57 /* all segments must fit within length */ 58 for (i = 0; i < seg_count; i++) { 59 u32 off = le32_to_cpu(pkg->seg_offset[i]); 60 const struct ice_generic_seg_hdr *seg; 61 62 /* segment header must fit */ 63 if (len < off + sizeof(*seg)) 64 return ICE_DDP_PKG_INVALID_FILE; 65 66 seg = (void *)pkg + off; 67 68 /* segment body must fit */ 69 if (len < off + le32_to_cpu(seg->seg_size)) 70 return ICE_DDP_PKG_INVALID_FILE; 71 } 72 73 return ICE_DDP_PKG_SUCCESS; 74 } 75 76 /** 77 * ice_free_seg - free package segment pointer 78 * @hw: pointer to the hardware structure 79 * 80 * Frees the package segment pointer in the proper manner, depending on if the 81 * segment was allocated or just the passed in pointer was stored. 82 */ 83 void ice_free_seg(struct ice_hw *hw) 84 { 85 if (hw->pkg_copy) { 86 devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); 87 hw->pkg_copy = NULL; 88 hw->pkg_size = 0; 89 } 90 hw->seg = NULL; 91 } 92 93 /** 94 * ice_chk_pkg_version - check package version for compatibility with driver 95 * @pkg_ver: pointer to a version structure to check 96 * 97 * Check to make sure that the package about to be downloaded is compatible with 98 * the driver. 
 * To be compatible, the major and minor components of the package
 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
 * definitions.
 */
static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
	if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
	    (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
	     pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
		return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
	else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
		 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
		  pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
		return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;

	return ICE_DDP_PKG_SUCCESS;
}

/**
 * ice_pkg_val_buf - validate a package buffer header
 * @buf: pointer to the ice buffer
 *
 * This helper function validates a buffer's header.
 *
 * Return: pointer to the validated header, or NULL when the section count or
 * data end offset falls outside the supported range.
 */
static const struct ice_buf_hdr *ice_pkg_val_buf(const struct ice_buf *buf)
{
	const struct ice_buf_hdr *hdr;
	u16 section_count;
	u16 data_end;

	hdr = (const struct ice_buf_hdr *)buf->buf;
	/* verify data */
	section_count = le16_to_cpu(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
		return NULL;

	data_end = le16_to_cpu(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
		return NULL;

	return hdr;
}

/**
 * ice_find_buf_table - locate the buffer table within an ice segment
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table within the ice segment.
 */
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
	/* The buffer table follows two variable-length tables: step past the
	 * device table, then past the NVM version table.
	 */
	struct ice_nvm_table *nvms = (struct ice_nvm_table *)
		(ice_seg->device_table + le32_to_cpu(ice_seg->device_table_count));

	return (__force struct ice_buf_table *)(nvms->vers +
						le32_to_cpu(nvms->table_count));
}

/**
 * ice_pkg_enum_buf - enumerate buffers in the ice segment
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 * ice_seg is set to NULL which continues the enumeration. When the function
 * returns a NULL pointer, then the end of the buffers has been reached, or an
 * unexpected value has been detected (for example an invalid section count or
 * an invalid buffer end value).
 */
static const struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg,
						  struct ice_pkg_enum *state)
{
	if (ice_seg) {
		/* first call: locate the table and start at buffer 0 */
		state->buf_table = ice_find_buf_table(ice_seg);
		if (!state->buf_table)
			return NULL;

		state->buf_idx = 0;
		return ice_pkg_val_buf(state->buf_table->buf_array);
	}

	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +
				       state->buf_idx);
	else
		return NULL;
}

/**
 * ice_pkg_advance_sect - advance to the next section of the segment
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This helper function will advance the section within the ice segment,
 * also advancing the buffer if needed.
 */
static bool ice_pkg_advance_sect(struct ice_seg *ice_seg,
				 struct ice_pkg_enum *state)
{
	if (!ice_seg && !state->buf)
		return false;

	/* stay within the current buffer while it has more sections */
	if (!ice_seg && state->buf)
		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
			return true;

	state->buf = ice_pkg_enum_buf(ice_seg, state);
	if (!state->buf)
		return false;

	/* start of new buffer, reset section index */
	state->sect_idx = 0;
	return true;
}

/**
 * ice_pkg_enum_section - enumerate sections of a given type
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the matching
 * sections has been reached.
 */
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
			   u32 sect_type)
{
	u16 offset, size;

	/* first call: remember the requested type for subsequent calls */
	if (ice_seg)
		state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))
		return NULL;

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       cpu_to_le32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))
			return NULL;

	/* validate section */
	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
		return NULL;

	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
		return NULL;

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)
		return NULL;

	state->sect_type =
		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect =
		((u8 *)state->buf) +
		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);

	return state->sect;
}

/**
 * ice_pkg_enum_entry - enumerate entries in sections of a given type
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that handles access to the entries into the section type
 *
 * This function will enumerate all the entries in a particular section type in
 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the entries has
 * been reached.
 *
 * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location of entries in each
 * section.
 *
 * The offset parameter is optional, but should be used for sections that
 * contain an offset for each section table. For such cases, the section handler
 * function must return the appropriate offset + index to give the absolute
 * offset for each entry. For example, if the base for a section's header
 * indicates a base offset of 10, and the index for the entry is 2, then the
 * section handler function should set the offset to 10 + 2 = 12.
 */
void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
			 struct ice_pkg_enum *state, u32 sect_type,
			 u32 *offset,
			 void *(*handler)(u32 sect_type, void *section,
					  u32 index, u32 *offset))
{
	void *entry;

	if (ice_seg) {
		/* first call: a handler is mandatory to walk the entries */
		if (!handler)
			return NULL;

		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
			return NULL;

		state->entry_idx = 0;
		state->handler = handler;
	} else {
		state->entry_idx++;
	}

	if (!state->handler)
		return NULL;

	/* get entry */
	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
			       offset);
	if (!entry) {
		/* end of a section, look for another section of this type */
		if (!ice_pkg_enum_section(NULL, state, 0))
			return NULL;

		state->entry_idx = 0;
		entry = state->handler(state->sect_type, state->sect,
				       state->entry_idx, offset);
	}

	return entry;
}

/**
 * ice_sw_fv_handler - get a switch field vector entry from a section
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the field vector entry to be returned
 * @offset: ptr to variable that receives the offset in the field vector table
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * This function treats the given section as of type ice_sw_fv_section and
 * enumerates its offset field. "offset" is an index into the field vector
 * table.
 */
static void *ice_sw_fv_handler(u32 sect_type, void *section, u32 index,
			       u32 *offset)
{
	struct ice_sw_fv_section *fv_section = section;

	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
		return NULL;
	if (index >= le16_to_cpu(fv_section->count))
		return NULL;
	if (offset)
		/* "index" passed in to this function is relative to a given
		 * 4k block. To get to the true index into the field vector
		 * table need to add the relative index to the base_offset
		 * field of this section
		 */
		*offset = le16_to_cpu(fv_section->base_offset) + index;
	return fv_section->fv + index;
}

/**
 * ice_get_prof_index_max - get the max profile index for used profile
 * @hw: pointer to the HW struct
 *
 * Calling this function will get the max profile index for used profile
 * and store the index number in struct ice_switch_info *switch_info
 * in HW for following use.
 */
static int ice_get_prof_index_max(struct ice_hw *hw)
{
	u16 prof_index = 0, j, max_prof_index = 0;
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	bool flag = false;
	struct ice_fv *fv;
	u32 offset;

	memset(&state, 0, sizeof(state));

	if (!hw->seg)
		return -EINVAL;

	ice_seg = hw->seg;

	do {
		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					&offset, ice_sw_fv_handler);
		if (!fv)
			break;
		ice_seg = NULL;

		/* in a profile that is not used, the prot_id is set to 0xff
		 * and the off is set to 0x1ff for all the field vectors.
		 */
		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
				flag = true;
		if (flag && prof_index > max_prof_index)
			max_prof_index = prof_index;

		prof_index++;
		flag = false;
	} while (fv);

	hw->switch_info->max_used_prof_index = max_prof_index;

	return 0;
}

/**
 * ice_get_ddp_pkg_state - get DDP pkg state after download
 * @hw: pointer to the HW struct
 * @already_loaded: indicates if pkg was already loaded onto the device
 */
static enum ice_ddp_state ice_get_ddp_pkg_state(struct ice_hw *hw,
						bool already_loaded)
{
	if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
	    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
	    hw->pkg_ver.update == hw->active_pkg_ver.update &&
	    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
	    !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
		if (already_loaded)
			return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
		else
			return ICE_DDP_PKG_SUCCESS;
	} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
		   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
		return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
	} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
		   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
		/* NOTE(review): this condition is the logical complement of
		 * the branch above, so the final else appears unreachable —
		 * confirm before simplifying.
		 */
		return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
	} else {
		return ICE_DDP_PKG_ERR;
	}
}

/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX 0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
}

/**
 * ice_marker_ptype_tcam_handler - get a Marker PType TCAM entry
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the Marker PType TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual Marker PType TCAM entries.
 */
static void *ice_marker_ptype_tcam_handler(u32 sect_type, void *section,
					   u32 index, u32 *offset)
{
	struct ice_marker_ptype_tcam_section *marker_ptype;

	if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
		return NULL;

	if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	marker_ptype = section;
	if (index >= le16_to_cpu(marker_ptype->count))
		return NULL;

	return marker_ptype->tcam + index;
}

/**
 * ice_add_dvm_hint - save a double VLAN mode boost TCAM hint
 * @hw: pointer to the HW structure
 * @val: value of the boost entry
 * @enable: true if entry needs to be enabled, or false if needs to be disabled
 */
static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
{
	if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
		hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
		hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
		hw->dvm_upd.count++;
	}
}

/**
 * ice_add_tunnel_hint - save a tunnel port boost TCAM hint for this PF
 * @hw: pointer to the HW structure
 * @label_name: label text
 * @val: value of the tunnel port boost entry
 */
static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
{
	if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
		u16 i;

		for (i = 0; tnls[i].type != TNL_LAST; i++) {
			size_t len = strlen(tnls[i].label_prefix);

			/* Look for matching label start, before continuing */
			if (strncmp(label_name, tnls[i].label_prefix, len))
				continue;

			/* Make sure this label matches our PF. Note that the PF
			 * character ('0' - '7') will be located where our
			 * prefix string's null terminator is located.
			 */
			if ((label_name[len] - '0') == hw->pf_id) {
				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
				hw->tnl.tbl[hw->tnl.count].valid = false;
				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
				hw->tnl.tbl[hw->tnl.count].port = 0;
				hw->tnl.count++;
				break;
			}
		}
	}
}

/**
 * ice_label_enum_handler - get a label entry from a label section
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the label entry to be returned
 * @offset: pointer to receive absolute offset, always zero for label sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual label entries.
 */
static void *ice_label_enum_handler(u32 __always_unused sect_type,
				    void *section, u32 index, u32 *offset)
{
	struct ice_label_section *labels;

	if (!section)
		return NULL;

	if (index > ICE_MAX_LABELS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	labels = section;
	if (index >= le16_to_cpu(labels->count))
		return NULL;

	return labels->label + index;
}

/**
 * ice_enum_labels - enumerate labels in the package segment
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @type: the section type that will contain the label (0 on subsequent calls)
 * @state: ice_pkg_enum structure that will hold the state of the enumeration
 * @value: pointer to a value that will return the label's value if found
 *
 * Enumerates a list of labels in the package. The caller will call
 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
 * the end of the list has been reached.
 */
static char *ice_enum_labels(struct ice_seg *ice_seg, u32 type,
			     struct ice_pkg_enum *state, u16 *value)
{
	struct ice_label *label;

	/* Check for valid label section on first call */
	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
		return NULL;

	label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
				   ice_label_enum_handler);
	if (!label)
		return NULL;

	*value = le16_to_cpu(label->value);
	return label->name;
}

/**
 * ice_boost_tcam_handler - get a boost TCAM entry from a section
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the boost TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual boost TCAM entries.
 */
static void *ice_boost_tcam_handler(u32 sect_type, void *section, u32 index,
				    u32 *offset)
{
	struct ice_boost_tcam_section *boost;

	if (!section)
		return NULL;

	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
		return NULL;

	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	boost = section;
	if (index >= le16_to_cpu(boost->count))
		return NULL;

	return boost->tcam + index;
}

/**
 * ice_find_boost_entry - find a boost TCAM entry by address
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @addr: Boost TCAM address of entry to search for
 * @entry: returns pointer to the entry
 *
 * Finds a particular Boost TCAM entry and returns a pointer to that entry
 * if it is found. The ice_seg parameter must not be NULL since the first call
 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
 */
static int ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
				struct ice_boost_tcam_entry **entry)
{
	struct ice_boost_tcam_entry *tcam;
	struct ice_pkg_enum state;

	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return -EINVAL;

	do {
		tcam = ice_pkg_enum_entry(ice_seg, &state,
					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
					  ice_boost_tcam_handler);
		if (tcam && le16_to_cpu(tcam->addr) == addr) {
			*entry = tcam;
			return 0;
		}

		/* only the first iteration passes the segment pointer */
		ice_seg = NULL;
	} while (tcam);

	*entry = NULL;
	return -EIO;
}

/**
 * ice_is_init_pkg_successful - check if DDP init was successful
 * @state: state of the DDP pkg after download
 */
bool ice_is_init_pkg_successful(enum ice_ddp_state state)
{
	switch (state) {
	case ICE_DDP_PKG_SUCCESS:
	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
		return true;
	default:
		return false;
	}
}

/**
 * ice_pkg_buf_alloc - allocate a package build buffer
 * @hw: pointer to the HW structure
 *
 * Allocates a package buffer and returns a pointer to the buffer header.
 * Note: all package contents must be in Little Endian form.
 */
struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
{
	struct ice_buf_build *bld;
	struct ice_buf_hdr *buf;

	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
	if (!bld)
		return NULL;

	/* an empty buffer's data ends right after the section entry table */
	buf = (struct ice_buf_hdr *)bld;
	buf->data_end =
		cpu_to_le16(offsetof(struct ice_buf_hdr, section_entry));
	return bld;
}

/* Return true when @prof_idx identifies a GTP-U profile. */
static bool ice_is_gtp_u_profile(u16 prof_idx)
{
	return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
		prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) ||
	       prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
}

/* Return true when @prof_idx identifies a GTP-C profile. */
static bool ice_is_gtp_c_profile(u16 prof_idx)
{
	switch (prof_idx) {
	case ICE_PROFID_IPV4_GTPC_TEID:
	case ICE_PROFID_IPV4_GTPC_NO_TEID:
	case ICE_PROFID_IPV6_GTPC_TEID:
	case ICE_PROFID_IPV6_GTPC_NO_TEID:
		return true;
	default:
		return false;
	}
}

/* Return true when @prof_idx identifies a PFCP profile. */
static bool ice_is_pfcp_profile(u16 prof_idx)
{
	return prof_idx >= ICE_PROFID_IPV4_PFCP_NODE &&
	       prof_idx <= ICE_PROFID_IPV6_PFCP_SESSION;
}

/**
 * ice_get_sw_prof_type - determine switch profile type
 * @hw: pointer to the HW structure
 * @fv: pointer to the switch field vector
 * @prof_idx: profile index to check
 */
static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw,
					       struct ice_fv *fv, u32 prof_idx)
{
	u16 i;

	if (ice_is_gtp_c_profile(prof_idx))
		return ICE_PROF_TUN_GTPC;

	if (ice_is_gtp_u_profile(prof_idx))
		return ICE_PROF_TUN_GTPU;

	if (ice_is_pfcp_profile(prof_idx))
		return ICE_PROF_TUN_PFCP;

	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
		    fv->ew[i].off == ICE_VNI_OFFSET)
			return ICE_PROF_TUN_UDP;

		/* GRE tunnel will have GRE protocol */
		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
			return ICE_PROF_TUN_GRE;
	}

	return ICE_PROF_NON_TUN;
}

/**
 * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
 * @hw: pointer to hardware structure
 * @req_profs: type of profiles requested
 * @bm: pointer to memory for returning the bitmap of field vectors
 */
void ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
			  unsigned long *bm)
{
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	struct ice_fv *fv;

	if (req_profs == ICE_PROF_ALL) {
		bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
		return;
	}

	memset(&state, 0, sizeof(state));
	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
	ice_seg = hw->seg;
	do {
		enum ice_prof_type prof_type;
		u32 offset;

		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					&offset, ice_sw_fv_handler);
		ice_seg = NULL;

		if (fv) {
			/* Determine field vector type */
			prof_type = ice_get_sw_prof_type(hw, fv, offset);

			if (req_profs & prof_type)
				set_bit((u16)offset, bm);
		}
	} while (fv);
}

/**
 * ice_get_sw_fv_list - collect field vectors matching a set of lookups
 * @hw: pointer to the HW structure
 * @lkups: list of protocol types
 * @bm: bitmap of field vectors to consider
 * @fv_list: Head of a list
 *
 * Finds all the field vector entries from switch block that contain
 * a given protocol ID and offset and returns a list of structures of type
 * "ice_sw_fv_list_entry". Every structure in the list has a field vector
 * definition and profile ID information
 * NOTE: The caller of the function is responsible for freeing the memory
 * allocated for every list entry.
 */
int ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
		       unsigned long *bm, struct list_head *fv_list)
{
	struct ice_sw_fv_list_entry *fvl;
	struct ice_sw_fv_list_entry *tmp;
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	struct ice_fv *fv;
	u32 offset;

	memset(&state, 0, sizeof(state));

	if (!lkups->n_val_words || !hw->seg)
		return -EINVAL;

	ice_seg = hw->seg;
	do {
		u16 i;

		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					&offset, ice_sw_fv_handler);
		if (!fv)
			break;
		ice_seg = NULL;

		/* If field vector is not in the bitmap list, then skip this
		 * profile.
		 */
		if (!test_bit((u16)offset, bm))
			continue;

		for (i = 0; i < lkups->n_val_words; i++) {
			int j;

			/* look for this lookup word anywhere in the vector */
			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv->ew[j].prot_id ==
					    lkups->fv_words[i].prot_id &&
				    fv->ew[j].off == lkups->fv_words[i].off)
					break;
			/* word not found: this profile does not match */
			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
				break;
			if (i + 1 == lkups->n_val_words) {
				/* every lookup word matched; save profile */
				fvl = devm_kzalloc(ice_hw_to_dev(hw),
						   sizeof(*fvl), GFP_KERNEL);
				if (!fvl)
					goto err;
				fvl->fv_ptr = fv;
				fvl->profile_id = offset;
				list_add(&fvl->list_entry, fv_list);
				break;
			}
		}
	} while (fv);
	if (list_empty(fv_list)) {
		dev_warn(ice_hw_to_dev(hw),
			 "Required profiles not found in currently loaded DDP package");
		return -EIO;
	}

	return 0;

err:
	list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
		list_del(&fvl->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fvl);
	}

	return -ENOMEM;
}

/**
 * ice_init_prof_result_bm - Initialize the profile result index bitmap
 * @hw: pointer to hardware structure
 */
void ice_init_prof_result_bm(struct ice_hw *hw)
{
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	struct ice_fv *fv;

	memset(&state, 0, sizeof(state));

	if (!hw->seg)
		return;

	ice_seg = hw->seg;
	do {
		u32 off;
		u16 i;

		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					&off, ice_sw_fv_handler);
		ice_seg = NULL;
		if (!fv)
			break;

		bitmap_zero(hw->switch_info->prof_res_bm[off],
			    ICE_MAX_FV_WORDS);

		/* Determine empty field vector indices, these can be
		 * used for recipe results. Skip index 0, since it is
		 * always used for Switch ID.
		 */
		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
				set_bit(i, hw->switch_info->prof_res_bm[off]);
	} while (fv);
}

/**
 * ice_pkg_buf_free - free a package build buffer
 * @hw: pointer to the HW structure
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Frees a package buffer
 */
void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
	devm_kfree(ice_hw_to_dev(hw), bld);
}

/**
 * ice_pkg_buf_reserve_section - reserve section table entries
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @count: the number of sections to reserve
 *
 * Reserves one or more section table entries in a package buffer. This routine
 * can be called multiple times as long as they are made before calling
 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
 * is called once, the number of sections that can be allocated will not be able
 * to be increased; not using all reserved sections is fine, but this will
 * result in some wasted space in the buffer.
 * Note: all package contents must be in Little Endian form.
 */
int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
	struct ice_buf_hdr *buf;
	u16 section_count;
	u16 data_end;

	if (!bld)
		return -EINVAL;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* already an active section, can't increase table size */
	section_count = le16_to_cpu(buf->section_count);
	if (section_count > 0)
		return -EIO;

	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
		return -EIO;
	bld->reserved_section_table_entries += count;

	/* grow data_end past the newly reserved section table entries */
	data_end = le16_to_cpu(buf->data_end) +
		   flex_array_size(buf, section_entry, count);
	buf->data_end = cpu_to_le16(data_end);

	return 0;
}

/**
 * ice_pkg_buf_alloc_section - allocate space for a section's content
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 *
 * Reserves memory in the buffer for a section's content and updates the
 * buffers' status accordingly. This routine returns a pointer to the first
 * byte of the section start within the buffer, which is used to fill in the
 * section contents.
 * Note: all package contents must be in Little Endian form.
 */
void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
{
	struct ice_buf_hdr *buf;
	u16 sect_count;
	u16 data_end;

	if (!bld || !type || !size)
		return NULL;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* check for enough space left in buffer */
	data_end = le16_to_cpu(buf->data_end);

	/* section start must align on 4 byte boundary */
	data_end = ALIGN(data_end, 4);

	if ((data_end + size) > ICE_MAX_S_DATA_END)
		return NULL;

	/* check for more available section table entries */
	sect_count = le16_to_cpu(buf->section_count);
	if (sect_count < bld->reserved_section_table_entries) {
		void *section_ptr = ((u8 *)buf) + data_end;

		buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
		buf->section_entry[sect_count].size = cpu_to_le16(size);
		buf->section_entry[sect_count].type = cpu_to_le32(type);

		data_end += size;
		buf->data_end = cpu_to_le16(data_end);

		buf->section_count = cpu_to_le16(sect_count + 1);
		return section_ptr;
	}

	/* no free section table entries */
	return NULL;
}

/**
 * ice_pkg_buf_alloc_single_section - allocate a buffer with one section
 * @hw: pointer to the HW structure
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 * @section: returns pointer to the section
 *
 * Allocates a package buffer with a single section.
 * Note: all package contents must be in Little Endian form.
 */
struct ice_buf_build *ice_pkg_buf_alloc_single_section(struct ice_hw *hw,
						       u32 type, u16 size,
						       void **section)
{
	struct ice_buf_build *buf;

	if (!section)
		return NULL;

	buf = ice_pkg_buf_alloc(hw);
	if (!buf)
		return NULL;

	if (ice_pkg_buf_reserve_section(buf, 1))
		goto ice_pkg_buf_alloc_single_section_err;

	*section = ice_pkg_buf_alloc_section(buf, type, size);
	if (!*section)
		goto ice_pkg_buf_alloc_single_section_err;

	return buf;

ice_pkg_buf_alloc_single_section_err:
	/* release the partially-built buffer on any failure */
	ice_pkg_buf_free(hw, buf);
	return NULL;
}

/**
 * ice_pkg_buf_get_active_sections - count active sections in a buffer
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Returns the number of active sections. Before using the package buffer
 * in an update package command, the caller should make sure that there is at
 * least one active section - otherwise, the buffer is not legal and should
 * not be used.
 * Note: all package contents must be in Little Endian form.
 */
u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
{
	struct ice_buf_hdr *buf;

	if (!bld)
		return 0;

	buf = (struct ice_buf_hdr *)&bld->buf;
	return le16_to_cpu(buf->section_count);
}

/**
 * ice_pkg_buf
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Return a pointer to the buffer's header
 */
struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
	if (!bld)
		return NULL;

	return &bld->buf;
}

/**
 * ice_map_aq_err_to_ddp_state - map an admin queue error to a DDP state
 * @aq_err: admin queue error returned by the last package command
 *
 * Return: the ice_ddp_state corresponding to @aq_err; unknown errors map to
 * the generic %ICE_DDP_PKG_ERR.
 */
static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum libie_aq_err aq_err)
{
	switch (aq_err) {
	case LIBIE_AQ_RC_ENOSEC:
	case LIBIE_AQ_RC_EBADSIG:
		return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
	case LIBIE_AQ_RC_ESVN:
		return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
	case LIBIE_AQ_RC_EBADMAN:
	case LIBIE_AQ_RC_EBADBUF:
		return ICE_DDP_PKG_LOAD_ERROR;
	default:
		return ICE_DDP_PKG_ERR;
	}
}

/**
 * ice_acquire_global_cfg_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the global config lock for reading
 * or writing of the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * 0 - Means the caller has acquired the global config lock
 *     and can perform writing of the package.
 * -EALREADY - Indicates another driver has already written the
 *             package or has found that no update was necessary; in
 *             this case, the caller can just skip performing any
 *             update of the package.
 */
static int ice_acquire_global_cfg_lock(struct ice_hw *hw,
				       enum ice_aq_res_access_type access)
{
	int status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);

	/* the SW mutex is taken only when the FW resource was granted; it is
	 * released in ice_release_global_cfg_lock()
	 */
	if (!status)
		mutex_lock(&ice_global_cfg_lock_sw);
	else if (status == -EALREADY)
		ice_debug(hw, ICE_DBG_PKG,
			  "Global config lock: No work to do\n");

	return status;
}

/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock.
 */
static void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	/* release in reverse order of acquisition */
	mutex_unlock(&ice_global_cfg_lock_sw);
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}

/**
 * ice_aq_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package (0x0C40)
 *
 * Return: 0 on success, negative errno from ice_aq_send_cmd() otherwise; on
 * -EIO the optional @error_offset/@error_info are filled from the FW response.
 */
static int
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct libie_aq_desc desc;
	int status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = libie_aq_raw(&desc);
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == -EIO) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
 * @buf: pointer to buffer header
 * Return: whether given @buf is a metadata one.
 */
static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
{
	return le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF;
}

/**
 * struct ice_ddp_send_ctx - sending context of current DDP segment
 * @hw: pointer to the hardware struct
 *
 * Keeps current sending state (header, error) for the purpose of proper "last"
 * bit setting in ice_aq_download_pkg(). Use via calls to ice_ddp_send_hunk().
 */
struct ice_ddp_send_ctx {
	struct ice_hw *hw;
	/* private: only for ice_ddp_send_hunk() */
	struct ice_buf_hdr *hdr;
	int err;
};

/**
 * ice_ddp_send_ctx_set_err - make an error sticky in the sending context
 * @ctx: current segment sending context
 * @err: error value to latch; later ice_ddp_send_hunk() calls become no-ops
 */
static void ice_ddp_send_ctx_set_err(struct ice_ddp_send_ctx *ctx, int err)
{
	ctx->err = err;
}

/**
 * ice_ddp_send_hunk - send one hunk of data to FW
 * @ctx: current segment sending context
 * @hunk: next hunk to send, size is always ICE_PKG_BUF_SIZE
 *
 * Send the next hunk of data to FW, retrying if needed.
 *
 * Notice: must be called once more with a NULL @hunk to finish up; such call
 * will set up the "last" bit of an AQ request. After such call @ctx.hdr is
 * cleared, @hw is still valid.
 *
 * Return: %ICE_DDP_PKG_SUCCESS if there were no problems; a sticky @err
 * otherwise.
 */
static enum ice_ddp_state ice_ddp_send_hunk(struct ice_ddp_send_ctx *ctx,
					    struct ice_buf_hdr *hunk)
{
	/* hunks are sent one call late: the previous hunk is transmitted now,
	 * so that a NULL @hunk can mark it as the last one
	 */
	struct ice_buf_hdr *prev_hunk = ctx->hdr;
	struct ice_hw *hw = ctx->hw;
	bool prev_was_last = !hunk;
	enum libie_aq_err aq_err;
	u32 offset, info;
	int attempt, err;

	/* sticky error: once a send failed, all further calls are no-ops */
	if (ctx->err)
		return ctx->err;

	ctx->hdr = hunk;
	if (!prev_hunk)
		return ICE_DDP_PKG_SUCCESS; /* no problem so far */

	/* retry up to 5 times, only on security-related AQ errors
	 * (ENOSEC/EBADSIG); any other result exits the loop immediately
	 */
	for (attempt = 0; attempt < 5; attempt++) {
		if (attempt)
			msleep(20);

		err = ice_aq_download_pkg(hw, prev_hunk, ICE_PKG_BUF_SIZE,
					  prev_was_last, &offset, &info, NULL);

		aq_err = hw->adminq.sq_last_status;
		if (aq_err != LIBIE_AQ_RC_ENOSEC &&
		    aq_err != LIBIE_AQ_RC_EBADSIG)
			break;
	}

	if (err) {
		ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
			  err, offset, info);
		ctx->err = ice_map_aq_err_to_ddp_state(aq_err);
	} else if (attempt) {
		dev_dbg(ice_hw_to_dev(hw),
			"ice_aq_download_pkg number of retries: %d\n", attempt);
	}

	return ctx->err;
}

/**
 * ice_dwnld_cfg_bufs_no_lock
 * @ctx: context of the current buffers section to send
 * @bufs: pointer to an array of buffers
 * @start: buffer index of first buffer to download
 * @count: the number of buffers to download
 *
 * Downloads package configuration buffers to the firmware. Metadata buffers
 * are skipped, and the first metadata buffer found indicates that the rest
 * of the buffers are all metadata buffers.
1308 */ 1309 static enum ice_ddp_state 1310 ice_dwnld_cfg_bufs_no_lock(struct ice_ddp_send_ctx *ctx, struct ice_buf *bufs, 1311 u32 start, u32 count) 1312 { 1313 struct ice_buf_hdr *bh; 1314 enum ice_ddp_state err; 1315 1316 if (!bufs || !count) { 1317 ice_ddp_send_ctx_set_err(ctx, ICE_DDP_PKG_ERR); 1318 return ICE_DDP_PKG_ERR; 1319 } 1320 1321 bufs += start; 1322 1323 for (int i = 0; i < count; i++, bufs++) { 1324 bh = (struct ice_buf_hdr *)bufs; 1325 /* Metadata buffers should not be sent to FW, 1326 * their presence means "we are done here". 1327 */ 1328 if (ice_is_buffer_metadata(bh)) 1329 break; 1330 1331 err = ice_ddp_send_hunk(ctx, bh); 1332 if (err) 1333 return err; 1334 } 1335 1336 return 0; 1337 } 1338 1339 /** 1340 * ice_get_pkg_seg_by_idx 1341 * @pkg_hdr: pointer to the package header to be searched 1342 * @idx: index of segment 1343 */ 1344 static struct ice_generic_seg_hdr * 1345 ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx) 1346 { 1347 if (idx < le32_to_cpu(pkg_hdr->seg_count)) 1348 return (struct ice_generic_seg_hdr *) 1349 ((u8 *)pkg_hdr + 1350 le32_to_cpu(pkg_hdr->seg_offset[idx])); 1351 1352 return NULL; 1353 } 1354 1355 /** 1356 * ice_is_signing_seg_at_idx - determine if segment is a signing segment 1357 * @pkg_hdr: pointer to package header 1358 * @idx: segment index 1359 */ 1360 static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx) 1361 { 1362 struct ice_generic_seg_hdr *seg; 1363 1364 seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx); 1365 if (!seg) 1366 return false; 1367 1368 return le32_to_cpu(seg->seg_type) == SEGMENT_TYPE_SIGNING; 1369 } 1370 1371 /** 1372 * ice_is_signing_seg_type_at_idx 1373 * @pkg_hdr: pointer to package header 1374 * @idx: segment index 1375 * @seg_id: segment id that is expected 1376 * @sign_type: signing type 1377 * 1378 * Determine if a segment is a signing segment of the correct type 1379 */ 1380 static bool 1381 ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx, 
1382 u32 seg_id, u32 sign_type) 1383 { 1384 struct ice_sign_seg *seg; 1385 1386 if (!ice_is_signing_seg_at_idx(pkg_hdr, idx)) 1387 return false; 1388 1389 seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); 1390 1391 if (seg && le32_to_cpu(seg->seg_id) == seg_id && 1392 le32_to_cpu(seg->sign_type) == sign_type) 1393 return true; 1394 1395 return false; 1396 } 1397 1398 /** 1399 * ice_download_pkg_sig_seg - download a signature segment 1400 * @ctx: context of the current buffers section to send 1401 * @seg: pointer to signature segment 1402 */ 1403 static enum ice_ddp_state 1404 ice_download_pkg_sig_seg(struct ice_ddp_send_ctx *ctx, struct ice_sign_seg *seg) 1405 { 1406 return ice_dwnld_cfg_bufs_no_lock(ctx, seg->buf_tbl.buf_array, 0, 1407 le32_to_cpu(seg->buf_tbl.buf_count)); 1408 } 1409 1410 /** 1411 * ice_download_pkg_config_seg - download a config segment 1412 * @ctx: context of the current buffers section to send 1413 * @pkg_hdr: pointer to package header 1414 * @idx: segment index 1415 * @start: starting buffer 1416 * @count: buffer count 1417 * 1418 * Note: idx must reference a ICE segment 1419 */ 1420 static enum ice_ddp_state 1421 ice_download_pkg_config_seg(struct ice_ddp_send_ctx *ctx, 1422 struct ice_pkg_hdr *pkg_hdr, u32 idx, u32 start, 1423 u32 count) 1424 { 1425 struct ice_buf_table *bufs; 1426 struct ice_seg *seg; 1427 u32 buf_count; 1428 1429 seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); 1430 if (!seg) 1431 return ICE_DDP_PKG_ERR; 1432 1433 bufs = ice_find_buf_table(seg); 1434 buf_count = le32_to_cpu(bufs->buf_count); 1435 1436 if (start >= buf_count || start + count > buf_count) 1437 return ICE_DDP_PKG_ERR; 1438 1439 return ice_dwnld_cfg_bufs_no_lock(ctx, bufs->buf_array, start, count); 1440 } 1441 1442 static bool ice_is_last_sign_seg(u32 flags) 1443 { 1444 return !(flags & ICE_SIGN_SEG_FLAGS_VALID) || /* behavior prior to valid */ 1445 (flags & ICE_SIGN_SEG_FLAGS_LAST); 1446 } 1447 1448 /** 1449 * 
 * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
 * @ctx: context of the current buffers section to send
 * @pkg_hdr: pointer to package header
 * @idx: segment index (must be a signature segment)
 *
 * Note: idx must reference a signature segment
 */
static enum ice_ddp_state
ice_dwnld_sign_and_cfg_segs(struct ice_ddp_send_ctx *ctx,
			    struct ice_pkg_hdr *pkg_hdr, u32 idx)
{
	u32 conf_idx, start, count, flags;
	enum ice_ddp_state state;
	struct ice_sign_seg *seg;

	seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
	if (!seg) {
		state = ICE_DDP_PKG_ERR;
		ice_ddp_send_ctx_set_err(ctx, state);
		return state;
	}

	/* the signature segment describes which buffers of the config
	 * segment it covers; zero count means only the sig seg is sent
	 */
	count = le32_to_cpu(seg->signed_buf_count);
	state = ice_download_pkg_sig_seg(ctx, seg);
	if (state || !count)
		return state;

	conf_idx = le32_to_cpu(seg->signed_seg_idx);
	start = le32_to_cpu(seg->signed_buf_start);

	state = ice_download_pkg_config_seg(ctx, pkg_hdr, conf_idx, start,
					    count);

	/* finish up by sending last hunk with "last" flag set if requested by
	 * DDP content
	 */
	flags = le32_to_cpu(seg->flags);
	if (ice_is_last_sign_seg(flags))
		state = ice_ddp_send_hunk(ctx, NULL);

	return state;
}

/**
 * ice_match_signing_seg - determine if a matching signing segment exists
 * @pkg_hdr: pointer to package header
 * @seg_id: segment id that is expected
 * @sign_type: signing type
 *
 * Return: true when any segment matches both @seg_id and @sign_type.
 */
static bool
ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type)
{
	u32 i;

	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
		if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id,
						   sign_type))
			return true;
	}

	return false;
}

/**
 * ice_post_dwnld_pkg_actions - perform post download package actions
 * @hw: pointer to the
hardware structure 1515 */ 1516 static enum ice_ddp_state 1517 ice_post_dwnld_pkg_actions(struct ice_hw *hw) 1518 { 1519 int status; 1520 1521 status = ice_set_vlan_mode(hw); 1522 if (status) { 1523 ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", 1524 status); 1525 return ICE_DDP_PKG_ERR; 1526 } 1527 1528 return ICE_DDP_PKG_SUCCESS; 1529 } 1530 1531 /** 1532 * ice_download_pkg_with_sig_seg 1533 * @hw: pointer to the hardware structure 1534 * @pkg_hdr: pointer to package header 1535 * 1536 * Handles the download of a complete package. 1537 */ 1538 static enum ice_ddp_state 1539 ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) 1540 { 1541 enum libie_aq_err aq_err = hw->adminq.sq_last_status; 1542 enum ice_ddp_state state = ICE_DDP_PKG_ERR; 1543 struct ice_ddp_send_ctx ctx = { .hw = hw }; 1544 int status; 1545 u32 i; 1546 1547 ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id); 1548 ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type); 1549 1550 status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); 1551 if (status) { 1552 if (status == -EALREADY) 1553 state = ICE_DDP_PKG_ALREADY_LOADED; 1554 else 1555 state = ice_map_aq_err_to_ddp_state(aq_err); 1556 return state; 1557 } 1558 1559 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { 1560 if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id, 1561 hw->pkg_sign_type)) 1562 continue; 1563 1564 state = ice_dwnld_sign_and_cfg_segs(&ctx, pkg_hdr, i); 1565 if (state) 1566 break; 1567 } 1568 1569 if (!state) 1570 state = ice_post_dwnld_pkg_actions(hw); 1571 1572 ice_release_global_cfg_lock(hw); 1573 1574 return state; 1575 } 1576 1577 /** 1578 * ice_dwnld_cfg_bufs 1579 * @hw: pointer to the hardware structure 1580 * @bufs: pointer to an array of buffers 1581 * @count: the number of buffers in the array 1582 * 1583 * Obtains global config lock and downloads the package configuration buffers 1584 * to the firmware. 
 */
static enum ice_ddp_state
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	struct ice_ddp_send_ctx ctx = { .hw = hw };
	enum ice_ddp_state state;
	struct ice_buf_hdr *bh;
	int status;

	if (!bufs || !count)
		return ICE_DDP_PKG_ERR;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)bufs;
	if (ice_is_buffer_metadata(bh))
		return ICE_DDP_PKG_SUCCESS;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status) {
		if (status == -EALREADY)
			return ICE_DDP_PKG_ALREADY_LOADED;
		/* map the AQ status of the failed acquire request */
		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
	}

	ice_dwnld_cfg_bufs_no_lock(&ctx, bufs, 0, count);
	/* finish up by sending last hunk with "last" flag set */
	state = ice_ddp_send_hunk(&ctx, NULL);
	if (!state)
		state = ice_post_dwnld_pkg_actions(hw);

	ice_release_global_cfg_lock(hw);

	return state;
}

/**
 * ice_download_pkg_without_sig_seg
 * @hw: pointer to the hardware structure
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package without signature segment.
 */
static enum ice_ddp_state
ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_buf_table *ice_buf_tbl;

	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
		  ice_seg->hdr.seg_format_ver.major,
		  ice_seg->hdr.seg_format_ver.minor,
		  ice_seg->hdr.seg_format_ver.update,
		  ice_seg->hdr.seg_format_ver.draft);

	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
		  le32_to_cpu(ice_seg->hdr.seg_type),
		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);

	/* locate the buffer table embedded in the ICE segment */
	ice_buf_tbl = ice_find_buf_table(ice_seg);

	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
		  le32_to_cpu(ice_buf_tbl->buf_count));

	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
				  le32_to_cpu(ice_buf_tbl->buf_count));
}

/**
 * ice_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to package header
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package.
 */
static enum ice_ddp_state
ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
		 struct ice_seg *ice_seg)
{
	enum ice_ddp_state state;

	/* pick download flavor based on presence of a signing segment */
	if (hw->pkg_has_signing_seg)
		state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
	else
		state = ice_download_pkg_without_sig_seg(hw, ice_seg);

	ice_post_pkg_dwnld_vlan_mode_cfg(hw);

	return state;
}

/**
 * ice_aq_get_pkg_info_list
 * @hw: pointer to the hardware structure
 * @pkg_info: the buffer which will receive the information list
 * @buf_size: the size of the pkg_info information buffer
 * @cd: pointer to command details structure or NULL
 *
 * Get Package Info List (0x0C43)
 */
static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
				    struct ice_aqc_get_pkg_info_resp *pkg_info,
				    u16 buf_size, struct ice_sq_cd *cd)
{
	struct libie_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);

	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}

/**
 * ice_aq_update_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Update Package (0x0C42)
 */
static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
			     u16 buf_size, bool last_buf, u32 *error_offset,
			     u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct libie_aq_desc desc;
	int status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = libie_aq_raw(&desc);
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == -EIO) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * ice_aq_upload_section
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer which will receive the section
 * @buf_size: the size of the package buffer
 * @cd: pointer to command details structure or NULL
 *
 * Upload Section (0x0C41)
 */
int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
			  u16 buf_size, struct ice_sq_cd *cd)
{
	struct libie_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);

	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
}

/**
 * ice_update_pkg_no_lock
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Sends each buffer via Update Package; stops at the first failure.
 * Caller must already hold the change lock (see ice_update_pkg()).
 */
int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	int status = 0;
	u32 i;

	for (i = 0; i < count; i++) {
		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
		bool last = ((i + 1) == count);
		u32 offset, info;

		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
					   last, &offset, &info, NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_PKG,
				  "Update pkg failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}
	}

	return status;
}

/**
 * ice_update_pkg
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains change lock and updates package.
 */
int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	int status;

	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		return status;

	status = ice_update_pkg_no_lock(hw, bufs, count);

	ice_release_change_lock(hw);

	return status;
}

/**
 * ice_find_seg_in_pkg
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */
static const struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    const struct ice_pkg_hdr *pkg_hdr)
{
	u32 i;

	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
		  pkg_hdr->pkg_format_ver.update,
		  pkg_hdr->pkg_format_ver.draft);

	/* Search all package segments for the requested segment type */
	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
		const struct ice_generic_seg_hdr *seg;

		seg = (void *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]);

		if (le32_to_cpu(seg->seg_type) == seg_type)
			return seg;
	}

	return NULL;
}

/**
 * ice_has_signing_seg - determine if package has a signing segment
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 */
static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_generic_seg_hdr *seg_hdr;

	seg_hdr = (struct ice_generic_seg_hdr *)
		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr);

	return seg_hdr ? true : false;
}

/**
 * ice_get_pkg_segment_id - get correct package segment id, based on device
 * @mac_type: MAC type of the device
 */
static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
{
	u32 seg_id;

	switch (mac_type) {
	case ICE_MAC_E830:
		seg_id = SEGMENT_TYPE_ICE_E830;
		break;
	case ICE_MAC_GENERIC:
	case ICE_MAC_GENERIC_3K_E825:
	default:
		/* E810-style segment is the default for unknown MACs */
		seg_id = SEGMENT_TYPE_ICE_E810;
		break;
	}

	return seg_id;
}

/**
 * ice_get_pkg_sign_type - get package segment sign type, based on device
 * @mac_type: MAC type of the device
 */
static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
{
	u32 sign_type;

	switch (mac_type) {
	case ICE_MAC_E830:
		sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB;
		break;
	case ICE_MAC_GENERIC_3K_E825:
		sign_type = SEGMENT_SIGN_TYPE_RSA3K_E825;
		break;
	case ICE_MAC_GENERIC:
	default:
		sign_type = SEGMENT_SIGN_TYPE_RSA2K;
		break;
	}

	return sign_type;
}

/**
 * ice_get_signing_req - get correct package requirements, based on device
 * @hw: pointer to the hardware structure
 */
static void ice_get_signing_req(struct ice_hw *hw)
{
	hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type);
	hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type);
}

/**
 * ice_init_pkg_info
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Saves off the package details into the HW structure.
 */
static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
					    struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_generic_seg_hdr *seg_hdr;

	if (!pkg_hdr)
		return ICE_DDP_PKG_ERR;

	/* determine the signing requirements before searching for the seg */
	hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr);
	ice_get_signing_req(hw);

	ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
		  hw->pkg_seg_id);

	seg_hdr = (struct ice_generic_seg_hdr *)
		ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
	if (seg_hdr) {
		struct ice_meta_sect *meta;
		struct ice_pkg_enum state;

		memset(&state, 0, sizeof(state));

		/* Get package information from the Metadata Section */
		meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
					    ICE_SID_METADATA);
		if (!meta) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Did not find ice metadata section in package\n");
			return ICE_DDP_PKG_INVALID_FILE;
		}

		/* cache package version/name and segment version/id in HW */
		hw->pkg_ver = meta->ver;
		memcpy(hw->pkg_name, meta->name, sizeof(meta->name));

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta->ver.major, meta->ver.minor, meta->ver.update,
			  meta->ver.draft, meta->name);

		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
		memcpy(hw->ice_seg_id, seg_hdr->seg_id, sizeof(hw->ice_seg_id));

		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_format_ver.major,
			  seg_hdr->seg_format_ver.minor,
			  seg_hdr->seg_format_ver.update,
			  seg_hdr->seg_format_ver.draft, seg_hdr->seg_id);
	} else {
		ice_debug(hw, ICE_DBG_INIT,
			  "Did not find ice segment in driver package\n");
		return ICE_DDP_PKG_INVALID_FILE;
	}

	return ICE_DDP_PKG_SUCCESS;
}

/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 */
static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
	DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
			ICE_PKG_CNT);
	u16 size = __struct_size(pkg_info);
	u32 i;

	if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL))
		return ICE_DDP_PKG_ERR;

	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
/* flags string: A=active, B=active at boot, M=modified, N=in NVM */
#define ICE_PKG_FLAG_COUNT 4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			hw->active_track_id =
				le32_to_cpu(pkg_info->pkg_info[i].track_id);
			memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name,
			       sizeof(pkg_info->pkg_info[i].name));
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", i,
			  pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

	return ICE_DDP_PKG_SUCCESS;
}

/**
 * ice_chk_pkg_compat
 * @hw: pointer to the hardware structure
 * @ospkg: pointer to the package hdr
 * @seg: pointer to the package segment hdr
 *
 * This function checks the package version compatibility with driver and NVM
 */
static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
					     struct ice_pkg_hdr *ospkg,
					     struct ice_seg **seg)
{
	DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
			ICE_PKG_CNT);
	u16 size = __struct_size(pkg);
	enum ice_ddp_state state;
	u32 i;

	/* Check package version compatibility */
	state = ice_chk_pkg_version(&hw->pkg_ver);
	if (state) {
		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
		return state;
	}

	/* find ICE segment in given package */
	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
						     ospkg);
	if (!*seg) {
		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
		return ICE_DDP_PKG_INVALID_FILE;
	}

	/* Check if FW is compatible with the OS package */
	if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL))
		return ICE_DDP_PKG_LOAD_ERROR;

	for (i = 0; i < le32_to_cpu(pkg->count); i++) {
		/* loop till we find the NVM package */
		if (!pkg->pkg_info[i].is_in_nvm)
			continue;
		/* major must match exactly, OS minor must not be newer */
		if ((*seg)->hdr.seg_format_ver.major !=
			    pkg->pkg_info[i].ver.major ||
		    (*seg)->hdr.seg_format_ver.minor >
			    pkg->pkg_info[i].ver.minor) {
			state = ICE_DDP_PKG_FW_MISMATCH;
			ice_debug(hw, ICE_DBG_INIT,
				  "OS package is not compatible with NVM.\n");
		}
		/* done processing NVM package so break */
		break;
	}

	return state;
}

/**
 * ice_init_pkg_hints
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the segment of the package scan (non-NULL)
 *
 * This function will scan the package and save off relevant information
 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
 * since the first call to ice_enum_labels requires a pointer to an actual
 * ice_seg structure.
 */
static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;
	int i;

	memset(&hw->tnl, 0, sizeof(hw->tnl));
	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return;

	/* first call supplies the segment; subsequent calls pass NULL and
	 * continue from the enumerator state
	 */
	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
				     &val);

	while (label_name) {
		if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
			/* check for a tunnel entry */
			ice_add_tunnel_hint(hw, label_name, val);

		/* check for a dvm mode entry */
		else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
			ice_add_dvm_hint(hw, val, true);

		/* check for a svm mode entry */
		else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
			ice_add_dvm_hint(hw, val, false);

		label_name = ice_enum_labels(NULL, 0, &state, &val);
	}

	/* Cache the appropriate boost TCAM entry pointers for tunnels */
	for (i = 0; i < hw->tnl.count; i++) {
		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
				     &hw->tnl.tbl[i].boost_entry);
		if (hw->tnl.tbl[i].boost_entry) {
			hw->tnl.tbl[i].valid = true;
			/* guard valid_count[] against an out-of-range type */
			if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
				hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
		}
	}

	/* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
	for (i = 0; i < hw->dvm_upd.count; i++)
		ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
				     &hw->dvm_upd.tbl[i].boost_entry);
}

/**
 * ice_fill_hw_ptype - fill the enabled PTYPE bit information
 * @hw: pointer to the HW structure
 *
 * Scan the marker-PTYPE TCAM section of the cached ice segment and set a bit
 * in hw->hw_ptype for every in-range packet type found. If no segment is
 * cached, the bitmap is simply left zeroed.
 */
static void ice_fill_hw_ptype(struct ice_hw *hw)
{
	struct ice_marker_ptype_tcam_entry *tcam;
	struct ice_seg *seg = hw->seg;
	struct ice_pkg_enum state;

	bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
	if (!seg)
		return;

	memset(&state, 0, sizeof(state));

	do {
		tcam = ice_pkg_enum_entry(seg, &state,
					  ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
					  ice_marker_ptype_tcam_handler);
		/* only record entries whose addr and ptype are in range */
		if (tcam &&
		    le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
		    le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
			set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype);

		/* seg is only passed on the first enumerator call */
		seg = NULL;
	} while (tcam);
}

/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version. Then it finds the ice configuration segment
 * within the package; this function then saves a copy of the segment pointer
 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note, that if
 * a previous PF driver has already downloaded the package successfully, then
 * the current driver will not have to download the package again.
 *
 * The local package contents will be used to query default behavior and to
 * update specific sections of the HW's version of the package (e.g. to update
 * the parse graph to understand new protocols).
 *
 * This function stores a pointer to the package buffer memory, and it is
 * expected that the supplied buffer will not be freed immediately. If the
 * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
 * case.
 */
enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
	bool already_loaded = false;
	enum ice_ddp_state state;
	struct ice_pkg_hdr *pkg;
	struct ice_seg *seg;

	if (!buf || !len)
		return ICE_DDP_PKG_ERR;

	pkg = (struct ice_pkg_hdr *)buf;
	state = ice_verify_pkg(pkg, len);
	if (state) {
		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
			  state);
		return state;
	}

	/* initialize package info (version, name, signing segment presence) */
	state = ice_init_pkg_info(hw, pkg);
	if (state)
		return state;

	/* if the package carries a signing segment, it must match the
	 * segment id / sign type this device expects
	 */
	if (hw->pkg_has_signing_seg &&
	    !ice_match_signing_seg(pkg, hw->pkg_seg_id, hw->pkg_sign_type))
		return ICE_DDP_PKG_ERR;

	/* before downloading the package, check package version for
	 * compatibility with driver; this also locates the ice segment (seg)
	 */
	state = ice_chk_pkg_compat(hw, pkg, &seg);
	if (state)
		return state;

	/* initialize package hints and then download package */
	ice_init_pkg_hints(hw, seg);
	state = ice_download_pkg(hw, pkg, seg);
	if (state == ICE_DDP_PKG_ALREADY_LOADED) {
		/* another PF already downloaded it - treated as success below */
		ice_debug(hw, ICE_DBG_INIT,
			  "package previously loaded - no work.\n");
		already_loaded = true;
	}

	/* Get information on the package currently loaded in HW, then make sure
	 * the driver is compatible with this version.
	 */
	if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
		state = ice_get_pkg_info(hw);
		if (!state)
			state = ice_get_ddp_pkg_state(hw, already_loaded);
	}

	if (ice_is_init_pkg_successful(state)) {
		/* cache the segment pointer; it lives inside the caller's
		 * buffer, which must therefore stay allocated (see kernel-doc)
		 */
		hw->seg = seg;
		/* on successful package download update other required
		 * registers to support the package and fill HW tables
		 * with package content.
		 */
		ice_init_pkg_regs(hw);
		ice_fill_blk_tbls(hw);
		ice_fill_hw_ptype(hw);
		ice_get_prof_index_max(hw);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", state);
	}

	return state;
}

/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer, and then calls ice_init_pkg() to
 * initialize the copied package contents.
 *
 * The copying is necessary if the package buffer supplied is constant, or if
 * the memory may disappear shortly after calling this function.
 *
 * If the package buffer resides in the data segment and can be modified, the
 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
 *
 * However, if the package buffer needs to be copied first, such as when being
 * read from a file, the caller should use ice_copy_and_init_pkg().
 *
 * This function will first copy the package buffer, before calling
 * ice_init_pkg(). The caller is free to immediately destroy the original
 * package buffer, as the new copy will be managed by this function and
 * related routines.
2294 */ 2295 enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, 2296 u32 len) 2297 { 2298 enum ice_ddp_state state; 2299 u8 *buf_copy; 2300 2301 if (!buf || !len) 2302 return ICE_DDP_PKG_ERR; 2303 2304 buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); 2305 if (!buf_copy) 2306 return ICE_DDP_PKG_ERR; 2307 2308 state = ice_init_pkg(hw, buf_copy, len); 2309 if (!ice_is_init_pkg_successful(state)) { 2310 /* Free the copy, since we failed to initialize the package */ 2311 devm_kfree(ice_hw_to_dev(hw), buf_copy); 2312 } else { 2313 /* Track the copied pkg so we can free it later */ 2314 hw->pkg_copy = buf_copy; 2315 hw->pkg_size = len; 2316 } 2317 2318 return state; 2319 } 2320 2321 /** 2322 * ice_get_set_tx_topo - get or set Tx topology 2323 * @hw: pointer to the HW struct 2324 * @buf: pointer to Tx topology buffer 2325 * @buf_size: buffer size 2326 * @cd: pointer to command details structure or NULL 2327 * @flags: pointer to descriptor flags 2328 * @set: 0-get, 1-set topology 2329 * 2330 * The function will get or set Tx topology 2331 * 2332 * Return: zero when set was successful, negative values otherwise. 
 */
static int
ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
		    struct ice_sq_cd *cd, u8 *flags, bool set)
{
	struct ice_aqc_get_set_tx_topo *cmd;
	struct libie_aq_desc desc;
	int status;

	cmd = libie_aq_raw(&desc);
	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
		cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
		/* requested to update a new topology, not a default topology */
		if (buf)
			cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
					  ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;

		desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
		cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;

		/* NOTE(review): RD flag on a *get* only for these MAC types -
		 * presumably a FW quirk of E810/generic; confirm against the
		 * AQ spec before changing
		 */
		if (hw->mac_type == ICE_MAC_E810 ||
		    hw->mac_type == ICE_MAC_GENERIC)
			desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
	}

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;
	/* read the return flag values (first byte) for get operation;
	 * set_flags is deliberate here - it is the first byte of the
	 * command structure, which carries the response flags
	 */
	if (!set && flags)
		*flags = cmd->set_flags;

	return 0;
}

/**
 * ice_cfg_tx_topo - Initialize new Tx topology if available
 * @hw: pointer to the HW struct
 * @buf: pointer to Tx topology buffer
 * @len: buffer size
 *
 * The function will apply the new Tx topology from the package buffer
 * if available.
 *
 * Return: zero when update was successful, negative values otherwise.
 */
int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
{
	/* topo is auto-freed via __free(kfree) on every return path */
	u8 *new_topo = NULL, *topo __free(kfree) = NULL;
	const struct ice_run_time_cfg_seg *seg;
	const struct ice_buf_hdr *section;
	const struct ice_pkg_hdr *pkg_hdr;
	enum ice_ddp_state state;
	u16 offset, size = 0;
	u32 reg = 0;
	int status;
	u8 flags;

	if (!buf || !len)
		return -EINVAL;

	/* Does FW support new Tx topology mode ? */
	if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
		ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
		return -EOPNOTSUPP;
	}

	topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!topo)
		return -ENOMEM;

	/* Get the current Tx topology flags */
	status = ice_get_set_tx_topo(hw, topo, ICE_AQ_MAX_BUF_LEN, NULL, &flags,
				     false);

	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
		return status;
	}

	/* Is default topology already applied ? */
	if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
	    hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) {
		ice_debug(hw, ICE_DBG_INIT, "Default topology already applied\n");
		return -EEXIST;
	}

	/* Is new topology already applied ? */
	if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
	    hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
		ice_debug(hw, ICE_DBG_INIT, "New topology already applied\n");
		return -EEXIST;
	}

	/* Setting topology already issued? */
	if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
		ice_debug(hw, ICE_DBG_INIT, "Update Tx topology was done by another PF\n");
		/* Add a small delay before exiting */
		msleep(2000);
		return -EEXIST;
	}

	/* Change the topology from new to default (5 to 9): skip package
	 * parsing and issue the set with new_topo == NULL, which requests
	 * the default topology in ice_get_set_tx_topo()
	 */
	if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
	    hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
		ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
		goto update_topo;
	}

	pkg_hdr = (const struct ice_pkg_hdr *)buf;
	state = ice_verify_pkg(pkg_hdr, len);
	if (state) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n",
			  state);
		return -EIO;
	}

	/* Find runtime configuration segment */
	seg = (const struct ice_run_time_cfg_seg *)
	      ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
	if (!seg) {
		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
		return -EIO;
	}

	/* NOTE(review): the debug format prints the raw little-endian
	 * buf_count rather than the le32_to_cpu() value used in the check
	 */
	if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
			  seg->buf_table.buf_count);
		return -EIO;
	}

	section = ice_pkg_val_buf(seg->buf_table.buf_array);
	if (!section || le32_to_cpu(section->section_entry[0].type) !=
		ICE_SID_TX_5_LAYER_TOPO) {
		ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
		return -EIO;
	}

	size = le16_to_cpu(section->section_entry[0].size);
	offset = le16_to_cpu(section->section_entry[0].offset);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
		ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
		return -EIO;
	}

	/* Make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE) {
		ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
		return -EIO;
	}

	/* Get the new topology buffer, reuse current topo copy mem */
	static_assert(ICE_PKG_BUF_SIZE == ICE_AQ_MAX_BUF_LEN);
	new_topo = topo;
	memcpy(new_topo, (u8 *)section + offset, size);

update_topo:
	/* Acquire global lock to make sure that set topology issued
	 * by one PF.
	 */
	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
		return status;
	}

	/* Check if reset was triggered already. */
	reg = rd32(hw, GLGEN_RSTAT);
	if (reg & GLGEN_RSTAT_DEVSTATE_M) {
		/* Reset is in progress, re-init the HW again */
		ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
		ice_check_reset(hw);
		/* NOTE(review): returns with the global cfg lock still held;
		 * presumably the in-progress reset clears it (as CORER does
		 * below) - verify against FW behavior
		 */
		return 0;
	}

	/* Set new topology */
	status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
	if (status) {
		/* NOTE(review): error path also returns without an explicit
		 * ice_release_res() for the global cfg lock - confirm whether
		 * FW releases it on a failed set, or the lock times out
		 */
		ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
		return status;
	}

	/* New topology is updated, delay 1 second before issuing the CORER */
	msleep(1000);
	ice_reset(hw, ICE_RESET_CORER);
	/* CORER will clear the global lock, so no explicit call
	 * required for release.
	 */

	return 0;
}