1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2024 Intel Corporation. */ 3 4 #include "ixgbe_common.h" 5 #include "ixgbe_e610.h" 6 #include "ixgbe_x550.h" 7 #include "ixgbe_type.h" 8 #include "ixgbe_x540.h" 9 #include "ixgbe_mbx.h" 10 #include "ixgbe_phy.h" 11 12 /** 13 * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should 14 * be resent 15 * @opcode: ACI opcode 16 * 17 * Check if ACI command should be sent again depending on the provided opcode. 18 * It may happen when CSR is busy during link state changes. 19 * 20 * Return: true if the sending command routine should be repeated, 21 * otherwise false. 22 */ 23 static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode) 24 { 25 switch (opcode) { 26 case ixgbe_aci_opc_disable_rxen: 27 case ixgbe_aci_opc_get_phy_caps: 28 case ixgbe_aci_opc_get_link_status: 29 case ixgbe_aci_opc_get_link_topo: 30 return true; 31 } 32 33 return false; 34 } 35 36 /** 37 * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin 38 * Command Interface 39 * @hw: pointer to the HW struct 40 * @desc: descriptor describing the command 41 * @buf: buffer to use for indirect commands (NULL for direct commands) 42 * @buf_size: size of buffer for indirect commands (0 for direct commands) 43 * 44 * Admin Command is sent using CSR by setting descriptor and buffer in specific 45 * registers. 46 * 47 * Return: the exit code of the operation. 48 * * - 0 - success. 49 * * - -EIO - CSR mechanism is not enabled. 50 * * - -EBUSY - CSR mechanism is busy. 51 * * - -EINVAL - buf_size is too big or 52 * invalid argument buf or buf_size. 53 * * - -ETIME - Admin Command X command timeout. 54 * * - -EIO - Admin Command X invalid state of HICR register or 55 * Admin Command failed because of bad opcode was returned or 56 * Admin Command failed with error Y. 
57 */ 58 static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw, 59 struct ixgbe_aci_desc *desc, 60 void *buf, u16 buf_size) 61 { 62 u16 opcode, buf_tail_size = buf_size % 4; 63 u32 *raw_desc = (u32 *)desc; 64 u32 hicr, i, buf_tail = 0; 65 bool valid_buf = false; 66 67 hw->aci.last_status = IXGBE_ACI_RC_OK; 68 69 /* It's necessary to check if mechanism is enabled */ 70 hicr = IXGBE_READ_REG(hw, IXGBE_PF_HICR); 71 72 if (!(hicr & IXGBE_PF_HICR_EN)) 73 return -EIO; 74 75 if (hicr & IXGBE_PF_HICR_C) { 76 hw->aci.last_status = IXGBE_ACI_RC_EBUSY; 77 return -EBUSY; 78 } 79 80 opcode = le16_to_cpu(desc->opcode); 81 82 if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE) 83 return -EINVAL; 84 85 if (buf) 86 desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_BUF); 87 88 if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_BUF)) { 89 if ((buf && !buf_size) || 90 (!buf && buf_size)) 91 return -EINVAL; 92 if (buf && buf_size) 93 valid_buf = true; 94 } 95 96 if (valid_buf) { 97 if (buf_tail_size) 98 memcpy(&buf_tail, buf + buf_size - buf_tail_size, 99 buf_tail_size); 100 101 if (((buf_size + 3) & ~0x3) > IXGBE_ACI_LG_BUF) 102 desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_LB); 103 104 desc->datalen = cpu_to_le16(buf_size); 105 106 if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_RD)) { 107 for (i = 0; i < buf_size / 4; i++) 108 IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), ((u32 *)buf)[i]); 109 if (buf_tail_size) 110 IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), buf_tail); 111 } 112 } 113 114 /* Descriptor is written to specific registers */ 115 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) 116 IXGBE_WRITE_REG(hw, IXGBE_PF_HIDA(i), raw_desc[i]); 117 118 /* SW has to set PF_HICR.C bit and clear PF_HICR.SV and 119 * PF_HICR_EV 120 */ 121 hicr = (IXGBE_READ_REG(hw, IXGBE_PF_HICR) | IXGBE_PF_HICR_C) & 122 ~(IXGBE_PF_HICR_SV | IXGBE_PF_HICR_EV); 123 IXGBE_WRITE_REG(hw, IXGBE_PF_HICR, hicr); 124 125 #define MAX_SLEEP_RESP_US 1000 126 #define MAX_TMOUT_RESP_SYNC_US 100000000 127 128 /* Wait for sync Admin Command response */ 
129 read_poll_timeout(IXGBE_READ_REG, hicr, 130 (hicr & IXGBE_PF_HICR_SV) || 131 !(hicr & IXGBE_PF_HICR_C), 132 MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_SYNC_US, true, hw, 133 IXGBE_PF_HICR); 134 135 #define MAX_TMOUT_RESP_ASYNC_US 150000000 136 137 /* Wait for async Admin Command response */ 138 read_poll_timeout(IXGBE_READ_REG, hicr, 139 (hicr & IXGBE_PF_HICR_EV) || 140 !(hicr & IXGBE_PF_HICR_C), 141 MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_ASYNC_US, true, hw, 142 IXGBE_PF_HICR); 143 144 /* Read sync Admin Command response */ 145 if ((hicr & IXGBE_PF_HICR_SV)) { 146 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) { 147 raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA(i)); 148 raw_desc[i] = raw_desc[i]; 149 } 150 } 151 152 /* Read async Admin Command response */ 153 if ((hicr & IXGBE_PF_HICR_EV) && !(hicr & IXGBE_PF_HICR_C)) { 154 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) { 155 raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA_2(i)); 156 raw_desc[i] = raw_desc[i]; 157 } 158 } 159 160 /* Handle timeout and invalid state of HICR register */ 161 if (hicr & IXGBE_PF_HICR_C) 162 return -ETIME; 163 164 if (!(hicr & IXGBE_PF_HICR_SV) && !(hicr & IXGBE_PF_HICR_EV)) 165 return -EIO; 166 167 /* For every command other than 0x0014 treat opcode mismatch 168 * as an error. Response to 0x0014 command read from HIDA_2 169 * is a descriptor of an event which is expected to contain 170 * different opcode than the command. 
171 */ 172 if (desc->opcode != cpu_to_le16(opcode) && 173 opcode != ixgbe_aci_opc_get_fw_event) 174 return -EIO; 175 176 if (desc->retval) { 177 hw->aci.last_status = (enum ixgbe_aci_err) 178 le16_to_cpu(desc->retval); 179 return -EIO; 180 } 181 182 /* Write a response values to a buf */ 183 if (valid_buf) { 184 for (i = 0; i < buf_size / 4; i++) 185 ((u32 *)buf)[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i)); 186 if (buf_tail_size) { 187 buf_tail = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i)); 188 memcpy(buf + buf_size - buf_tail_size, &buf_tail, 189 buf_tail_size); 190 } 191 } 192 193 return 0; 194 } 195 196 /** 197 * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface 198 * @hw: pointer to the HW struct 199 * @desc: descriptor describing the command 200 * @buf: buffer to use for indirect commands (NULL for direct commands) 201 * @buf_size: size of buffer for indirect commands (0 for direct commands) 202 * 203 * Helper function to send FW Admin Commands to the FW Admin Command Interface. 204 * 205 * Retry sending the FW Admin Command multiple times to the FW ACI 206 * if the EBUSY Admin Command error is returned. 207 * 208 * Return: the exit code of the operation. 
209 */ 210 int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc, 211 void *buf, u16 buf_size) 212 { 213 u16 opcode = le16_to_cpu(desc->opcode); 214 struct ixgbe_aci_desc desc_cpy; 215 enum ixgbe_aci_err last_status; 216 u8 idx = 0, *buf_cpy = NULL; 217 bool is_cmd_for_retry; 218 unsigned long timeout; 219 int err; 220 221 is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode); 222 if (is_cmd_for_retry) { 223 if (buf) { 224 buf_cpy = kmalloc(buf_size, GFP_KERNEL); 225 if (!buf_cpy) 226 return -ENOMEM; 227 *buf_cpy = *(u8 *)buf; 228 } 229 desc_cpy = *desc; 230 } 231 232 timeout = jiffies + msecs_to_jiffies(IXGBE_ACI_SEND_TIMEOUT_MS); 233 do { 234 mutex_lock(&hw->aci.lock); 235 err = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size); 236 last_status = hw->aci.last_status; 237 mutex_unlock(&hw->aci.lock); 238 239 if (!is_cmd_for_retry || !err || 240 last_status != IXGBE_ACI_RC_EBUSY) 241 break; 242 243 if (buf) 244 memcpy(buf, buf_cpy, buf_size); 245 *desc = desc_cpy; 246 247 msleep(IXGBE_ACI_SEND_DELAY_TIME_MS); 248 } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE && 249 time_before(jiffies, timeout)); 250 251 kfree(buf_cpy); 252 253 return err; 254 } 255 256 /** 257 * ixgbe_aci_check_event_pending - check if there are any pending events 258 * @hw: pointer to the HW struct 259 * 260 * Determine if there are any pending events. 261 * 262 * Return: true if there are any currently pending events 263 * otherwise false. 264 */ 265 bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw) 266 { 267 u32 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0; 268 u32 fwsts = IXGBE_READ_REG(hw, GL_FWSTS); 269 270 return (fwsts & ep_bit_mask) ? 
true : false; 271 } 272 273 /** 274 * ixgbe_aci_get_event - get an event from ACI 275 * @hw: pointer to the HW struct 276 * @e: event information structure 277 * @pending: optional flag signaling that there are more pending events 278 * 279 * Obtain an event from ACI and return its content 280 * through 'e' using ACI command (0x0014). 281 * Provide information if there are more events 282 * to retrieve through 'pending'. 283 * 284 * Return: the exit code of the operation. 285 */ 286 int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e, 287 bool *pending) 288 { 289 struct ixgbe_aci_desc desc; 290 int err; 291 292 if (!e || (!e->msg_buf && e->buf_len)) 293 return -EINVAL; 294 295 mutex_lock(&hw->aci.lock); 296 297 /* Check if there are any events pending */ 298 if (!ixgbe_aci_check_event_pending(hw)) { 299 err = -ENOENT; 300 goto aci_get_event_exit; 301 } 302 303 /* Obtain pending event */ 304 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event); 305 err = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len); 306 if (err) 307 goto aci_get_event_exit; 308 309 /* Returned 0x0014 opcode indicates that no event was obtained */ 310 if (desc.opcode == cpu_to_le16(ixgbe_aci_opc_get_fw_event)) { 311 err = -ENOENT; 312 goto aci_get_event_exit; 313 } 314 315 /* Determine size of event data */ 316 e->msg_len = min_t(u16, le16_to_cpu(desc.datalen), e->buf_len); 317 /* Write event descriptor to event info structure */ 318 memcpy(&e->desc, &desc, sizeof(e->desc)); 319 320 /* Check if there are any further events pending */ 321 if (pending) 322 *pending = ixgbe_aci_check_event_pending(hw); 323 324 aci_get_event_exit: 325 mutex_unlock(&hw->aci.lock); 326 327 return err; 328 } 329 330 /** 331 * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values. 
332 * @desc: pointer to the temp descriptor (non DMA mem) 333 * @opcode: the opcode can be used to decide which flags to turn off or on 334 * 335 * Helper function to fill the descriptor desc with default values 336 * and the provided opcode. 337 */ 338 void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode) 339 { 340 /* Zero out the desc. */ 341 memset(desc, 0, sizeof(*desc)); 342 desc->opcode = cpu_to_le16(opcode); 343 desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI); 344 } 345 346 /** 347 * ixgbe_aci_get_fw_ver - Get the firmware version 348 * @hw: pointer to the HW struct 349 * 350 * Get the firmware version using ACI command (0x0001). 351 * 352 * Return: the exit code of the operation. 353 */ 354 static int ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw) 355 { 356 struct ixgbe_aci_cmd_get_ver *resp; 357 struct ixgbe_aci_desc desc; 358 int err; 359 360 resp = &desc.params.get_ver; 361 362 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver); 363 364 err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0); 365 if (!err) { 366 hw->fw_branch = resp->fw_branch; 367 hw->fw_maj_ver = resp->fw_major; 368 hw->fw_min_ver = resp->fw_minor; 369 hw->fw_patch = resp->fw_patch; 370 hw->fw_build = le32_to_cpu(resp->fw_build); 371 hw->api_branch = resp->api_branch; 372 hw->api_maj_ver = resp->api_major; 373 hw->api_min_ver = resp->api_minor; 374 hw->api_patch = resp->api_patch; 375 } 376 377 return err; 378 } 379 380 /** 381 * ixgbe_aci_req_res - request a common resource 382 * @hw: pointer to the HW struct 383 * @res: resource ID 384 * @access: access type 385 * @sdp_number: resource number 386 * @timeout: the maximum time in ms that the driver may hold the resource 387 * 388 * Requests a common resource using the ACI command (0x0008). 389 * Specifies the maximum time the driver may hold the resource. 
390 * If the requested resource is currently occupied by some other driver, 391 * a busy return value is returned and the timeout field value indicates the 392 * maximum time the current owner has to free it. 393 * 394 * Return: the exit code of the operation. 395 */ 396 static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res, 397 enum ixgbe_aci_res_access_type access, 398 u8 sdp_number, u32 *timeout) 399 { 400 struct ixgbe_aci_cmd_req_res *cmd_resp; 401 struct ixgbe_aci_desc desc; 402 int err; 403 404 cmd_resp = &desc.params.res_owner; 405 406 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res); 407 408 cmd_resp->res_id = cpu_to_le16(res); 409 cmd_resp->access_type = cpu_to_le16(access); 410 cmd_resp->res_number = cpu_to_le32(sdp_number); 411 cmd_resp->timeout = cpu_to_le32(*timeout); 412 *timeout = 0; 413 414 err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0); 415 416 /* If the resource is held by some other driver, the command completes 417 * with a busy return value and the timeout field indicates the maximum 418 * time the current owner of the resource has to free it. 419 */ 420 if (!err || hw->aci.last_status == IXGBE_ACI_RC_EBUSY) 421 *timeout = le32_to_cpu(cmd_resp->timeout); 422 423 return err; 424 } 425 426 /** 427 * ixgbe_aci_release_res - release a common resource using ACI 428 * @hw: pointer to the HW struct 429 * @res: resource ID 430 * @sdp_number: resource number 431 * 432 * Release a common resource using ACI command (0x0009). 433 * 434 * Return: the exit code of the operation. 
435 */ 436 static int ixgbe_aci_release_res(struct ixgbe_hw *hw, 437 enum ixgbe_aci_res_ids res, u8 sdp_number) 438 { 439 struct ixgbe_aci_cmd_req_res *cmd; 440 struct ixgbe_aci_desc desc; 441 442 cmd = &desc.params.res_owner; 443 444 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res); 445 446 cmd->res_id = cpu_to_le16(res); 447 cmd->res_number = cpu_to_le32(sdp_number); 448 449 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); 450 } 451 452 /** 453 * ixgbe_acquire_res - acquire the ownership of a resource 454 * @hw: pointer to the HW structure 455 * @res: resource ID 456 * @access: access type (read or write) 457 * @timeout: timeout in milliseconds 458 * 459 * Make an attempt to acquire the ownership of a resource using 460 * the ixgbe_aci_req_res to utilize ACI. 461 * In case if some other driver has previously acquired the resource and 462 * performed any necessary updates, the -EALREADY is returned, 463 * and the caller does not obtain the resource and has no further work to do. 464 * If needed, the function will poll until the current lock owner timeouts. 465 * 466 * Return: the exit code of the operation. 467 */ 468 int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res, 469 enum ixgbe_aci_res_access_type access, u32 timeout) 470 { 471 #define IXGBE_RES_POLLING_DELAY_MS 10 472 u32 delay = IXGBE_RES_POLLING_DELAY_MS; 473 u32 res_timeout = timeout; 474 u32 retry_timeout; 475 int err; 476 477 err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout); 478 479 /* A return code of -EALREADY means that another driver has 480 * previously acquired the resource and performed any necessary updates; 481 * in this case the caller does not obtain the resource and has no 482 * further work to do. 483 */ 484 if (err == -EALREADY) 485 return err; 486 487 /* If necessary, poll until the current lock owner timeouts. 
488 * Set retry_timeout to the timeout value reported by the FW in the 489 * response to the "Request Resource Ownership" (0x0008) Admin Command 490 * as it indicates the maximum time the current owner of the resource 491 * is allowed to hold it. 492 */ 493 retry_timeout = res_timeout; 494 while (err && retry_timeout && res_timeout) { 495 msleep(delay); 496 retry_timeout = (retry_timeout > delay) ? 497 retry_timeout - delay : 0; 498 err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout); 499 500 /* Success - lock acquired. 501 * -EALREADY - lock free, no work to do. 502 */ 503 if (!err || err == -EALREADY) 504 break; 505 } 506 507 return err; 508 } 509 510 /** 511 * ixgbe_release_res - release a common resource 512 * @hw: pointer to the HW structure 513 * @res: resource ID 514 * 515 * Release a common resource using ixgbe_aci_release_res. 516 */ 517 void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res) 518 { 519 u32 total_delay = 0; 520 int err; 521 522 err = ixgbe_aci_release_res(hw, res, 0); 523 524 /* There are some rare cases when trying to release the resource 525 * results in an admin command timeout, so handle them correctly. 526 */ 527 while (err == -ETIME && 528 total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT) { 529 usleep_range(1000, 1500); 530 err = ixgbe_aci_release_res(hw, res, 0); 531 total_delay++; 532 } 533 } 534 535 /** 536 * ixgbe_parse_e610_caps - Parse common device/function capabilities 537 * @hw: pointer to the HW struct 538 * @caps: pointer to common capabilities structure 539 * @elem: the capability element to parse 540 * @prefix: message prefix for tracing capabilities 541 * 542 * Given a capability element, extract relevant details into the common 543 * capability structure. 544 * 545 * Return: true if the capability matches one of the common capability ids, 546 * false otherwise. 
 */
static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
				  struct ixgbe_hw_caps *caps,
				  struct ixgbe_aci_cmd_list_caps_elem *elem,
				  const char *prefix)
{
	/* Capability elements carry three little-endian payload words whose
	 * meaning depends on the capability id.
	 */
	u32 logical_id = le32_to_cpu(elem->logical_id);
	u32 phys_id = le32_to_cpu(elem->phys_id);
	u32 number = le32_to_cpu(elem->number);
	u16 cap = le16_to_cpu(elem->cap);

	/* NOTE(review): hw and prefix are currently unused here; presumably
	 * kept for tracing parity with other Intel drivers — confirm.
	 */
	switch (cap) {
	case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		break;
	case IXGBE_ACI_CAPS_SRIOV:
		caps->sr_iov_1_1 = (number == 1);
		break;
	case IXGBE_ACI_CAPS_VMDQ:
		caps->vmdq = (number == 1);
		break;
	case IXGBE_ACI_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		break;
	case IXGBE_ACI_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		break;
	case IXGBE_ACI_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_NVM_VER:
		/* Recognized but intentionally not stored */
		break;
	case IXGBE_ACI_CAPS_PENDING_NVM_VER:
		caps->nvm_update_pending_nvm = true;
		break;
	case IXGBE_ACI_CAPS_PENDING_OROM_VER:
		caps->nvm_update_pending_orom = true;
		break;
	case IXGBE_ACI_CAPS_PENDING_NET_VER:
		caps->nvm_update_pending_netlist = true;
		break;
	case IXGBE_ACI_CAPS_MAX_MTU:
		caps->max_mtu = number;
		break;
	case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
		caps->pcie_reset_avoidance = (number > 0);
		break;
	case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
		caps->reset_restrict_support = (number == 1);
		break;
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
	{
		/* The four IMG capability ids are consecutive, so the id
		 * offset selects the per-image slot.
		 */
		u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;

		caps->ext_topo_dev_img_ver_high[index] = number;
		caps->ext_topo_dev_img_ver_low[index] = logical_id;
		caps->ext_topo_dev_img_part_num[index] =
			FIELD_GET(IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M, phys_id);
		caps->ext_topo_dev_img_load_en[index] =
			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
		caps->ext_topo_dev_img_prog_en[index] =
			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
		break;
	}
	default:
		/* Not one of the recognized common capabilities */
		return false;
	}

	return true;
}

/**
 * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
 */
static void
ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
				struct ixgbe_hw_dev_caps *dev_p,
				struct ixgbe_aci_cmd_list_caps_elem *cap)
{
	/* number is a bitmap of functions; count the set bits */
	dev_p->num_funcs = hweight32(le32_to_cpu(cap->number));
}

/**
 * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse IXGBE_ACI_CAPS_VF for device capabilities.
 */
static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
				    struct ixgbe_hw_dev_caps *dev_p,
				    struct ixgbe_aci_cmd_list_caps_elem *cap)
{
	dev_p->num_vfs_exposed = le32_to_cpu(cap->number);
}

/**
 * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
672 */ 673 static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw, 674 struct ixgbe_hw_dev_caps *dev_p, 675 struct ixgbe_aci_cmd_list_caps_elem *cap) 676 { 677 dev_p->num_vsi_allocd_to_host = le32_to_cpu(cap->number); 678 } 679 680 /** 681 * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps 682 * @hw: pointer to the HW struct 683 * @dev_p: pointer to device capabilities structure 684 * @cap: capability element to parse 685 * 686 * Parse IXGBE_ACI_CAPS_FD for device capabilities. 687 */ 688 static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw, 689 struct ixgbe_hw_dev_caps *dev_p, 690 struct ixgbe_aci_cmd_list_caps_elem *cap) 691 { 692 dev_p->num_flow_director_fltr = le32_to_cpu(cap->number); 693 } 694 695 /** 696 * ixgbe_parse_dev_caps - Parse device capabilities 697 * @hw: pointer to the HW struct 698 * @dev_p: pointer to device capabilities structure 699 * @buf: buffer containing the device capability records 700 * @cap_count: the number of capabilities 701 * 702 * Helper device to parse device (0x000B) capabilities list. For 703 * capabilities shared between device and function, this relies on 704 * ixgbe_parse_e610_caps. 705 * 706 * Loop through the list of provided capabilities and extract the relevant 707 * data into the device capabilities structured. 
708 */ 709 static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw, 710 struct ixgbe_hw_dev_caps *dev_p, 711 void *buf, u32 cap_count) 712 { 713 struct ixgbe_aci_cmd_list_caps_elem *cap_resp; 714 u32 i; 715 716 cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf; 717 718 memset(dev_p, 0, sizeof(*dev_p)); 719 720 for (i = 0; i < cap_count; i++) { 721 u16 cap = le16_to_cpu(cap_resp[i].cap); 722 723 ixgbe_parse_e610_caps(hw, &dev_p->common_cap, &cap_resp[i], 724 "dev caps"); 725 726 switch (cap) { 727 case IXGBE_ACI_CAPS_VALID_FUNCTIONS: 728 ixgbe_parse_valid_functions_cap(hw, dev_p, 729 &cap_resp[i]); 730 break; 731 case IXGBE_ACI_CAPS_VF: 732 ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 733 break; 734 case IXGBE_ACI_CAPS_VSI: 735 ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 736 break; 737 case IXGBE_ACI_CAPS_FD: 738 ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 739 break; 740 default: 741 /* Don't list common capabilities as unknown */ 742 break; 743 } 744 } 745 } 746 747 /** 748 * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps 749 * @hw: pointer to the HW struct 750 * @func_p: pointer to function capabilities structure 751 * @cap: pointer to the capability element to parse 752 * 753 * Extract function capabilities for IXGBE_ACI_CAPS_VF. 754 */ 755 static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw, 756 struct ixgbe_hw_func_caps *func_p, 757 struct ixgbe_aci_cmd_list_caps_elem *cap) 758 { 759 func_p->num_allocd_vfs = le32_to_cpu(cap->number); 760 func_p->vf_base_id = le32_to_cpu(cap->logical_id); 761 } 762 763 /** 764 * ixgbe_get_num_per_func - determine number of resources per PF 765 * @hw: pointer to the HW structure 766 * @max: value to be evenly split between each PF 767 * 768 * Determine the number of valid functions by going through the bitmap returned 769 * from parsing capabilities and use this to calculate the number of resources 770 * per PF based on the max value passed in. 
771 * 772 * Return: the number of resources per PF or 0, if no PH are available. 773 */ 774 static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max) 775 { 776 #define IXGBE_CAPS_VALID_FUNCS_M GENMASK(7, 0) 777 u8 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 778 IXGBE_CAPS_VALID_FUNCS_M); 779 780 return funcs ? (max / funcs) : 0; 781 } 782 783 /** 784 * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps 785 * @hw: pointer to the HW struct 786 * @func_p: pointer to function capabilities structure 787 * @cap: pointer to the capability element to parse 788 * 789 * Extract function capabilities for IXGBE_ACI_CAPS_VSI. 790 */ 791 static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw, 792 struct ixgbe_hw_func_caps *func_p, 793 struct ixgbe_aci_cmd_list_caps_elem *cap) 794 { 795 func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI); 796 } 797 798 /** 799 * ixgbe_parse_func_caps - Parse function capabilities 800 * @hw: pointer to the HW struct 801 * @func_p: pointer to function capabilities structure 802 * @buf: buffer containing the function capability records 803 * @cap_count: the number of capabilities 804 * 805 * Helper function to parse function (0x000A) capabilities list. For 806 * capabilities shared between device and function, this relies on 807 * ixgbe_parse_e610_caps. 808 * 809 * Loop through the list of provided capabilities and extract the relevant 810 * data into the function capabilities structured. 
811 */ 812 static void ixgbe_parse_func_caps(struct ixgbe_hw *hw, 813 struct ixgbe_hw_func_caps *func_p, 814 void *buf, u32 cap_count) 815 { 816 struct ixgbe_aci_cmd_list_caps_elem *cap_resp; 817 u32 i; 818 819 cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf; 820 821 memset(func_p, 0, sizeof(*func_p)); 822 823 for (i = 0; i < cap_count; i++) { 824 u16 cap = le16_to_cpu(cap_resp[i].cap); 825 826 ixgbe_parse_e610_caps(hw, &func_p->common_cap, 827 &cap_resp[i], "func caps"); 828 829 switch (cap) { 830 case IXGBE_ACI_CAPS_VF: 831 ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 832 break; 833 case IXGBE_ACI_CAPS_VSI: 834 ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 835 break; 836 default: 837 /* Don't list common capabilities as unknown */ 838 break; 839 } 840 } 841 } 842 843 /** 844 * ixgbe_aci_list_caps - query function/device capabilities 845 * @hw: pointer to the HW struct 846 * @buf: a buffer to hold the capabilities 847 * @buf_size: size of the buffer 848 * @cap_count: if not NULL, set to the number of capabilities reported 849 * @opc: capabilities type to discover, device or function 850 * 851 * Get the function (0x000A) or device (0x000B) capabilities description from 852 * firmware and store it in the buffer. 853 * 854 * If the cap_count pointer is not NULL, then it is set to the number of 855 * capabilities firmware will report. Note that if the buffer size is too 856 * small, it is possible the command will return -ENOMEM. The 857 * cap_count will still be updated in this case. It is recommended that the 858 * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible 859 * buffer that firmware could return) to avoid this. 860 * 861 * Return: the exit code of the operation. 862 * Exit code of -ENOMEM means the buffer size is too small. 
863 */ 864 int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size, 865 u32 *cap_count, enum ixgbe_aci_opc opc) 866 { 867 struct ixgbe_aci_cmd_list_caps *cmd; 868 struct ixgbe_aci_desc desc; 869 int err; 870 871 cmd = &desc.params.get_cap; 872 873 if (opc != ixgbe_aci_opc_list_func_caps && 874 opc != ixgbe_aci_opc_list_dev_caps) 875 return -EINVAL; 876 877 ixgbe_fill_dflt_direct_cmd_desc(&desc, opc); 878 err = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size); 879 880 if (cap_count) 881 *cap_count = le32_to_cpu(cmd->count); 882 883 return err; 884 } 885 886 /** 887 * ixgbe_discover_dev_caps - Read and extract device capabilities 888 * @hw: pointer to the hardware structure 889 * @dev_caps: pointer to device capabilities structure 890 * 891 * Read the device capabilities and extract them into the dev_caps structure 892 * for later use. 893 * 894 * Return: the exit code of the operation. 895 */ 896 int ixgbe_discover_dev_caps(struct ixgbe_hw *hw, 897 struct ixgbe_hw_dev_caps *dev_caps) 898 { 899 u32 cap_count; 900 u8 *cbuf; 901 int err; 902 903 cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL); 904 if (!cbuf) 905 return -ENOMEM; 906 907 /* Although the driver doesn't know the number of capabilities the 908 * device will return, we can simply send a 4KB buffer, the maximum 909 * possible size that firmware can return. 910 */ 911 cap_count = IXGBE_ACI_MAX_BUFFER_SIZE / 912 sizeof(struct ixgbe_aci_cmd_list_caps_elem); 913 914 err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE, 915 &cap_count, 916 ixgbe_aci_opc_list_dev_caps); 917 if (!err) 918 ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 919 920 kfree(cbuf); 921 922 return 0; 923 } 924 925 /** 926 * ixgbe_discover_func_caps - Read and extract function capabilities 927 * @hw: pointer to the hardware structure 928 * @func_caps: pointer to function capabilities structure 929 * 930 * Read the function capabilities and extract them into the func_caps structure 931 * for later use. 
932 * 933 * Return: the exit code of the operation. 934 */ 935 int ixgbe_discover_func_caps(struct ixgbe_hw *hw, 936 struct ixgbe_hw_func_caps *func_caps) 937 { 938 u32 cap_count; 939 u8 *cbuf; 940 int err; 941 942 cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL); 943 if (!cbuf) 944 return -ENOMEM; 945 946 /* Although the driver doesn't know the number of capabilities the 947 * device will return, we can simply send a 4KB buffer, the maximum 948 * possible size that firmware can return. 949 */ 950 cap_count = IXGBE_ACI_MAX_BUFFER_SIZE / 951 sizeof(struct ixgbe_aci_cmd_list_caps_elem); 952 953 err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE, 954 &cap_count, 955 ixgbe_aci_opc_list_func_caps); 956 if (!err) 957 ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count); 958 959 kfree(cbuf); 960 961 return 0; 962 } 963 964 /** 965 * ixgbe_get_caps - get info about the HW 966 * @hw: pointer to the hardware structure 967 * 968 * Retrieve both device and function capabilities. 969 * 970 * Return: the exit code of the operation. 971 */ 972 int ixgbe_get_caps(struct ixgbe_hw *hw) 973 { 974 int err; 975 976 err = ixgbe_discover_dev_caps(hw, &hw->dev_caps); 977 if (err) 978 return err; 979 980 return ixgbe_discover_func_caps(hw, &hw->func_caps); 981 } 982 983 /** 984 * ixgbe_aci_disable_rxen - disable RX 985 * @hw: pointer to the HW struct 986 * 987 * Request a safe disable of Receive Enable using ACI command (0x000C). 988 * 989 * Return: the exit code of the operation. 
990 */ 991 int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw) 992 { 993 struct ixgbe_aci_cmd_disable_rxen *cmd; 994 struct ixgbe_aci_desc desc; 995 996 cmd = &desc.params.disable_rxen; 997 998 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen); 999 1000 cmd->lport_num = hw->bus.func; 1001 1002 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); 1003 } 1004 1005 /** 1006 * ixgbe_aci_get_phy_caps - returns PHY capabilities 1007 * @hw: pointer to the HW struct 1008 * @qual_mods: report qualified modules 1009 * @report_mode: report mode capabilities 1010 * @pcaps: structure for PHY capabilities to be filled 1011 * 1012 * Returns the various PHY capabilities supported on the Port 1013 * using ACI command (0x0600). 1014 * 1015 * Return: the exit code of the operation. 1016 */ 1017 int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode, 1018 struct ixgbe_aci_cmd_get_phy_caps_data *pcaps) 1019 { 1020 struct ixgbe_aci_cmd_get_phy_caps *cmd; 1021 u16 pcaps_size = sizeof(*pcaps); 1022 struct ixgbe_aci_desc desc; 1023 int err; 1024 1025 cmd = &desc.params.get_phy; 1026 1027 if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M)) 1028 return -EINVAL; 1029 1030 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps); 1031 1032 if (qual_mods) 1033 cmd->param0 |= cpu_to_le16(IXGBE_ACI_GET_PHY_RQM); 1034 1035 cmd->param0 |= cpu_to_le16(report_mode); 1036 err = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size); 1037 if (!err && report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) { 1038 hw->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); 1039 hw->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); 1040 memcpy(hw->link.link_info.module_type, &pcaps->module_type, 1041 sizeof(hw->link.link_info.module_type)); 1042 } 1043 1044 return err; 1045 } 1046 1047 /** 1048 * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data 1049 * @caps: PHY ability structure to copy data from 1050 * @cfg: PHY configuration structure to copy 
data to 1051 * 1052 * Helper function to copy data from PHY capabilities data structure 1053 * to PHY configuration data structure 1054 */ 1055 void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps, 1056 struct ixgbe_aci_cmd_set_phy_cfg_data *cfg) 1057 { 1058 if (!caps || !cfg) 1059 return; 1060 1061 memset(cfg, 0, sizeof(*cfg)); 1062 cfg->phy_type_low = caps->phy_type_low; 1063 cfg->phy_type_high = caps->phy_type_high; 1064 cfg->caps = caps->caps; 1065 cfg->low_power_ctrl_an = caps->low_power_ctrl_an; 1066 cfg->eee_cap = caps->eee_cap; 1067 cfg->eeer_value = caps->eeer_value; 1068 cfg->link_fec_opt = caps->link_fec_options; 1069 cfg->module_compliance_enforcement = 1070 caps->module_compliance_enforcement; 1071 } 1072 1073 /** 1074 * ixgbe_aci_set_phy_cfg - set PHY configuration 1075 * @hw: pointer to the HW struct 1076 * @cfg: structure with PHY configuration data to be set 1077 * 1078 * Set the various PHY configuration parameters supported on the Port 1079 * using ACI command (0x0601). 1080 * One or more of the Set PHY config parameters may be ignored in an MFP 1081 * mode as the PF may not have the privilege to set some of the PHY Config 1082 * parameters. 1083 * 1084 * Return: the exit code of the operation. 1085 */ 1086 int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw, 1087 struct ixgbe_aci_cmd_set_phy_cfg_data *cfg) 1088 { 1089 struct ixgbe_aci_desc desc; 1090 int err; 1091 1092 if (!cfg) 1093 return -EINVAL; 1094 1095 /* Ensure that only valid bits of cfg->caps can be turned on. 
*/ 1096 cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK; 1097 1098 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg); 1099 desc.params.set_phy.lport_num = hw->bus.func; 1100 desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD); 1101 1102 err = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg)); 1103 if (!err) 1104 hw->phy.curr_user_phy_cfg = *cfg; 1105 1106 return err; 1107 } 1108 1109 /** 1110 * ixgbe_aci_set_link_restart_an - set up link and restart AN 1111 * @hw: pointer to the HW struct 1112 * @ena_link: if true: enable link, if false: disable link 1113 * 1114 * Function sets up the link and restarts the Auto-Negotiation over the link. 1115 * 1116 * Return: the exit code of the operation. 1117 */ 1118 int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link) 1119 { 1120 struct ixgbe_aci_cmd_restart_an *cmd; 1121 struct ixgbe_aci_desc desc; 1122 1123 cmd = &desc.params.restart_an; 1124 1125 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an); 1126 1127 cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART; 1128 cmd->lport_num = hw->bus.func; 1129 if (ena_link) 1130 cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE; 1131 else 1132 cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE; 1133 1134 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); 1135 } 1136 1137 /** 1138 * ixgbe_is_media_cage_present - check if media cage is present 1139 * @hw: pointer to the HW struct 1140 * 1141 * Identify presence of media cage using the ACI command (0x06E0). 1142 * 1143 * Return: true if media cage is present, else false. If no cage, then 1144 * media type is backplane or BASE-T. 
1145 */ 1146 static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw) 1147 { 1148 struct ixgbe_aci_cmd_get_link_topo *cmd; 1149 struct ixgbe_aci_desc desc; 1150 1151 cmd = &desc.params.get_link_topo; 1152 1153 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo); 1154 1155 cmd->addr.topo_params.node_type_ctx = 1156 FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_CTX_M, 1157 IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT); 1158 1159 /* Set node type. */ 1160 cmd->addr.topo_params.node_type_ctx |= 1161 FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M, 1162 IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE); 1163 1164 /* Node type cage can be used to determine if cage is present. If AQC 1165 * returns error (ENOENT), then no cage present. If no cage present then 1166 * connection type is backplane or BASE-T. 1167 */ 1168 return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL); 1169 } 1170 1171 /** 1172 * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type 1173 * @hw: pointer to the HW struct 1174 * 1175 * Try to identify the media type based on the phy type. 1176 * If more than one media type, the ixgbe_media_type_unknown is returned. 1177 * First, phy_type_low is checked, then phy_type_high. 1178 * If none are identified, the ixgbe_media_type_unknown is returned 1179 * 1180 * Return: type of a media based on phy type in form of enum. 
 */
static enum ixgbe_media_type
ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
{
	struct ixgbe_link_status *hw_link_info;

	if (!hw)
		return ixgbe_media_type_unknown;

	hw_link_info = &hw->link.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ixgbe_media_type_unknown;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ixgbe_media_type_da;

		switch (hw_link_info->phy_type_low) {
		/* Optical PHY types report as fiber. */
		case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
		case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
		case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
		case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_SR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_LR:
			return ixgbe_media_type_fiber;
		/* Active optical/copper cable assemblies also map to fiber. */
		case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
			return ixgbe_media_type_fiber;
		case IXGBE_PHY_TYPE_LOW_100BASE_TX:
		case IXGBE_PHY_TYPE_LOW_1000BASE_T:
		case IXGBE_PHY_TYPE_LOW_2500BASE_T:
		case IXGBE_PHY_TYPE_LOW_5GBASE_T:
		case IXGBE_PHY_TYPE_LOW_10GBASE_T:
		case IXGBE_PHY_TYPE_LOW_25GBASE_T:
			return ixgbe_media_type_copper;
		/* Direct-attach copper cables. */
		case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
		case IXGBE_PHY_TYPE_LOW_25GBASE_CR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_CR_S:
		case IXGBE_PHY_TYPE_LOW_25GBASE_CR1:
			return ixgbe_media_type_da;
		case IXGBE_PHY_TYPE_LOW_25G_AUI_C2C:
			/* 25G AUI is AUI only when a media cage exists;
			 * otherwise treat it like the backplane types below.
			 */
			if (ixgbe_is_media_cage_present(hw))
				return ixgbe_media_type_aui;
			fallthrough;
		case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
		case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
		case IXGBE_PHY_TYPE_LOW_2500BASE_X:
		case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
		case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
		case IXGBE_PHY_TYPE_LOW_25GBASE_KR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_KR1:
		case IXGBE_PHY_TYPE_LOW_25GBASE_KR_S:
			return ixgbe_media_type_backplane;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case IXGBE_PHY_TYPE_HIGH_10BASE_T:
			return ixgbe_media_type_copper;
		}
	}
	return ixgbe_media_type_unknown;
}

/**
 * ixgbe_update_link_info - update status of the HW network link
 * @hw: pointer to the HW struct
 *
 * Update the status of the HW network link.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_update_link_info(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
	struct ixgbe_link_status *li;
	int err;

	if (!hw)
		return -EINVAL;

	li = &hw->link.link_info;

	err = ixgbe_aci_get_link_info(hw, true, NULL);
	if (err)
		return err;

	/* Module information is only meaningful when media is present. */
	if (!(li->link_info & IXGBE_ACI_MEDIA_AVAILABLE))
		return 0;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
				     pcaps);

	if (!err)
		memcpy(li->module_type, &pcaps->module_type,
		       sizeof(li->module_type));

	kfree(pcaps);

	return err;
}

/**
 * ixgbe_get_link_status - get status of the HW network link
 * @hw: pointer to the HW struct
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non zero. As a
 * result of this call, link status reporting becomes enabled
 *
 * Return: the exit code of the operation.
 */
int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
{
	if (!hw || !link_up)
		return -EINVAL;

	/* Only query FW when the cached link info is marked stale. */
	if (hw->link.get_link_info) {
		int err = ixgbe_update_link_info(hw);

		if (err)
			return err;
	}

	*link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;

	return 0;
}

/**
 * ixgbe_aci_get_link_info - get the link status
 * @hw: pointer to the HW struct
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 *
 * Get the current Link Status using ACI command (0x607).
 * The current link can be optionally provided to update
 * the status.
 *
 * Return: the link status of the adapter.
 */
int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
			    struct ixgbe_link_status *link)
{
	struct ixgbe_aci_cmd_get_link_status_data link_data = {};
	struct ixgbe_aci_cmd_get_link_status *resp;
	struct ixgbe_link_status *li_old, *li;
	struct ixgbe_fc_info *hw_fc_info;
	struct ixgbe_aci_desc desc;
	bool tx_pause, rx_pause;
	u8 cmd_flags;
	int err;

	if (!hw)
		return -EINVAL;

	li_old = &hw->link.link_info_old;
	li = &hw->link.link_info;
	hw_fc_info = &hw->fc;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
	cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = hw->bus.func;

	err = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
	if (err)
		return err;

	/* Save off old link status information. */
	*li_old = *li;

	/* Update current link status information, converting the FW's
	 * little-endian response fields to host order as needed.
	 */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
				      IXGBE_ACI_CFG_PACING_TYPE_M);

	/* Update fc info, deriving the flow-control mode from the
	 * negotiated pause bits.
	 */
	tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ixgbe_fc_full;
	else if (tx_pause)
		hw_fc_info->current_mode = ixgbe_fc_tx_pause;
	else if (rx_pause)
		hw_fc_info->current_mode = ixgbe_fc_rx_pause;
	else
		hw_fc_info->current_mode = ixgbe_fc_none;

	li->lse_ena = !!(le16_to_cpu(resp->cmd_flags) &
			 IXGBE_ACI_LSE_IS_ENABLED);

	/* Save link status information. */
	if (link)
		*link = *li;

	/* Flag cleared so calling functions don't call AQ again. */
	hw->link.get_link_info = false;

	return 0;
}

/**
 * ixgbe_aci_set_event_mask - set event mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 *
 * Set the event mask using ACI command (0x0613).
 *
 * Return: the exit code of the operation.
1417 */ 1418 int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask) 1419 { 1420 struct ixgbe_aci_cmd_set_event_mask *cmd; 1421 struct ixgbe_aci_desc desc; 1422 1423 cmd = &desc.params.set_event_mask; 1424 1425 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask); 1426 1427 cmd->lport_num = port_num; 1428 1429 cmd->event_mask = cpu_to_le16(mask); 1430 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); 1431 } 1432 1433 /** 1434 * ixgbe_configure_lse - enable/disable link status events 1435 * @hw: pointer to the HW struct 1436 * @activate: true for enable lse, false otherwise 1437 * @mask: event mask to be set; a set bit means deactivation of the 1438 * corresponding event 1439 * 1440 * Set the event mask and then enable or disable link status events 1441 * 1442 * Return: the exit code of the operation. 1443 */ 1444 int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask) 1445 { 1446 int err; 1447 1448 err = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask); 1449 if (err) 1450 return err; 1451 1452 /* Enabling link status events generation by fw. */ 1453 return ixgbe_aci_get_link_info(hw, activate, NULL); 1454 } 1455 1456 /** 1457 * ixgbe_start_hw_e610 - Prepare hardware for Tx/Rx 1458 * @hw: pointer to hardware structure 1459 * 1460 * Get firmware version and start the hardware using the generic 1461 * start_hw() and ixgbe_start_hw_gen2() functions. 1462 * 1463 * Return: the exit code of the operation. 
 */
static int ixgbe_start_hw_e610(struct ixgbe_hw *hw)
{
	int err;

	err = ixgbe_aci_get_fw_ver(hw);
	if (err)
		return err;

	err = ixgbe_start_hw_generic(hw);
	if (err)
		return err;

	ixgbe_start_hw_gen2(hw);

	return 0;
}

/**
 * ixgbe_get_media_type_e610 - Gets media type
 * @hw: pointer to the HW struct
 *
 * In order to get the media type, the function gets PHY
 * capabilities and later on use them to identify the PHY type
 * checking phy_type_high and phy_type_low.
 *
 * Return: the type of media in form of ixgbe_media_type enum
 * or ixgbe_media_type_unknown in case of an error.
 */
enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
	int rc;

	rc = ixgbe_update_link_info(hw);
	if (rc)
		return ixgbe_media_type_unknown;

	/* If there is no link but PHY (dongle) is available SW should use
	 * Get PHY Caps admin command instead of Get Link Status, find most
	 * significant bit that is set in PHY types reported by the command
	 * and use it to discover media type.
	 */
	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
	    (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
		int highest_bit;

		/* Get PHY Capabilities */
		rc = ixgbe_aci_get_phy_caps(hw, false,
					    IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
					    &pcaps);
		if (rc)
			return ixgbe_media_type_unknown;

		/* Prefer phy_type_high; fall back to phy_type_low when no
		 * high bits are set. fls64() is 1-based, hence the -1.
		 */
		highest_bit = fls64(le64_to_cpu(pcaps.phy_type_high));
		if (highest_bit) {
			hw->link.link_info.phy_type_high =
				BIT_ULL(highest_bit - 1);
			hw->link.link_info.phy_type_low = 0;
		} else {
			highest_bit = fls64(le64_to_cpu(pcaps.phy_type_low));
			if (highest_bit) {
				hw->link.link_info.phy_type_low =
					BIT_ULL(highest_bit - 1);
				hw->link.link_info.phy_type_high = 0;
			}
		}
	}

	/* Based on link status or search above try to discover media type. */
	hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);

	return hw->phy.media_type;
}

/**
 * ixgbe_setup_link_e610 - Set up link
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait: true when waiting for completion is needed
 *
 * Set up the link with the specified speed.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
			  bool autoneg_wait)
{
	/* Simply request FW to perform proper PHY setup */
	return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
}

/**
 * ixgbe_check_link_e610 - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true when link is up
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Determine if the link is up and the current link speed
 * using ACI command (0x0607).
 *
 * Return: the exit code of the operation.
 */
int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
			  bool *link_up, bool link_up_wait_to_complete)
{
	int err;
	u32 i;

	if (!speed || !link_up)
		return -EINVAL;

	/* Set get_link_info flag to ensure that fresh
	 * link information will be obtained from FW
	 * by sending Get Link Status admin command.
	 */
	hw->link.get_link_info = true;

	/* Update link information in adapter context. */
	err = ixgbe_get_link_status(hw, link_up);
	if (err)
		return err;

	/* Wait for link up if it was requested. Poll in 100 ms steps up to
	 * the MAC's configured maximum link-up time.
	 */
	if (link_up_wait_to_complete && !(*link_up)) {
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			msleep(100);
			hw->link.get_link_info = true;
			err = ixgbe_get_link_status(hw, link_up);
			if (err)
				return err;
			if (*link_up)
				break;
		}
	}

	/* Use link information in adapter context updated by the call
	 * to ixgbe_get_link_status() to determine current link speed.
	 * Link speed information is valid only when link up was
	 * reported by FW.
	 */
	if (*link_up) {
		switch (hw->link.link_info.link_speed) {
		case IXGBE_ACI_LINK_SPEED_10MB:
			*speed = IXGBE_LINK_SPEED_10_FULL;
			break;
		case IXGBE_ACI_LINK_SPEED_100MB:
			*speed = IXGBE_LINK_SPEED_100_FULL;
			break;
		case IXGBE_ACI_LINK_SPEED_1000MB:
			*speed = IXGBE_LINK_SPEED_1GB_FULL;
			break;
		case IXGBE_ACI_LINK_SPEED_2500MB:
			*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
			break;
		case IXGBE_ACI_LINK_SPEED_5GB:
			*speed = IXGBE_LINK_SPEED_5GB_FULL;
			break;
		case IXGBE_ACI_LINK_SPEED_10GB:
			*speed = IXGBE_LINK_SPEED_10GB_FULL;
			break;
		default:
			*speed = IXGBE_LINK_SPEED_UNKNOWN;
			break;
		}
	} else {
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return 0;
}

/**
 * ixgbe_get_link_capabilities_e610 - Determine link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: true when autoneg or autotry is enabled
 *
 * Determine speed and AN parameters of a link.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *autoneg)
{
	if (!speed || !autoneg)
		return -EINVAL;

	/* E610 PHYs are firmware-managed; autoneg is always reported. */
	*autoneg = true;
	*speed = hw->phy.speeds_supported;

	return 0;
}

/**
 * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
 * @hw: pointer to hardware structure
 * @cfg: PHY configuration data to set FC mode
 * @req_mode: FC mode to configure
 *
 * Configures PHY Flow Control according to the provided configuration.
 *
 * Return: the exit code of the operation.
1669 */ 1670 int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw, 1671 struct ixgbe_aci_cmd_set_phy_cfg_data *cfg, 1672 enum ixgbe_fc_mode req_mode) 1673 { 1674 u8 pause_mask = 0x0; 1675 1676 if (!cfg) 1677 return -EINVAL; 1678 1679 switch (req_mode) { 1680 case ixgbe_fc_full: 1681 pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE; 1682 pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE; 1683 break; 1684 case ixgbe_fc_rx_pause: 1685 pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE; 1686 break; 1687 case ixgbe_fc_tx_pause: 1688 pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE; 1689 break; 1690 default: 1691 break; 1692 } 1693 1694 /* Clear the old pause settings. */ 1695 cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE | 1696 IXGBE_ACI_PHY_EN_RX_LINK_PAUSE); 1697 1698 /* Set the new capabilities. */ 1699 cfg->caps |= pause_mask; 1700 1701 return 0; 1702 } 1703 1704 /** 1705 * ixgbe_setup_fc_e610 - Set up flow control 1706 * @hw: pointer to hardware structure 1707 * 1708 * Set up flow control. This has to be done during init time. 1709 * 1710 * Return: the exit code of the operation. 
1711 */ 1712 int ixgbe_setup_fc_e610(struct ixgbe_hw *hw) 1713 { 1714 struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {}; 1715 struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {}; 1716 int err; 1717 1718 /* Get the current PHY config */ 1719 err = ixgbe_aci_get_phy_caps(hw, false, 1720 IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps); 1721 if (err) 1722 return err; 1723 1724 ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg); 1725 1726 /* Configure the set PHY data */ 1727 err = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode); 1728 if (err) 1729 return err; 1730 1731 /* If the capabilities have changed, then set the new config */ 1732 if (cfg.caps != pcaps.caps) { 1733 cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT; 1734 1735 err = ixgbe_aci_set_phy_cfg(hw, &cfg); 1736 if (err) 1737 return err; 1738 } 1739 1740 return err; 1741 } 1742 1743 /** 1744 * ixgbe_fc_autoneg_e610 - Configure flow control 1745 * @hw: pointer to hardware structure 1746 * 1747 * Configure Flow Control. 1748 */ 1749 void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw) 1750 { 1751 int err; 1752 1753 /* Get current link err. 1754 * Current FC mode will be stored in the hw context. 1755 */ 1756 err = ixgbe_aci_get_link_info(hw, false, NULL); 1757 if (err) 1758 goto no_autoneg; 1759 1760 /* Check if the link is up */ 1761 if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)) 1762 goto no_autoneg; 1763 1764 /* Check if auto-negotiation has completed */ 1765 if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED)) 1766 goto no_autoneg; 1767 1768 hw->fc.fc_was_autonegged = true; 1769 return; 1770 1771 no_autoneg: 1772 hw->fc.fc_was_autonegged = false; 1773 hw->fc.current_mode = hw->fc.requested_mode; 1774 } 1775 1776 /** 1777 * ixgbe_disable_rx_e610 - Disable RX unit 1778 * @hw: pointer to hardware structure 1779 * 1780 * Disable RX DMA unit on E610 with use of ACI command (0x000C). 1781 * 1782 * Return: the exit code of the operation. 
 */
void ixgbe_disable_rx_e610(struct ixgbe_hw *hw)
{
	u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	u32 pfdtxgswc;
	int err;

	/* Nothing to do if RX is already disabled. */
	if (!(rxctrl & IXGBE_RXCTRL_RXEN))
		return;

	/* Disable VM-to-VM loopback while RX is off, remembering whether it
	 * was enabled so it can be restored later via hw->mac.set_lben.
	 */
	pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
	if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
		pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
		hw->mac.set_lben = true;
	} else {
		hw->mac.set_lben = false;
	}

	err = ixgbe_aci_disable_rxen(hw);

	/* If we fail - disable RX using register write */
	if (err) {
		rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
		if (rxctrl & IXGBE_RXCTRL_RXEN) {
			rxctrl &= ~IXGBE_RXCTRL_RXEN;
			IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
		}
	}
}

/**
 * ixgbe_init_phy_ops_e610 - PHY specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY type was not known.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;

	/* PHY power control is only meaningful for copper media. */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
		phy->ops.set_phy_power = ixgbe_set_phy_power_e610;
	else
		phy->ops.set_phy_power = NULL;

	/* Identify the PHY */
	return phy->ops.identify(hw);
}

/**
 * ixgbe_identify_phy_e610 - Identify PHY
 * @hw: pointer to hardware structure
 *
 * Determine PHY type, supported speeds and PHY ID.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
	u64 phy_type_low, phy_type_high;
	int err;

	/* Set PHY type */
	hw->phy.type = ixgbe_phy_fw;

	err = ixgbe_aci_get_phy_caps(hw, false,
				     IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
	if (err)
		return err;

	if (!(pcaps.module_compliance_enforcement &
	      IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
		/* Handle lenient mode */
		err = ixgbe_aci_get_phy_caps(hw, false,
					     IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
					     &pcaps);
		if (err)
			return err;
	}

	/* Determine supported speeds by mapping the reported PHY type bits
	 * onto the driver's link-speed flags.
	 */
	hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
	phy_type_high = le64_to_cpu(pcaps.phy_type_high);
	phy_type_low = le64_to_cpu(pcaps.phy_type_low);

	if (phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
	if (phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
	if (phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
	if (phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;

	/* 2.5 and 5 Gbps link speeds must be excluded from the
	 * auto-negotiation set used during driver initialization due to
	 * compatibility issues with certain switches. Those issues do not
	 * exist in case of E610 2.5G SKU device (0x57b1).
	 */
	if (!hw->phy.autoneg_advertised &&
	    hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
		hw->phy.autoneg_advertised = hw->phy.speeds_supported;

	if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;

	/* The 2.5G SKU advertises everything, including 2.5G, by default. */
	if (!hw->phy.autoneg_advertised &&
	    hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
		hw->phy.autoneg_advertised = hw->phy.speeds_supported;

	if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;

	/* Set PHY ID */
	memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));

	hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
				       IXGBE_LINK_SPEED_100_FULL |
				       IXGBE_LINK_SPEED_1GB_FULL;
	hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;

	return 0;
}

/**
 * ixgbe_identify_module_e610 - Identify SFP module type
 * @hw: pointer to hardware structure
 *
 * Identify the SFP module type.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_identify_module_e610(struct ixgbe_hw *hw)
{
	bool media_available;
	u8 module_type;
	int err;

	err = ixgbe_update_link_info(hw);
	if (err)
		return err;

	media_available =
		(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE);

	if (media_available) {
		hw->phy.sfp_type = ixgbe_sfp_type_unknown;

		/* Get module type from hw context updated by
		 * ixgbe_update_link_info()
		 */
		module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];

		if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
		    (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
		} else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
			hw->phy.sfp_type = ixgbe_sfp_type_sr;
		} else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
			   (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
			hw->phy.sfp_type = ixgbe_sfp_type_lr;
		}
	} else {
		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
		return -ENOENT;
	}

	return 0;
}

/**
 * ixgbe_setup_phy_link_e610 - Sets up firmware-controlled PHYs
 * @hw: pointer to hardware structure
 *
 * Set the parameters for the firmware-controlled PHYs.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
	struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
	u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
	u64 sup_phy_type_low, sup_phy_type_high;
	u64 phy_type_low = 0, phy_type_high = 0;
	int err;

	err = ixgbe_aci_get_link_info(hw, false, NULL);
	if (err)
		return err;

	/* If media is not available get default config. */
	if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
		rmode = IXGBE_ACI_REPORT_DFLT_CFG;

	err = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
	if (err)
		return err;

	sup_phy_type_low = le64_to_cpu(pcaps.phy_type_low);
	sup_phy_type_high = le64_to_cpu(pcaps.phy_type_high);

	/* Get Active configuration to avoid unintended changes. */
	err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
				     &pcaps);
	if (err)
		return err;

	ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);

	/* Translate each advertised link speed into the corresponding
	 * PHY type bits.
	 */
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
	}

	/* Mask the set values to avoid requesting unsupported link types. */
	phy_type_low &= sup_phy_type_low;
	pcfg.phy_type_low = cpu_to_le64(phy_type_low);
	phy_type_high &= sup_phy_type_high;
	pcfg.phy_type_high = cpu_to_le64(phy_type_high);

	/* Only push a new config when it differs from the active one. */
	if (pcfg.phy_type_high != pcaps.phy_type_high ||
	    pcfg.phy_type_low != pcaps.phy_type_low ||
	    pcfg.caps != pcaps.caps) {
		pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
		pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;

		err = ixgbe_aci_set_phy_cfg(hw, &pcfg);
		if (err)
			return err;
	}

	return 0;
}

/**
 * ixgbe_set_phy_power_e610 - Control power for copper PHY
 * @hw: pointer to hardware structure
 * @on: true for on, false for off
 *
 * Set the power on/off of the PHY
 * by getting its capabilities and setting the appropriate
 * configuration parameters.
 *
 * Return: the exit code of the operation.
2090 */ 2091 int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on) 2092 { 2093 struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {}; 2094 struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {}; 2095 int err; 2096 2097 err = ixgbe_aci_get_phy_caps(hw, false, 2098 IXGBE_ACI_REPORT_ACTIVE_CFG, 2099 &phy_caps); 2100 if (err) 2101 return err; 2102 2103 ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg); 2104 2105 if (on) 2106 phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER; 2107 else 2108 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER; 2109 2110 /* PHY is already in requested power mode. */ 2111 if (phy_caps.caps == phy_cfg.caps) 2112 return 0; 2113 2114 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK; 2115 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT; 2116 2117 return ixgbe_aci_set_phy_cfg(hw, &phy_cfg); 2118 } 2119 2120 /** 2121 * ixgbe_enter_lplu_e610 - Transition to low power states 2122 * @hw: pointer to hardware structure 2123 * 2124 * Configures Low Power Link Up on transition to low power states 2125 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the 2126 * X557 PHY immediately prior to entering LPLU. 2127 * 2128 * Return: the exit code of the operation. 2129 */ 2130 int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw) 2131 { 2132 struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {}; 2133 struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {}; 2134 int err; 2135 2136 err = ixgbe_aci_get_phy_caps(hw, false, 2137 IXGBE_ACI_REPORT_ACTIVE_CFG, 2138 &phy_caps); 2139 if (err) 2140 return err; 2141 2142 ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg); 2143 2144 phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG; 2145 2146 return ixgbe_aci_set_phy_cfg(hw, &phy_cfg); 2147 } 2148 2149 /** 2150 * ixgbe_init_eeprom_params_e610 - Initialize EEPROM params 2151 * @hw: pointer to hardware structure 2152 * 2153 * Initialize the EEPROM parameters ixgbe_eeprom_info within the ixgbe_hw 2154 * struct in order to set up EEPROM access. 
2155 * 2156 * Return: the operation exit code. 2157 */ 2158 int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw) 2159 { 2160 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 2161 u32 gens_stat; 2162 u8 sr_size; 2163 2164 if (eeprom->type != ixgbe_eeprom_uninitialized) 2165 return 0; 2166 2167 eeprom->type = ixgbe_flash; 2168 2169 gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS); 2170 sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat); 2171 2172 /* Switching to words (sr_size contains power of 2). */ 2173 eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB; 2174 2175 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type, 2176 eeprom->word_size); 2177 2178 return 0; 2179 } 2180 2181 /** 2182 * ixgbe_aci_get_netlist_node - get a node handle 2183 * @hw: pointer to the hw struct 2184 * @cmd: get_link_topo AQ structure 2185 * @node_part_number: output node part number if node found 2186 * @node_handle: output node handle parameter if node found 2187 * 2188 * Get the netlist node and assigns it to 2189 * the provided handle using ACI command (0x06E0). 2190 * 2191 * Return: the exit code of the operation. 2192 */ 2193 int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw, 2194 struct ixgbe_aci_cmd_get_link_topo *cmd, 2195 u8 *node_part_number, u16 *node_handle) 2196 { 2197 struct ixgbe_aci_desc desc; 2198 2199 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo); 2200 desc.params.get_link_topo = *cmd; 2201 2202 if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0)) 2203 return -EOPNOTSUPP; 2204 2205 if (node_handle) 2206 *node_handle = 2207 le16_to_cpu(desc.params.get_link_topo.addr.handle); 2208 if (node_part_number) 2209 *node_part_number = desc.params.get_link_topo.node_part_num; 2210 2211 return 0; 2212 } 2213 2214 /** 2215 * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership 2216 * @hw: pointer to the HW structure 2217 * @access: NVM access type (read or write) 2218 * 2219 * Request NVM ownership. 
2220 * 2221 * Return: the exit code of the operation. 2222 */ 2223 int ixgbe_acquire_nvm(struct ixgbe_hw *hw, 2224 enum ixgbe_aci_res_access_type access) 2225 { 2226 u32 fla; 2227 2228 /* Skip if we are in blank NVM programming mode */ 2229 fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA); 2230 if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0) 2231 return 0; 2232 2233 return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access, 2234 IXGBE_NVM_TIMEOUT); 2235 } 2236 2237 /** 2238 * ixgbe_release_nvm - Generic request for releasing the NVM ownership 2239 * @hw: pointer to the HW structure 2240 * 2241 * Release NVM ownership. 2242 */ 2243 void ixgbe_release_nvm(struct ixgbe_hw *hw) 2244 { 2245 u32 fla; 2246 2247 /* Skip if we are in blank NVM programming mode */ 2248 fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA); 2249 if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0) 2250 return; 2251 2252 ixgbe_release_res(hw, IXGBE_NVM_RES_ID); 2253 } 2254 2255 /** 2256 * ixgbe_aci_read_nvm - read NVM 2257 * @hw: pointer to the HW struct 2258 * @module_typeid: module pointer location in words from the NVM beginning 2259 * @offset: byte offset from the module beginning 2260 * @length: length of the section to be read (in bytes from the offset) 2261 * @data: command buffer (size [bytes] = length) 2262 * @last_command: tells if this is the last command in a series 2263 * @read_shadow_ram: tell if this is a shadow RAM read 2264 * 2265 * Read the NVM using ACI command (0x0701). 2266 * 2267 * Return: the exit code of the operation. 
2268 */ 2269 int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset, 2270 u16 length, void *data, bool last_command, 2271 bool read_shadow_ram) 2272 { 2273 struct ixgbe_aci_cmd_nvm *cmd; 2274 struct ixgbe_aci_desc desc; 2275 2276 if (offset > IXGBE_ACI_NVM_MAX_OFFSET) 2277 return -EINVAL; 2278 2279 cmd = &desc.params.nvm; 2280 2281 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read); 2282 2283 if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT) 2284 cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY; 2285 2286 /* If this is the last command in a series, set the proper flag. */ 2287 if (last_command) 2288 cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD; 2289 cmd->module_typeid = cpu_to_le16(module_typeid); 2290 cmd->offset_low = cpu_to_le16(offset & 0xFFFF); 2291 cmd->offset_high = (offset >> 16) & 0xFF; 2292 cmd->length = cpu_to_le16(length); 2293 2294 return ixgbe_aci_send_cmd(hw, &desc, data, length); 2295 } 2296 2297 /** 2298 * ixgbe_nvm_validate_checksum - validate checksum 2299 * @hw: pointer to the HW struct 2300 * 2301 * Verify NVM PFA checksum validity using ACI command (0x0706). 2302 * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned. 2303 * The function acquires and then releases the NVM ownership. 2304 * 2305 * Return: the exit code of the operation. 
2306 */ 2307 int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw) 2308 { 2309 struct ixgbe_aci_cmd_nvm_checksum *cmd; 2310 struct ixgbe_aci_desc desc; 2311 int err; 2312 2313 err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ); 2314 if (err) 2315 return err; 2316 2317 cmd = &desc.params.nvm_checksum; 2318 2319 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum); 2320 cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY; 2321 2322 err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0); 2323 2324 ixgbe_release_nvm(hw); 2325 2326 if (!err && cmd->checksum != 2327 cpu_to_le16(IXGBE_ACI_NVM_CHECKSUM_CORRECT)) { 2328 struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter, 2329 hw); 2330 2331 err = -EIO; 2332 netdev_err(adapter->netdev, "Invalid Shadow Ram checksum"); 2333 } 2334 2335 return err; 2336 } 2337 2338 /** 2339 * ixgbe_discover_flash_size - Discover the available flash size 2340 * @hw: pointer to the HW struct 2341 * 2342 * The device flash could be up to 16MB in size. However, it is possible that 2343 * the actual size is smaller. Use bisection to determine the accessible size 2344 * of flash memory. 2345 * 2346 * Return: the exit code of the operation. 
 */
static int ixgbe_discover_flash_size(struct ixgbe_hw *hw)
{
	u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
	int err;

	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
	if (err)
		return err;

	/* Bisect: probe a single byte at the midpoint and narrow the range
	 * based on whether the read succeeds.
	 */
	while ((max_size - min_size) > 1) {
		u32 offset = (max_size + min_size) / 2;
		u32 len = 1;
		u8 data;

		err = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
		if (err == -EIO &&
		    hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
			/* Firmware rejected the offset as invalid: the probe
			 * landed past the accessible end of flash.
			 */
			err = 0;
			max_size = offset;
		} else if (!err) {
			/* Read succeeded: this offset is accessible. */
			min_size = offset;
		} else {
			/* an unexpected error occurred */
			goto err_read_flat_nvm;
		}
	}

	hw->flash.flash_size = max_size;

err_read_flat_nvm:
	ixgbe_release_nvm(hw);

	return err;
}

/**
 * ixgbe_read_sr_base_address - Read the value of a Shadow RAM pointer word
 * @hw: pointer to the HW structure
 * @offset: the word offset of the Shadow RAM word to read
 * @pointer: pointer value read from Shadow RAM
 *
 * Read the given Shadow RAM word, and convert it to a pointer value specified
 * in bytes. This function assumes the specified offset is a valid pointer
 * word.
 *
 * Each pointer word specifies whether it is stored in word size or 4KB
 * sector size by using the highest bit. The reported pointer value will be in
 * bytes, intended for flat NVM reads.
 *
 * Return: the exit code of the operation.
2398 */ 2399 static int ixgbe_read_sr_base_address(struct ixgbe_hw *hw, u16 offset, 2400 u32 *pointer) 2401 { 2402 u16 value; 2403 int err; 2404 2405 err = ixgbe_read_ee_aci_e610(hw, offset, &value); 2406 if (err) 2407 return err; 2408 2409 /* Determine if the pointer is in 4KB or word units */ 2410 if (value & IXGBE_SR_NVM_PTR_4KB_UNITS) 2411 *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * SZ_4K; 2412 else 2413 *pointer = value * sizeof(u16); 2414 2415 return 0; 2416 } 2417 2418 /** 2419 * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word 2420 * @hw: pointer to the HW structure 2421 * @offset: the word offset of the Shadow RAM to read 2422 * @size: size value read from the Shadow RAM 2423 * 2424 * Read the given Shadow RAM word, and convert it to an area size value 2425 * specified in bytes. This function assumes the specified offset is a valid 2426 * area size word. 2427 * 2428 * Each area size word is specified in 4KB sector units. This function reports 2429 * the size in bytes, intended for flat NVM reads. 2430 * 2431 * Return: the exit code of the operation. 2432 */ 2433 static int ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size) 2434 { 2435 u16 value; 2436 int err; 2437 2438 err = ixgbe_read_ee_aci_e610(hw, offset, &value); 2439 if (err) 2440 return err; 2441 2442 /* Area sizes are always specified in 4KB units */ 2443 *size = value * SZ_4K; 2444 2445 return 0; 2446 } 2447 2448 /** 2449 * ixgbe_determine_active_flash_banks - Discover active bank for each module 2450 * @hw: pointer to the HW struct 2451 * 2452 * Read the Shadow RAM control word and determine which banks are active for 2453 * the NVM, OROM, and Netlist modules. Also read and calculate the associated 2454 * pointer and size. These values are then cached into the ixgbe_flash_info 2455 * structure for later use in order to calculate the correct offset to read 2456 * from the active module. 2457 * 2458 * Return: the exit code of the operation. 
2459 */ 2460 static int ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw) 2461 { 2462 struct ixgbe_bank_info *banks = &hw->flash.banks; 2463 u16 ctrl_word; 2464 int err; 2465 2466 err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_NVM_CTRL_WORD, 2467 &ctrl_word); 2468 if (err) 2469 return err; 2470 2471 if (FIELD_GET(IXGBE_SR_CTRL_WORD_1_M, ctrl_word) != 2472 IXGBE_SR_CTRL_WORD_VALID) 2473 return -ENODATA; 2474 2475 if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK)) 2476 banks->nvm_bank = IXGBE_1ST_FLASH_BANK; 2477 else 2478 banks->nvm_bank = IXGBE_2ND_FLASH_BANK; 2479 2480 if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK)) 2481 banks->orom_bank = IXGBE_1ST_FLASH_BANK; 2482 else 2483 banks->orom_bank = IXGBE_2ND_FLASH_BANK; 2484 2485 if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK)) 2486 banks->netlist_bank = IXGBE_1ST_FLASH_BANK; 2487 else 2488 banks->netlist_bank = IXGBE_2ND_FLASH_BANK; 2489 2490 err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_NVM_BANK_PTR, 2491 &banks->nvm_ptr); 2492 if (err) 2493 return err; 2494 2495 err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NVM_BANK_SIZE, 2496 &banks->nvm_size); 2497 if (err) 2498 return err; 2499 2500 err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_OROM_BANK_PTR, 2501 &banks->orom_ptr); 2502 if (err) 2503 return err; 2504 2505 err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_OROM_BANK_SIZE, 2506 &banks->orom_size); 2507 if (err) 2508 return err; 2509 2510 err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_NETLIST_BANK_PTR, 2511 &banks->netlist_ptr); 2512 if (err) 2513 return err; 2514 2515 err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NETLIST_BANK_SIZE, 2516 &banks->netlist_size); 2517 2518 return err; 2519 } 2520 2521 /** 2522 * ixgbe_get_flash_bank_offset - Get offset into requested flash bank 2523 * @hw: pointer to the HW structure 2524 * @bank: whether to read from the active or inactive flash bank 2525 * @module: the module to read from 2526 * 2527 * Based on the module, lookup the module offset from 
the beginning of the
 * flash.
 *
 * Return: the flash offset. Note that a value of zero is invalid and must be
 * treated as an error.
 */
static int ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
				       enum ixgbe_bank_select bank,
				       u16 module)
{
	struct ixgbe_bank_info *banks = &hw->flash.banks;
	enum ixgbe_flash_bank active_bank;
	bool second_bank_active;
	u32 offset, size;

	/* Look up the cached pointer/size/active-bank info for the module,
	 * populated by ixgbe_determine_active_flash_banks().
	 */
	switch (module) {
	case IXGBE_E610_SR_1ST_NVM_BANK_PTR:
		offset = banks->nvm_ptr;
		size = banks->nvm_size;
		active_bank = banks->nvm_bank;
		break;
	case IXGBE_E610_SR_1ST_OROM_BANK_PTR:
		offset = banks->orom_ptr;
		size = banks->orom_size;
		active_bank = banks->orom_bank;
		break;
	case IXGBE_E610_SR_NETLIST_BANK_PTR:
		offset = banks->netlist_ptr;
		size = banks->netlist_size;
		active_bank = banks->netlist_bank;
		break;
	default:
		/* Unknown module: zero signals an error to the caller. */
		return 0;
	}

	switch (active_bank) {
	case IXGBE_1ST_FLASH_BANK:
		second_bank_active = false;
		break;
	case IXGBE_2ND_FLASH_BANK:
		second_bank_active = true;
		break;
	default:
		return 0;
	}

	/* The second flash bank is stored immediately following the first
	 * bank. Based on whether the 1st or 2nd bank is active, and whether
	 * we want the active or inactive bank, calculate the desired offset.
	 */
	switch (bank) {
	case IXGBE_ACTIVE_FLASH_BANK:
		return offset + (second_bank_active ? size : 0);
	case IXGBE_INACTIVE_FLASH_BANK:
		return offset + (second_bank_active ? 0 : size);
	}

	return 0;
}

/**
 * ixgbe_read_flash_module - Read a word from one of the main NVM modules
 * @hw: pointer to the HW structure
 * @bank: which bank of the module to read
 * @module: the module to read
 * @offset: the offset into the module in bytes
 * @data: storage for the word read from the flash
 * @length: bytes of data to read
 *
 * Read data from the specified flash module. The bank parameter indicates
 * whether or not to read from the active bank or the inactive bank of that
 * module.
 *
 * The word will be read using flat NVM access, and relies on the
 * hw->flash.banks data being setup by ixgbe_determine_active_flash_banks()
 * during initialization.
 *
 * Return: the exit code of the operation.
 */
static int ixgbe_read_flash_module(struct ixgbe_hw *hw,
				   enum ixgbe_bank_select bank,
				   u16 module, u32 offset, u8 *data, u32 length)
{
	u32 start;
	int err;

	/* A zero offset means the module/bank lookup failed. */
	start = ixgbe_get_flash_bank_offset(hw, bank, module);
	if (!start)
		return -EINVAL;

	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
	if (err)
		return err;

	err = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);

	ixgbe_release_nvm(hw);

	return err;
}

/**
 * ixgbe_read_nvm_module - Read from the active main NVM module
 * @hw: pointer to the HW structure
 * @bank: whether to read from active or inactive NVM module
 * @offset: offset into the NVM module to read, in words
 * @data: storage for returned word value
 *
 * Read the specified word from the active NVM module. This includes the CSS
 * header at the start of the NVM module.
 *
 * Return: the exit code of the operation.
2639 */ 2640 static int ixgbe_read_nvm_module(struct ixgbe_hw *hw, 2641 enum ixgbe_bank_select bank, 2642 u32 offset, u16 *data) 2643 { 2644 __le16 data_local; 2645 int err; 2646 2647 err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_NVM_BANK_PTR, 2648 offset * sizeof(data_local), 2649 (u8 *)&data_local, 2650 sizeof(data_local)); 2651 if (!err) 2652 *data = le16_to_cpu(data_local); 2653 2654 return err; 2655 } 2656 2657 /** 2658 * ixgbe_read_netlist_module - Read data from the netlist module area 2659 * @hw: pointer to the HW structure 2660 * @bank: whether to read from the active or inactive module 2661 * @offset: offset into the netlist to read from 2662 * @data: storage for returned word value 2663 * 2664 * Read a word from the specified netlist bank. 2665 * 2666 * Return: the exit code of the operation. 2667 */ 2668 static int ixgbe_read_netlist_module(struct ixgbe_hw *hw, 2669 enum ixgbe_bank_select bank, 2670 u32 offset, u16 *data) 2671 { 2672 __le16 data_local; 2673 int err; 2674 2675 err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR, 2676 offset * sizeof(data_local), 2677 (u8 *)&data_local, sizeof(data_local)); 2678 if (!err) 2679 *data = le16_to_cpu(data_local); 2680 2681 return err; 2682 } 2683 2684 /** 2685 * ixgbe_read_orom_module - Read from the active Option ROM module 2686 * @hw: pointer to the HW structure 2687 * @bank: whether to read from active or inactive OROM module 2688 * @offset: offset into the OROM module to read, in words 2689 * @data: storage for returned word value 2690 * 2691 * Read the specified word from the active Option ROM module of the flash. 2692 * Note that unlike the NVM module, the CSS data is stored at the end of the 2693 * module instead of at the beginning. 2694 * 2695 * Return: the exit code of the operation. 
2696 */ 2697 static int ixgbe_read_orom_module(struct ixgbe_hw *hw, 2698 enum ixgbe_bank_select bank, 2699 u32 offset, u16 *data) 2700 { 2701 __le16 data_local; 2702 int err; 2703 2704 err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_OROM_BANK_PTR, 2705 offset * sizeof(data_local), 2706 (u8 *)&data_local, sizeof(data_local)); 2707 if (!err) 2708 *data = le16_to_cpu(data_local); 2709 2710 return err; 2711 } 2712 2713 /** 2714 * ixgbe_get_nvm_css_hdr_len - Read the CSS header length 2715 * @hw: pointer to the HW struct 2716 * @bank: whether to read from the active or inactive flash bank 2717 * @hdr_len: storage for header length in words 2718 * 2719 * Read the CSS header length from the NVM CSS header and add the 2720 * Authentication header size, and then convert to words. 2721 * 2722 * Return: the exit code of the operation. 2723 */ 2724 static int ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw, 2725 enum ixgbe_bank_select bank, 2726 u32 *hdr_len) 2727 { 2728 u16 hdr_len_l, hdr_len_h; 2729 u32 hdr_len_dword; 2730 int err; 2731 2732 err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L, 2733 &hdr_len_l); 2734 if (err) 2735 return err; 2736 2737 err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H, 2738 &hdr_len_h); 2739 if (err) 2740 return err; 2741 2742 /* CSS header length is in DWORD, so convert to words and add 2743 * authentication header size. 2744 */ 2745 hdr_len_dword = (hdr_len_h << 16) | hdr_len_l; 2746 *hdr_len = hdr_len_dword * 2 + IXGBE_NVM_AUTH_HEADER_LEN; 2747 2748 return 0; 2749 } 2750 2751 /** 2752 * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy 2753 * @hw: pointer to the HW structure 2754 * @bank: whether to read from the active or inactive NVM module 2755 * @offset: offset into the Shadow RAM copy to read, in words 2756 * @data: storage for returned word value 2757 * 2758 * Read the specified word from the copy of the Shadow RAM found in the 2759 * specified NVM module. 
2760 * 2761 * Return: the exit code of the operation. 2762 */ 2763 static int ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw, 2764 enum ixgbe_bank_select bank, 2765 u32 offset, u16 *data) 2766 { 2767 u32 hdr_len; 2768 int err; 2769 2770 err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len); 2771 if (err) 2772 return err; 2773 2774 hdr_len = round_up(hdr_len, IXGBE_HDR_LEN_ROUNDUP); 2775 2776 return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data); 2777 } 2778 2779 /** 2780 * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header 2781 * @hw: pointer to the HW struct 2782 * @bank: whether to read from the active or inactive flash bank 2783 * @srev: storage for security revision 2784 * 2785 * Read the security revision out of the CSS header of the active NVM module 2786 * bank. 2787 * 2788 * Return: the exit code of the operation. 2789 */ 2790 static int ixgbe_get_nvm_srev(struct ixgbe_hw *hw, 2791 enum ixgbe_bank_select bank, u32 *srev) 2792 { 2793 u16 srev_l, srev_h; 2794 int err; 2795 2796 err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l); 2797 if (err) 2798 return err; 2799 2800 err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h); 2801 if (err) 2802 return err; 2803 2804 *srev = (srev_h << 16) | srev_l; 2805 2806 return 0; 2807 } 2808 2809 /** 2810 * ixgbe_get_orom_civd_data - Get the combo version information from Option ROM 2811 * @hw: pointer to the HW struct 2812 * @bank: whether to read from the active or inactive flash module 2813 * @civd: storage for the Option ROM CIVD data. 2814 * 2815 * Searches through the Option ROM flash contents to locate the CIVD data for 2816 * the image. 2817 * 2818 * Return: the exit code of the operation. 
 */
static int
ixgbe_get_orom_civd_data(struct ixgbe_hw *hw, enum ixgbe_bank_select bank,
			 struct ixgbe_orom_civd_info *civd)
{
	struct ixgbe_orom_civd_info tmp;
	u32 offset;
	int err;

	/* The CIVD section is located in the Option ROM aligned to 512 bytes.
	 * The first 4 bytes must contain the ASCII characters "$CIV".
	 * A simple modulo 256 sum of all of the bytes of the structure must
	 * equal 0.
	 */
	for (offset = 0; (offset + SZ_512) <= hw->flash.banks.orom_size;
	     offset += SZ_512) {
		u8 sum = 0;
		u32 i;

		/* Read one candidate structure per 512-byte boundary. */
		err = ixgbe_read_flash_module(hw, bank,
					      IXGBE_E610_SR_1ST_OROM_BANK_PTR,
					      offset,
					      (u8 *)&tmp, sizeof(tmp));
		if (err)
			return err;

		/* Skip forward until we find a matching signature */
		if (memcmp(IXGBE_OROM_CIV_SIGNATURE, tmp.signature,
			   sizeof(tmp.signature)))
			continue;

		/* Verify that the simple checksum is zero */
		for (i = 0; i < sizeof(tmp); i++)
			sum += ((u8 *)&tmp)[i];

		/* Signature matched but checksum failed: corrupt CIVD. */
		if (sum)
			return -EDOM;

		*civd = tmp;
		return 0;
	}

	/* No valid CIVD section found anywhere in the Option ROM bank. */
	return -ENODATA;
}

/**
 * ixgbe_get_orom_srev - Read the security revision from the OROM CSS header
 * @hw: pointer to the HW struct
 * @bank: whether to read from active or inactive flash module
 * @srev: storage for security revision
 *
 * Read the security revision out of the CSS header of the active OROM module
 * bank.
 *
 * Return: the exit code of the operation.
2874 */ 2875 static int ixgbe_get_orom_srev(struct ixgbe_hw *hw, 2876 enum ixgbe_bank_select bank, 2877 u32 *srev) 2878 { 2879 u32 orom_size_word = hw->flash.banks.orom_size / 2; 2880 u32 css_start, hdr_len; 2881 u16 srev_l, srev_h; 2882 int err; 2883 2884 err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len); 2885 if (err) 2886 return err; 2887 2888 if (orom_size_word < hdr_len) 2889 return -EINVAL; 2890 2891 /* Calculate how far into the Option ROM the CSS header starts. Note 2892 * that ixgbe_read_orom_module takes a word offset. 2893 */ 2894 css_start = orom_size_word - hdr_len; 2895 err = ixgbe_read_orom_module(hw, bank, 2896 css_start + IXGBE_NVM_CSS_SREV_L, 2897 &srev_l); 2898 if (err) 2899 return err; 2900 2901 err = ixgbe_read_orom_module(hw, bank, 2902 css_start + IXGBE_NVM_CSS_SREV_H, 2903 &srev_h); 2904 if (err) 2905 return err; 2906 2907 *srev = srev_h << 16 | srev_l; 2908 2909 return 0; 2910 } 2911 2912 /** 2913 * ixgbe_get_orom_ver_info - Read Option ROM version information 2914 * @hw: pointer to the HW struct 2915 * @bank: whether to read from the active or inactive flash module 2916 * @orom: pointer to Option ROM info structure 2917 * 2918 * Read Option ROM version and security revision from the Option ROM flash 2919 * section. 2920 * 2921 * Return: the exit code of the operation. 
2922 */ 2923 static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw, 2924 enum ixgbe_bank_select bank, 2925 struct ixgbe_orom_info *orom) 2926 { 2927 struct ixgbe_orom_civd_info civd; 2928 u32 combo_ver; 2929 int err; 2930 2931 err = ixgbe_get_orom_civd_data(hw, bank, &civd); 2932 if (err) 2933 return err; 2934 2935 combo_ver = le32_to_cpu(civd.combo_ver); 2936 2937 orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver); 2938 orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver); 2939 orom->build = (u16)FIELD_GET(IXGBE_OROM_VER_BUILD_MASK, combo_ver); 2940 2941 return ixgbe_get_orom_srev(hw, bank, &orom->srev); 2942 } 2943 2944 /** 2945 * ixgbe_get_inactive_orom_ver - Read Option ROM version from the inactive bank 2946 * @hw: pointer to the HW structure 2947 * @orom: storage for Option ROM version information 2948 * 2949 * Read the Option ROM version and security revision data for the inactive 2950 * section of flash. Used to access version data for a pending update that has 2951 * not yet been activated. 2952 * 2953 * Return: the exit code of the operation. 2954 */ 2955 int ixgbe_get_inactive_orom_ver(struct ixgbe_hw *hw, 2956 struct ixgbe_orom_info *orom) 2957 { 2958 return ixgbe_get_orom_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, orom); 2959 } 2960 2961 /** 2962 * ixgbe_get_nvm_ver_info - Read NVM version information 2963 * @hw: pointer to the HW struct 2964 * @bank: whether to read from the active or inactive flash bank 2965 * @nvm: pointer to NVM info structure 2966 * 2967 * Read the NVM EETRACK ID and map version of the main NVM image bank, filling 2968 * in the nvm info structure. 2969 * 2970 * Return: the exit code of the operation. 
2971 */ 2972 static int ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw, 2973 enum ixgbe_bank_select bank, 2974 struct ixgbe_nvm_info *nvm) 2975 { 2976 u16 eetrack_lo, eetrack_hi, ver; 2977 int err; 2978 2979 err = ixgbe_read_nvm_sr_copy(hw, bank, 2980 IXGBE_E610_SR_NVM_DEV_STARTER_VER, &ver); 2981 if (err) 2982 return err; 2983 2984 nvm->major = FIELD_GET(IXGBE_E610_NVM_VER_HI_MASK, ver); 2985 nvm->minor = FIELD_GET(IXGBE_E610_NVM_VER_LO_MASK, ver); 2986 2987 err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_LO, 2988 &eetrack_lo); 2989 if (err) 2990 return err; 2991 2992 err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_HI, 2993 &eetrack_hi); 2994 if (err) 2995 return err; 2996 2997 nvm->eetrack = (eetrack_hi << 16) | eetrack_lo; 2998 2999 ixgbe_get_nvm_srev(hw, bank, &nvm->srev); 3000 3001 return 0; 3002 } 3003 3004 /** 3005 * ixgbe_get_inactive_nvm_ver - Read Option ROM version from the inactive bank 3006 * @hw: pointer to the HW structure 3007 * @nvm: storage for Option ROM version information 3008 * 3009 * Read the NVM EETRACK ID, Map version, and security revision of the 3010 * inactive NVM bank. Used to access version data for a pending update that 3011 * has not yet been activated. 3012 * 3013 * Return: the exit code of the operation. 3014 */ 3015 int ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm) 3016 { 3017 return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm); 3018 } 3019 3020 /** 3021 * ixgbe_get_netlist_info - Read the netlist version information 3022 * @hw: pointer to the HW struct 3023 * @bank: whether to read from the active or inactive flash bank 3024 * @netlist: pointer to netlist version info structure 3025 * 3026 * Get the netlist version information from the requested bank. Reads the Link 3027 * Topology section to find the Netlist ID block and extract the relevant 3028 * information into the netlist version structure. 3029 * 3030 * Return: the exit code of the operation. 
 */
static int ixgbe_get_netlist_info(struct ixgbe_hw *hw,
				  enum ixgbe_bank_select bank,
				  struct ixgbe_netlist_info *netlist)
{
	u16 module_id, length, node_count, i;
	u16 *id_blk;
	int err;

	err = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
					&module_id);
	if (err)
		return err;

	/* Only the Link Topology module carries the netlist ID block. */
	if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID)
		return -EIO;

	err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
					&length);
	if (err)
		return err;

	/* Sanity check that we have at least enough words to store the
	 * netlist ID block.
	 */
	if (length < IXGBE_NETLIST_ID_BLK_SIZE)
		return -EIO;

	err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
					&node_count);
	if (err)
		return err;

	node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;

	id_blk = kcalloc(IXGBE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
	if (!id_blk)
		return -ENOMEM;

	/* Read out the entire Netlist ID Block at once. */
	err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
				      IXGBE_NETLIST_ID_BLK_OFFSET(node_count) *
				      sizeof(*id_blk), (u8 *)id_blk,
				      IXGBE_NETLIST_ID_BLK_SIZE *
				      sizeof(*id_blk));
	if (err)
		goto free_id_blk;

	/* Convert the ID block words to CPU endianness in place. */
	for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
		id_blk[i] = le16_to_cpu(((__le16 *)id_blk)[i]);

	netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
			 id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
	netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
			 id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
	netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
			id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
	netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
		       id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
	netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
	/* Read the left most 4 bytes of SHA */
	netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
			id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];

free_id_blk:
	kfree(id_blk);
	return err;
}

/**
 * ixgbe_get_inactive_netlist_ver - Read netlist version from the inactive bank
 * @hw: pointer to the HW struct
 * @netlist: pointer to netlist version info structure
 *
 * Read the netlist version data from the inactive netlist bank. Used to
 * extract version data of a pending flash update in order to display the
 * version data.
 *
 * Return: the exit code of the operation.
3110 */ 3111 int ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw, 3112 struct ixgbe_netlist_info *netlist) 3113 { 3114 return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist); 3115 } 3116 3117 /** 3118 * ixgbe_get_flash_data - get flash data 3119 * @hw: pointer to the HW struct 3120 * 3121 * Read and populate flash data such as Shadow RAM size, 3122 * max_timeout and blank_nvm_mode 3123 * 3124 * Return: the exit code of the operation. 3125 */ 3126 int ixgbe_get_flash_data(struct ixgbe_hw *hw) 3127 { 3128 struct ixgbe_flash_info *flash = &hw->flash; 3129 u32 fla, gens_stat; 3130 u8 sr_size; 3131 int err; 3132 3133 /* The SR size is stored regardless of the NVM programming mode 3134 * as the blank mode may be used in the factory line. 3135 */ 3136 gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS); 3137 sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat); 3138 3139 /* Switching to words (sr_size contains power of 2) */ 3140 flash->sr_words = BIT(sr_size) * (SZ_1K / sizeof(u16)); 3141 3142 /* Check if we are in the normal or blank NVM programming mode */ 3143 fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA); 3144 if (fla & IXGBE_GLNVM_FLA_LOCKED_M) { 3145 flash->blank_nvm_mode = false; 3146 } else { 3147 flash->blank_nvm_mode = true; 3148 return -EIO; 3149 } 3150 3151 err = ixgbe_discover_flash_size(hw); 3152 if (err) 3153 return err; 3154 3155 err = ixgbe_determine_active_flash_banks(hw); 3156 if (err) 3157 return err; 3158 3159 err = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, 3160 &flash->nvm); 3161 if (err) 3162 return err; 3163 3164 err = ixgbe_get_orom_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, 3165 &flash->orom); 3166 if (err) 3167 return err; 3168 3169 err = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK, 3170 &flash->netlist); 3171 return err; 3172 } 3173 3174 /** 3175 * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI 3176 * @hw: pointer to the HW structure 3177 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) 3178 * @data: 
word read from the Shadow RAM 3179 * 3180 * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm. 3181 * 3182 * Return: the exit code of the operation. 3183 */ 3184 int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data) 3185 { 3186 u32 bytes = sizeof(u16); 3187 u16 data_local; 3188 int err; 3189 3190 err = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes, 3191 (u8 *)&data_local, true); 3192 if (err) 3193 return err; 3194 3195 *data = data_local; 3196 return 0; 3197 } 3198 3199 /** 3200 * ixgbe_read_flat_nvm - Read portion of NVM by flat offset 3201 * @hw: pointer to the HW struct 3202 * @offset: offset from beginning of NVM 3203 * @length: (in) number of bytes to read; (out) number of bytes actually read 3204 * @data: buffer to return data in (sized to fit the specified length) 3205 * @read_shadow_ram: if true, read from shadow RAM instead of NVM 3206 * 3207 * Reads a portion of the NVM, as a flat memory space. This function correctly 3208 * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size 3209 * from being exceeded in case of Shadow RAM read requests and ensures that no 3210 * single read request exceeds the maximum 4KB read for a single admin command. 3211 * 3212 * Returns an error code on failure. Note that the data pointer may be 3213 * partially updated if some reads succeed before a failure. 3214 * 3215 * Return: the exit code of the operation. 3216 */ 3217 int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length, 3218 u8 *data, bool read_shadow_ram) 3219 { 3220 u32 inlen = *length; 3221 u32 bytes_read = 0; 3222 bool last_cmd; 3223 int err; 3224 3225 /* Verify the length of the read if this is for the Shadow RAM */ 3226 if (read_shadow_ram && ((offset + inlen) > 3227 (hw->eeprom.word_size * 2u))) 3228 return -EINVAL; 3229 3230 do { 3231 u32 read_size, sector_offset; 3232 3233 /* ixgbe_aci_read_nvm cannot read more than 4KB at a time. 
3234 * Additionally, a read from the Shadow RAM may not cross over 3235 * a sector boundary. Conveniently, the sector size is also 4KB. 3236 */ 3237 sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE; 3238 read_size = min_t(u32, 3239 IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset, 3240 inlen - bytes_read); 3241 3242 last_cmd = !(bytes_read + read_size < inlen); 3243 3244 /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size 3245 * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE 3246 * maximum size guarantees that it will fit within the 2 bytes. 3247 */ 3248 err = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT, 3249 offset, (u16)read_size, 3250 data + bytes_read, last_cmd, 3251 read_shadow_ram); 3252 if (err) 3253 break; 3254 3255 bytes_read += read_size; 3256 offset += read_size; 3257 } while (!last_cmd); 3258 3259 *length = bytes_read; 3260 return err; 3261 } 3262 3263 /** 3264 * ixgbe_read_sr_buf_aci - Read Shadow RAM buffer via ACI 3265 * @hw: pointer to the HW structure 3266 * @offset: offset of the Shadow RAM words to read (0x000000 - 0x001FFF) 3267 * @words: (in) number of words to read; (out) number of words actually read 3268 * @data: words read from the Shadow RAM 3269 * 3270 * Read 16 bit words (data buf) from the Shadow RAM. Acquire/release the NVM 3271 * ownership. 3272 * 3273 * Return: the operation exit code. 3274 */ 3275 int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words, 3276 u16 *data) 3277 { 3278 u32 bytes = *words * 2; 3279 int err; 3280 3281 err = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true); 3282 if (err) 3283 return err; 3284 3285 *words = bytes / 2; 3286 3287 for (int i = 0; i < *words; i++) 3288 data[i] = le16_to_cpu(((__le16 *)data)[i]); 3289 3290 return 0; 3291 } 3292 3293 /** 3294 * ixgbe_read_ee_aci_e610 - Read EEPROM word using the admin command. 
3295 * @hw: pointer to hardware structure 3296 * @offset: offset of word in the EEPROM to read 3297 * @data: word read from the EEPROM 3298 * 3299 * Reads a 16 bit word from the EEPROM using the ACI. 3300 * If the EEPROM params are not initialized, the function 3301 * initialize them before proceeding with reading. 3302 * The function acquires and then releases the NVM ownership. 3303 * 3304 * Return: the exit code of the operation. 3305 */ 3306 int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data) 3307 { 3308 int err; 3309 3310 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) { 3311 err = hw->eeprom.ops.init_params(hw); 3312 if (err) 3313 return err; 3314 } 3315 3316 err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ); 3317 if (err) 3318 return err; 3319 3320 err = ixgbe_read_sr_word_aci(hw, offset, data); 3321 ixgbe_release_nvm(hw); 3322 3323 return err; 3324 } 3325 3326 /** 3327 * ixgbe_read_ee_aci_buffer_e610 - Read EEPROM words via ACI 3328 * @hw: pointer to hardware structure 3329 * @offset: offset of words in the EEPROM to read 3330 * @words: number of words to read 3331 * @data: words to read from the EEPROM 3332 * 3333 * Read 16 bit words from the EEPROM via the ACI. Initialize the EEPROM params 3334 * prior to the read. Acquire/release the NVM ownership. 3335 * 3336 * Return: the operation exit code. 
3337 */ 3338 int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset, 3339 u16 words, u16 *data) 3340 { 3341 int err; 3342 3343 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) { 3344 err = hw->eeprom.ops.init_params(hw); 3345 if (err) 3346 return err; 3347 } 3348 3349 err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ); 3350 if (err) 3351 return err; 3352 3353 err = ixgbe_read_sr_buf_aci(hw, offset, &words, data); 3354 ixgbe_release_nvm(hw); 3355 3356 return err; 3357 } 3358 3359 /** 3360 * ixgbe_validate_eeprom_checksum_e610 - Validate EEPROM checksum 3361 * @hw: pointer to hardware structure 3362 * @checksum_val: calculated checksum 3363 * 3364 * Performs checksum calculation and validates the EEPROM checksum. If the 3365 * caller does not need checksum_val, the value can be NULL. 3366 * If the EEPROM params are not initialized, the function 3367 * initialize them before proceeding. 3368 * The function acquires and then releases the NVM ownership. 3369 * 3370 * Return: the exit code of the operation. 3371 */ 3372 int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val) 3373 { 3374 int err; 3375 3376 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) { 3377 err = hw->eeprom.ops.init_params(hw); 3378 if (err) 3379 return err; 3380 } 3381 3382 err = ixgbe_nvm_validate_checksum(hw); 3383 if (err) 3384 return err; 3385 3386 if (checksum_val) { 3387 u16 tmp_checksum; 3388 3389 err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ); 3390 if (err) 3391 return err; 3392 3393 err = ixgbe_read_sr_word_aci(hw, IXGBE_E610_SR_SW_CHECKSUM_WORD, 3394 &tmp_checksum); 3395 ixgbe_release_nvm(hw); 3396 3397 if (!err) 3398 *checksum_val = tmp_checksum; 3399 } 3400 3401 return err; 3402 } 3403 3404 /** 3405 * ixgbe_reset_hw_e610 - Perform hardware reset 3406 * @hw: pointer to hardware structure 3407 * 3408 * Resets the hardware by resetting the transmit and receive units, masks 3409 * and clears all interrupts, and performs a reset. 
3410 * 3411 * Return: the exit code of the operation. 3412 */ 3413 int ixgbe_reset_hw_e610(struct ixgbe_hw *hw) 3414 { 3415 u32 swfw_mask = hw->phy.phy_semaphore_mask; 3416 u32 ctrl, i; 3417 int err; 3418 3419 /* Call adapter stop to disable tx/rx and clear interrupts */ 3420 err = hw->mac.ops.stop_adapter(hw); 3421 if (err) 3422 goto reset_hw_out; 3423 3424 /* Flush pending Tx transactions. */ 3425 ixgbe_clear_tx_pending(hw); 3426 3427 hw->phy.ops.init(hw); 3428 mac_reset_top: 3429 err = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); 3430 if (err) 3431 return -EBUSY; 3432 ctrl = IXGBE_CTRL_RST; 3433 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); 3434 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); 3435 IXGBE_WRITE_FLUSH(hw); 3436 hw->mac.ops.release_swfw_sync(hw, swfw_mask); 3437 3438 /* Poll for reset bit to self-clear indicating reset is complete */ 3439 for (i = 0; i < 10; i++) { 3440 udelay(1); 3441 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 3442 if (!(ctrl & IXGBE_CTRL_RST_MASK)) 3443 break; 3444 } 3445 3446 if (ctrl & IXGBE_CTRL_RST_MASK) { 3447 struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter, 3448 hw); 3449 3450 err = -EIO; 3451 netdev_err(adapter->netdev, "Reset polling failed to complete."); 3452 } 3453 3454 /* Double resets are required for recovery from certain error 3455 * conditions. Between resets, it is necessary to stall to allow time 3456 * for any pending HW events to complete. 3457 */ 3458 msleep(100); 3459 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { 3460 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 3461 goto mac_reset_top; 3462 } 3463 3464 /* Set the Rx packet buffer size. */ 3465 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), GENMASK(18, 17)); 3466 3467 /* Store the permanent mac address */ 3468 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); 3469 3470 /* Maximum number of Receive Address Registers. 
*/ 3471 #define IXGBE_MAX_NUM_RAR 128 3472 3473 /* Store MAC address from RAR0, clear receive address registers, and 3474 * clear the multicast table. Also reset num_rar_entries to the 3475 * maximum number of Receive Address Registers, since we modify this 3476 * value when programming the SAN MAC address. 3477 */ 3478 hw->mac.num_rar_entries = IXGBE_MAX_NUM_RAR; 3479 hw->mac.ops.init_rx_addrs(hw); 3480 3481 /* Initialize bus function number */ 3482 hw->mac.ops.set_lan_id(hw); 3483 3484 reset_hw_out: 3485 return err; 3486 } 3487 3488 /** 3489 * ixgbe_get_pfa_module_tlv - Read sub module TLV from NVM PFA 3490 * @hw: pointer to hardware structure 3491 * @module_tlv: pointer to module TLV to return 3492 * @module_tlv_len: pointer to module TLV length to return 3493 * @module_type: module type requested 3494 * 3495 * Find the requested sub module TLV type from the Preserved Field 3496 * Area (PFA) and returns the TLV pointer and length. The caller can 3497 * use these to read the variable length TLV value. 3498 * 3499 * Return: the exit code of the operation. 3500 */ 3501 static int ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv, 3502 u16 *module_tlv_len, u16 module_type) 3503 { 3504 u16 pfa_len, pfa_ptr, pfa_end_ptr; 3505 u16 next_tlv; 3506 int err; 3507 3508 err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_PFA_PTR, &pfa_ptr); 3509 if (err) 3510 return err; 3511 3512 err = ixgbe_read_ee_aci_e610(hw, pfa_ptr, &pfa_len); 3513 if (err) 3514 return err; 3515 3516 /* Starting with first TLV after PFA length, iterate through the list 3517 * of TLVs to find the requested one. 
3518 */ 3519 next_tlv = pfa_ptr + 1; 3520 pfa_end_ptr = pfa_ptr + pfa_len; 3521 while (next_tlv < pfa_end_ptr) { 3522 u16 tlv_sub_module_type, tlv_len; 3523 3524 /* Read TLV type */ 3525 err = ixgbe_read_ee_aci_e610(hw, next_tlv, 3526 &tlv_sub_module_type); 3527 if (err) 3528 break; 3529 3530 /* Read TLV length */ 3531 err = ixgbe_read_ee_aci_e610(hw, next_tlv + 1, &tlv_len); 3532 if (err) 3533 break; 3534 3535 if (tlv_sub_module_type == module_type) { 3536 if (tlv_len) { 3537 *module_tlv = next_tlv; 3538 *module_tlv_len = tlv_len; 3539 return 0; 3540 } 3541 return -EIO; 3542 } 3543 /* Check next TLV, i.e. current TLV pointer + length + 2 words 3544 * (for current TLV's type and length). 3545 */ 3546 next_tlv = next_tlv + tlv_len + 2; 3547 } 3548 /* Module does not exist */ 3549 return -ENODATA; 3550 } 3551 3552 /** 3553 * ixgbe_read_pba_string_e610 - Read PBA string from NVM 3554 * @hw: pointer to hardware structure 3555 * @pba_num: stores the part number string from the NVM 3556 * @pba_num_size: part number string buffer length 3557 * 3558 * Read the part number string from the NVM. 3559 * 3560 * Return: the exit code of the operation. 3561 */ 3562 static int ixgbe_read_pba_string_e610(struct ixgbe_hw *hw, u8 *pba_num, 3563 u32 pba_num_size) 3564 { 3565 u16 pba_tlv, pba_tlv_len; 3566 u16 pba_word, pba_size; 3567 int err; 3568 3569 *pba_num = '\0'; 3570 3571 err = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len, 3572 IXGBE_E610_SR_PBA_BLOCK_PTR); 3573 if (err) 3574 return err; 3575 3576 /* pba_size is the next word */ 3577 err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2), &pba_size); 3578 if (err) 3579 return err; 3580 3581 if (pba_tlv_len < pba_size) 3582 return -EINVAL; 3583 3584 /* Subtract one to get PBA word count (PBA Size word is included in 3585 * total size). 
3586 */ 3587 pba_size--; 3588 3589 if (pba_num_size < (((u32)pba_size * 2) + 1)) 3590 return -EINVAL; 3591 3592 for (u16 i = 0; i < pba_size; i++) { 3593 err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2 + 1) + i, 3594 &pba_word); 3595 if (err) 3596 return err; 3597 3598 pba_num[(i * 2)] = FIELD_GET(IXGBE_E610_SR_PBA_BLOCK_MASK, 3599 pba_word); 3600 pba_num[(i * 2) + 1] = pba_word & 0xFF; 3601 } 3602 3603 pba_num[(pba_size * 2)] = '\0'; 3604 3605 return err; 3606 } 3607 3608 static const struct ixgbe_mac_operations mac_ops_e610 = { 3609 .init_hw = ixgbe_init_hw_generic, 3610 .start_hw = ixgbe_start_hw_e610, 3611 .clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic, 3612 .enable_rx_dma = ixgbe_enable_rx_dma_generic, 3613 .get_mac_addr = ixgbe_get_mac_addr_generic, 3614 .get_device_caps = ixgbe_get_device_caps_generic, 3615 .stop_adapter = ixgbe_stop_adapter_generic, 3616 .set_lan_id = ixgbe_set_lan_id_multi_port_pcie, 3617 .set_rxpba = ixgbe_set_rxpba_generic, 3618 .check_link = ixgbe_check_link_e610, 3619 .blink_led_start = ixgbe_blink_led_start_X540, 3620 .blink_led_stop = ixgbe_blink_led_stop_X540, 3621 .set_rar = ixgbe_set_rar_generic, 3622 .clear_rar = ixgbe_clear_rar_generic, 3623 .set_vmdq = ixgbe_set_vmdq_generic, 3624 .set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic, 3625 .clear_vmdq = ixgbe_clear_vmdq_generic, 3626 .init_rx_addrs = ixgbe_init_rx_addrs_generic, 3627 .update_mc_addr_list = ixgbe_update_mc_addr_list_generic, 3628 .enable_mc = ixgbe_enable_mc_generic, 3629 .disable_mc = ixgbe_disable_mc_generic, 3630 .clear_vfta = ixgbe_clear_vfta_generic, 3631 .set_vfta = ixgbe_set_vfta_generic, 3632 .fc_enable = ixgbe_fc_enable_generic, 3633 .set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550, 3634 .init_uta_tables = ixgbe_init_uta_tables_generic, 3635 .set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing, 3636 .set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing, 3637 .set_source_address_pruning = 3638 ixgbe_set_source_address_pruning_x550, 3639 
.set_ethertype_anti_spoofing = 3640 ixgbe_set_ethertype_anti_spoofing_x550, 3641 .disable_rx_buff = ixgbe_disable_rx_buff_generic, 3642 .enable_rx_buff = ixgbe_enable_rx_buff_generic, 3643 .enable_rx = ixgbe_enable_rx_generic, 3644 .disable_rx = ixgbe_disable_rx_e610, 3645 .led_on = ixgbe_led_on_generic, 3646 .led_off = ixgbe_led_off_generic, 3647 .init_led_link_act = ixgbe_init_led_link_act_generic, 3648 .reset_hw = ixgbe_reset_hw_e610, 3649 .get_media_type = ixgbe_get_media_type_e610, 3650 .setup_link = ixgbe_setup_link_e610, 3651 .get_link_capabilities = ixgbe_get_link_capabilities_e610, 3652 .get_bus_info = ixgbe_get_bus_info_generic, 3653 .acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540, 3654 .release_swfw_sync = ixgbe_release_swfw_sync_X540, 3655 .init_swfw_sync = ixgbe_init_swfw_sync_X540, 3656 .prot_autoc_read = prot_autoc_read_generic, 3657 .prot_autoc_write = prot_autoc_write_generic, 3658 .setup_fc = ixgbe_setup_fc_e610, 3659 .fc_autoneg = ixgbe_fc_autoneg_e610, 3660 }; 3661 3662 static const struct ixgbe_phy_operations phy_ops_e610 = { 3663 .init = ixgbe_init_phy_ops_e610, 3664 .identify = ixgbe_identify_phy_e610, 3665 .identify_sfp = ixgbe_identify_module_e610, 3666 .setup_link_speed = ixgbe_setup_phy_link_speed_generic, 3667 .setup_link = ixgbe_setup_phy_link_e610, 3668 .enter_lplu = ixgbe_enter_lplu_e610, 3669 }; 3670 3671 static const struct ixgbe_eeprom_operations eeprom_ops_e610 = { 3672 .read = ixgbe_read_ee_aci_e610, 3673 .read_buffer = ixgbe_read_ee_aci_buffer_e610, 3674 .validate_checksum = ixgbe_validate_eeprom_checksum_e610, 3675 .read_pba_string = ixgbe_read_pba_string_e610, 3676 .init_params = ixgbe_init_eeprom_params_e610, 3677 }; 3678 3679 const struct ixgbe_info ixgbe_e610_info = { 3680 .mac = ixgbe_mac_e610, 3681 .get_invariants = ixgbe_get_invariants_X540, 3682 .mac_ops = &mac_ops_e610, 3683 .eeprom_ops = &eeprom_ops_e610, 3684 .phy_ops = &phy_ops_e610, 3685 .mbx_ops = &mbx_ops_generic, 3686 .mvals = ixgbe_mvals_x550em_a, 3687 }; 3688