1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2024 Intel Corporation. */ 3 4 #include "ixgbe_common.h" 5 #include "ixgbe_e610.h" 6 #include "ixgbe_x550.h" 7 #include "ixgbe_type.h" 8 #include "ixgbe_x540.h" 9 #include "ixgbe_mbx.h" 10 #include "ixgbe_phy.h" 11 12 /** 13 * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should 14 * be resent 15 * @opcode: ACI opcode 16 * 17 * Check if ACI command should be sent again depending on the provided opcode. 18 * It may happen when CSR is busy during link state changes. 19 * 20 * Return: true if the sending command routine should be repeated, 21 * otherwise false. 22 */ 23 static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode) 24 { 25 switch (opcode) { 26 case ixgbe_aci_opc_disable_rxen: 27 case ixgbe_aci_opc_get_phy_caps: 28 case ixgbe_aci_opc_get_link_status: 29 case ixgbe_aci_opc_get_link_topo: 30 return true; 31 } 32 33 return false; 34 } 35 36 /** 37 * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin 38 * Command Interface 39 * @hw: pointer to the HW struct 40 * @desc: descriptor describing the command 41 * @buf: buffer to use for indirect commands (NULL for direct commands) 42 * @buf_size: size of buffer for indirect commands (0 for direct commands) 43 * 44 * Admin Command is sent using CSR by setting descriptor and buffer in specific 45 * registers. 46 * 47 * Return: the exit code of the operation. 48 * * - 0 - success. 49 * * - -EIO - CSR mechanism is not enabled. 50 * * - -EBUSY - CSR mechanism is busy. 51 * * - -EINVAL - buf_size is too big or 52 * invalid argument buf or buf_size. 53 * * - -ETIME - Admin Command X command timeout. 54 * * - -EIO - Admin Command X invalid state of HICR register or 55 * Admin Command failed because of bad opcode was returned or 56 * Admin Command failed with error Y. 
57 */ 58 static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw, 59 struct ixgbe_aci_desc *desc, 60 void *buf, u16 buf_size) 61 { 62 u16 opcode, buf_tail_size = buf_size % 4; 63 u32 *raw_desc = (u32 *)desc; 64 u32 hicr, i, buf_tail = 0; 65 bool valid_buf = false; 66 67 hw->aci.last_status = IXGBE_ACI_RC_OK; 68 69 /* It's necessary to check if mechanism is enabled */ 70 hicr = IXGBE_READ_REG(hw, IXGBE_PF_HICR); 71 72 if (!(hicr & IXGBE_PF_HICR_EN)) 73 return -EIO; 74 75 if (hicr & IXGBE_PF_HICR_C) { 76 hw->aci.last_status = IXGBE_ACI_RC_EBUSY; 77 return -EBUSY; 78 } 79 80 opcode = le16_to_cpu(desc->opcode); 81 82 if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE) 83 return -EINVAL; 84 85 if (buf) 86 desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_BUF); 87 88 if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_BUF)) { 89 if ((buf && !buf_size) || 90 (!buf && buf_size)) 91 return -EINVAL; 92 if (buf && buf_size) 93 valid_buf = true; 94 } 95 96 if (valid_buf) { 97 if (buf_tail_size) 98 memcpy(&buf_tail, buf + buf_size - buf_tail_size, 99 buf_tail_size); 100 101 if (((buf_size + 3) & ~0x3) > IXGBE_ACI_LG_BUF) 102 desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_LB); 103 104 desc->datalen = cpu_to_le16(buf_size); 105 106 if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_RD)) { 107 for (i = 0; i < buf_size / 4; i++) 108 IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), ((u32 *)buf)[i]); 109 if (buf_tail_size) 110 IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), buf_tail); 111 } 112 } 113 114 /* Descriptor is written to specific registers */ 115 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) 116 IXGBE_WRITE_REG(hw, IXGBE_PF_HIDA(i), raw_desc[i]); 117 118 /* SW has to set PF_HICR.C bit and clear PF_HICR.SV and 119 * PF_HICR_EV 120 */ 121 hicr = (IXGBE_READ_REG(hw, IXGBE_PF_HICR) | IXGBE_PF_HICR_C) & 122 ~(IXGBE_PF_HICR_SV | IXGBE_PF_HICR_EV); 123 IXGBE_WRITE_REG(hw, IXGBE_PF_HICR, hicr); 124 125 #define MAX_SLEEP_RESP_US 1000 126 #define MAX_TMOUT_RESP_SYNC_US 100000000 127 128 /* Wait for sync Admin Command response */ 
129 read_poll_timeout(IXGBE_READ_REG, hicr, 130 (hicr & IXGBE_PF_HICR_SV) || 131 !(hicr & IXGBE_PF_HICR_C), 132 MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_SYNC_US, true, hw, 133 IXGBE_PF_HICR); 134 135 #define MAX_TMOUT_RESP_ASYNC_US 150000000 136 137 /* Wait for async Admin Command response */ 138 read_poll_timeout(IXGBE_READ_REG, hicr, 139 (hicr & IXGBE_PF_HICR_EV) || 140 !(hicr & IXGBE_PF_HICR_C), 141 MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_ASYNC_US, true, hw, 142 IXGBE_PF_HICR); 143 144 /* Read sync Admin Command response */ 145 if ((hicr & IXGBE_PF_HICR_SV)) { 146 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) { 147 raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA(i)); 148 raw_desc[i] = raw_desc[i]; 149 } 150 } 151 152 /* Read async Admin Command response */ 153 if ((hicr & IXGBE_PF_HICR_EV) && !(hicr & IXGBE_PF_HICR_C)) { 154 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) { 155 raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA_2(i)); 156 raw_desc[i] = raw_desc[i]; 157 } 158 } 159 160 /* Handle timeout and invalid state of HICR register */ 161 if (hicr & IXGBE_PF_HICR_C) 162 return -ETIME; 163 164 if (!(hicr & IXGBE_PF_HICR_SV) && !(hicr & IXGBE_PF_HICR_EV)) 165 return -EIO; 166 167 /* For every command other than 0x0014 treat opcode mismatch 168 * as an error. Response to 0x0014 command read from HIDA_2 169 * is a descriptor of an event which is expected to contain 170 * different opcode than the command. 
171 */ 172 if (desc->opcode != cpu_to_le16(opcode) && 173 opcode != ixgbe_aci_opc_get_fw_event) 174 return -EIO; 175 176 if (desc->retval) { 177 hw->aci.last_status = (enum ixgbe_aci_err) 178 le16_to_cpu(desc->retval); 179 return -EIO; 180 } 181 182 /* Write a response values to a buf */ 183 if (valid_buf) { 184 for (i = 0; i < buf_size / 4; i++) 185 ((u32 *)buf)[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i)); 186 if (buf_tail_size) { 187 buf_tail = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i)); 188 memcpy(buf + buf_size - buf_tail_size, &buf_tail, 189 buf_tail_size); 190 } 191 } 192 193 return 0; 194 } 195 196 /** 197 * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface 198 * @hw: pointer to the HW struct 199 * @desc: descriptor describing the command 200 * @buf: buffer to use for indirect commands (NULL for direct commands) 201 * @buf_size: size of buffer for indirect commands (0 for direct commands) 202 * 203 * Helper function to send FW Admin Commands to the FW Admin Command Interface. 204 * 205 * Retry sending the FW Admin Command multiple times to the FW ACI 206 * if the EBUSY Admin Command error is returned. 207 * 208 * Return: the exit code of the operation. 
209 */ 210 int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc, 211 void *buf, u16 buf_size) 212 { 213 u16 opcode = le16_to_cpu(desc->opcode); 214 struct ixgbe_aci_desc desc_cpy; 215 enum ixgbe_aci_err last_status; 216 u8 idx = 0, *buf_cpy = NULL; 217 bool is_cmd_for_retry; 218 unsigned long timeout; 219 int err; 220 221 is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode); 222 if (is_cmd_for_retry) { 223 if (buf) { 224 buf_cpy = kmalloc(buf_size, GFP_KERNEL); 225 if (!buf_cpy) 226 return -ENOMEM; 227 *buf_cpy = *(u8 *)buf; 228 } 229 desc_cpy = *desc; 230 } 231 232 timeout = jiffies + msecs_to_jiffies(IXGBE_ACI_SEND_TIMEOUT_MS); 233 do { 234 mutex_lock(&hw->aci.lock); 235 err = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size); 236 last_status = hw->aci.last_status; 237 mutex_unlock(&hw->aci.lock); 238 239 if (!is_cmd_for_retry || !err || 240 last_status != IXGBE_ACI_RC_EBUSY) 241 break; 242 243 if (buf) 244 memcpy(buf, buf_cpy, buf_size); 245 *desc = desc_cpy; 246 247 msleep(IXGBE_ACI_SEND_DELAY_TIME_MS); 248 } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE && 249 time_before(jiffies, timeout)); 250 251 kfree(buf_cpy); 252 253 return err; 254 } 255 256 /** 257 * ixgbe_aci_check_event_pending - check if there are any pending events 258 * @hw: pointer to the HW struct 259 * 260 * Determine if there are any pending events. 261 * 262 * Return: true if there are any currently pending events 263 * otherwise false. 264 */ 265 bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw) 266 { 267 u32 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0; 268 u32 fwsts = IXGBE_READ_REG(hw, GL_FWSTS); 269 270 return (fwsts & ep_bit_mask) ? 
true : false; 271 } 272 273 /** 274 * ixgbe_aci_get_event - get an event from ACI 275 * @hw: pointer to the HW struct 276 * @e: event information structure 277 * @pending: optional flag signaling that there are more pending events 278 * 279 * Obtain an event from ACI and return its content 280 * through 'e' using ACI command (0x0014). 281 * Provide information if there are more events 282 * to retrieve through 'pending'. 283 * 284 * Return: the exit code of the operation. 285 */ 286 int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e, 287 bool *pending) 288 { 289 struct ixgbe_aci_desc desc; 290 int err; 291 292 if (!e || (!e->msg_buf && e->buf_len)) 293 return -EINVAL; 294 295 mutex_lock(&hw->aci.lock); 296 297 /* Check if there are any events pending */ 298 if (!ixgbe_aci_check_event_pending(hw)) { 299 err = -ENOENT; 300 goto aci_get_event_exit; 301 } 302 303 /* Obtain pending event */ 304 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event); 305 err = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len); 306 if (err) 307 goto aci_get_event_exit; 308 309 /* Returned 0x0014 opcode indicates that no event was obtained */ 310 if (desc.opcode == cpu_to_le16(ixgbe_aci_opc_get_fw_event)) { 311 err = -ENOENT; 312 goto aci_get_event_exit; 313 } 314 315 /* Determine size of event data */ 316 e->msg_len = min_t(u16, le16_to_cpu(desc.datalen), e->buf_len); 317 /* Write event descriptor to event info structure */ 318 memcpy(&e->desc, &desc, sizeof(e->desc)); 319 320 /* Check if there are any further events pending */ 321 if (pending) 322 *pending = ixgbe_aci_check_event_pending(hw); 323 324 aci_get_event_exit: 325 mutex_unlock(&hw->aci.lock); 326 327 return err; 328 } 329 330 /** 331 * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values. 
332 * @desc: pointer to the temp descriptor (non DMA mem) 333 * @opcode: the opcode can be used to decide which flags to turn off or on 334 * 335 * Helper function to fill the descriptor desc with default values 336 * and the provided opcode. 337 */ 338 void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode) 339 { 340 /* Zero out the desc. */ 341 memset(desc, 0, sizeof(*desc)); 342 desc->opcode = cpu_to_le16(opcode); 343 desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI); 344 } 345 346 /** 347 * ixgbe_aci_get_fw_ver - Get the firmware version 348 * @hw: pointer to the HW struct 349 * 350 * Get the firmware version using ACI command (0x0001). 351 * 352 * Return: the exit code of the operation. 353 */ 354 static int ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw) 355 { 356 struct ixgbe_aci_cmd_get_ver *resp; 357 struct ixgbe_aci_desc desc; 358 int err; 359 360 resp = &desc.params.get_ver; 361 362 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver); 363 364 err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0); 365 if (!err) { 366 hw->fw_branch = resp->fw_branch; 367 hw->fw_maj_ver = resp->fw_major; 368 hw->fw_min_ver = resp->fw_minor; 369 hw->fw_patch = resp->fw_patch; 370 hw->fw_build = le32_to_cpu(resp->fw_build); 371 hw->api_branch = resp->api_branch; 372 hw->api_maj_ver = resp->api_major; 373 hw->api_min_ver = resp->api_minor; 374 hw->api_patch = resp->api_patch; 375 } 376 377 return err; 378 } 379 380 /** 381 * ixgbe_aci_req_res - request a common resource 382 * @hw: pointer to the HW struct 383 * @res: resource ID 384 * @access: access type 385 * @sdp_number: resource number 386 * @timeout: the maximum time in ms that the driver may hold the resource 387 * 388 * Requests a common resource using the ACI command (0x0008). 389 * Specifies the maximum time the driver may hold the resource. 
390 * If the requested resource is currently occupied by some other driver, 391 * a busy return value is returned and the timeout field value indicates the 392 * maximum time the current owner has to free it. 393 * 394 * Return: the exit code of the operation. 395 */ 396 static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res, 397 enum ixgbe_aci_res_access_type access, 398 u8 sdp_number, u32 *timeout) 399 { 400 struct ixgbe_aci_cmd_req_res *cmd_resp; 401 struct ixgbe_aci_desc desc; 402 int err; 403 404 cmd_resp = &desc.params.res_owner; 405 406 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res); 407 408 cmd_resp->res_id = cpu_to_le16(res); 409 cmd_resp->access_type = cpu_to_le16(access); 410 cmd_resp->res_number = cpu_to_le32(sdp_number); 411 cmd_resp->timeout = cpu_to_le32(*timeout); 412 *timeout = 0; 413 414 err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0); 415 416 /* If the resource is held by some other driver, the command completes 417 * with a busy return value and the timeout field indicates the maximum 418 * time the current owner of the resource has to free it. 419 */ 420 if (!err || hw->aci.last_status == IXGBE_ACI_RC_EBUSY) 421 *timeout = le32_to_cpu(cmd_resp->timeout); 422 423 return err; 424 } 425 426 /** 427 * ixgbe_aci_release_res - release a common resource using ACI 428 * @hw: pointer to the HW struct 429 * @res: resource ID 430 * @sdp_number: resource number 431 * 432 * Release a common resource using ACI command (0x0009). 433 * 434 * Return: the exit code of the operation. 
435 */ 436 static int ixgbe_aci_release_res(struct ixgbe_hw *hw, 437 enum ixgbe_aci_res_ids res, u8 sdp_number) 438 { 439 struct ixgbe_aci_cmd_req_res *cmd; 440 struct ixgbe_aci_desc desc; 441 442 cmd = &desc.params.res_owner; 443 444 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res); 445 446 cmd->res_id = cpu_to_le16(res); 447 cmd->res_number = cpu_to_le32(sdp_number); 448 449 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); 450 } 451 452 /** 453 * ixgbe_acquire_res - acquire the ownership of a resource 454 * @hw: pointer to the HW structure 455 * @res: resource ID 456 * @access: access type (read or write) 457 * @timeout: timeout in milliseconds 458 * 459 * Make an attempt to acquire the ownership of a resource using 460 * the ixgbe_aci_req_res to utilize ACI. 461 * In case if some other driver has previously acquired the resource and 462 * performed any necessary updates, the -EALREADY is returned, 463 * and the caller does not obtain the resource and has no further work to do. 464 * If needed, the function will poll until the current lock owner timeouts. 465 * 466 * Return: the exit code of the operation. 467 */ 468 int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res, 469 enum ixgbe_aci_res_access_type access, u32 timeout) 470 { 471 #define IXGBE_RES_POLLING_DELAY_MS 10 472 u32 delay = IXGBE_RES_POLLING_DELAY_MS; 473 u32 res_timeout = timeout; 474 u32 retry_timeout; 475 int err; 476 477 err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout); 478 479 /* A return code of -EALREADY means that another driver has 480 * previously acquired the resource and performed any necessary updates; 481 * in this case the caller does not obtain the resource and has no 482 * further work to do. 483 */ 484 if (err == -EALREADY) 485 return err; 486 487 /* If necessary, poll until the current lock owner timeouts. 
488 * Set retry_timeout to the timeout value reported by the FW in the 489 * response to the "Request Resource Ownership" (0x0008) Admin Command 490 * as it indicates the maximum time the current owner of the resource 491 * is allowed to hold it. 492 */ 493 retry_timeout = res_timeout; 494 while (err && retry_timeout && res_timeout) { 495 msleep(delay); 496 retry_timeout = (retry_timeout > delay) ? 497 retry_timeout - delay : 0; 498 err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout); 499 500 /* Success - lock acquired. 501 * -EALREADY - lock free, no work to do. 502 */ 503 if (!err || err == -EALREADY) 504 break; 505 } 506 507 return err; 508 } 509 510 /** 511 * ixgbe_release_res - release a common resource 512 * @hw: pointer to the HW structure 513 * @res: resource ID 514 * 515 * Release a common resource using ixgbe_aci_release_res. 516 */ 517 void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res) 518 { 519 u32 total_delay = 0; 520 int err; 521 522 err = ixgbe_aci_release_res(hw, res, 0); 523 524 /* There are some rare cases when trying to release the resource 525 * results in an admin command timeout, so handle them correctly. 526 */ 527 while (err == -ETIME && 528 total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT) { 529 usleep_range(1000, 1500); 530 err = ixgbe_aci_release_res(hw, res, 0); 531 total_delay++; 532 } 533 } 534 535 /** 536 * ixgbe_parse_e610_caps - Parse common device/function capabilities 537 * @hw: pointer to the HW struct 538 * @caps: pointer to common capabilities structure 539 * @elem: the capability element to parse 540 * @prefix: message prefix for tracing capabilities 541 * 542 * Given a capability element, extract relevant details into the common 543 * capability structure. 544 * 545 * Return: true if the capability matches one of the common capability ids, 546 * false otherwise. 
547 */ 548 static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw, 549 struct ixgbe_hw_caps *caps, 550 struct ixgbe_aci_cmd_list_caps_elem *elem, 551 const char *prefix) 552 { 553 u32 logical_id = le32_to_cpu(elem->logical_id); 554 u32 phys_id = le32_to_cpu(elem->phys_id); 555 u32 number = le32_to_cpu(elem->number); 556 u16 cap = le16_to_cpu(elem->cap); 557 558 switch (cap) { 559 case IXGBE_ACI_CAPS_VALID_FUNCTIONS: 560 caps->valid_functions = number; 561 break; 562 case IXGBE_ACI_CAPS_SRIOV: 563 caps->sr_iov_1_1 = (number == 1); 564 break; 565 case IXGBE_ACI_CAPS_VMDQ: 566 caps->vmdq = (number == 1); 567 break; 568 case IXGBE_ACI_CAPS_DCB: 569 caps->dcb = (number == 1); 570 caps->active_tc_bitmap = logical_id; 571 caps->maxtc = phys_id; 572 break; 573 case IXGBE_ACI_CAPS_RSS: 574 caps->rss_table_size = number; 575 caps->rss_table_entry_width = logical_id; 576 break; 577 case IXGBE_ACI_CAPS_RXQS: 578 caps->num_rxq = number; 579 caps->rxq_first_id = phys_id; 580 break; 581 case IXGBE_ACI_CAPS_TXQS: 582 caps->num_txq = number; 583 caps->txq_first_id = phys_id; 584 break; 585 case IXGBE_ACI_CAPS_MSIX: 586 caps->num_msix_vectors = number; 587 caps->msix_vector_first_id = phys_id; 588 break; 589 case IXGBE_ACI_CAPS_NVM_VER: 590 break; 591 case IXGBE_ACI_CAPS_MAX_MTU: 592 caps->max_mtu = number; 593 break; 594 case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE: 595 caps->pcie_reset_avoidance = (number > 0); 596 break; 597 case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT: 598 caps->reset_restrict_support = (number == 1); 599 break; 600 case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0: 601 case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1: 602 case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2: 603 case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3: 604 { 605 u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0; 606 607 caps->ext_topo_dev_img_ver_high[index] = number; 608 caps->ext_topo_dev_img_ver_low[index] = logical_id; 609 caps->ext_topo_dev_img_part_num[index] = 610 FIELD_GET(IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M, phys_id); 611 
caps->ext_topo_dev_img_load_en[index] = 612 (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0; 613 caps->ext_topo_dev_img_prog_en[index] = 614 (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0; 615 break; 616 } 617 default: 618 /* Not one of the recognized common capabilities */ 619 return false; 620 } 621 622 return true; 623 } 624 625 /** 626 * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps 627 * @hw: pointer to the HW struct 628 * @dev_p: pointer to device capabilities structure 629 * @cap: capability element to parse 630 * 631 * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities. 632 */ 633 static void 634 ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw, 635 struct ixgbe_hw_dev_caps *dev_p, 636 struct ixgbe_aci_cmd_list_caps_elem *cap) 637 { 638 dev_p->num_funcs = hweight32(le32_to_cpu(cap->number)); 639 } 640 641 /** 642 * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps 643 * @hw: pointer to the HW struct 644 * @dev_p: pointer to device capabilities structure 645 * @cap: capability element to parse 646 * 647 * Parse IXGBE_ACI_CAPS_VF for device capabilities. 648 */ 649 static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw, 650 struct ixgbe_hw_dev_caps *dev_p, 651 struct ixgbe_aci_cmd_list_caps_elem *cap) 652 { 653 dev_p->num_vfs_exposed = le32_to_cpu(cap->number); 654 } 655 656 /** 657 * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps 658 * @hw: pointer to the HW struct 659 * @dev_p: pointer to device capabilities structure 660 * @cap: capability element to parse 661 * 662 * Parse IXGBE_ACI_CAPS_VSI for device capabilities. 
663 */ 664 static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw, 665 struct ixgbe_hw_dev_caps *dev_p, 666 struct ixgbe_aci_cmd_list_caps_elem *cap) 667 { 668 dev_p->num_vsi_allocd_to_host = le32_to_cpu(cap->number); 669 } 670 671 /** 672 * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps 673 * @hw: pointer to the HW struct 674 * @dev_p: pointer to device capabilities structure 675 * @cap: capability element to parse 676 * 677 * Parse IXGBE_ACI_CAPS_FD for device capabilities. 678 */ 679 static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw, 680 struct ixgbe_hw_dev_caps *dev_p, 681 struct ixgbe_aci_cmd_list_caps_elem *cap) 682 { 683 dev_p->num_flow_director_fltr = le32_to_cpu(cap->number); 684 } 685 686 /** 687 * ixgbe_parse_dev_caps - Parse device capabilities 688 * @hw: pointer to the HW struct 689 * @dev_p: pointer to device capabilities structure 690 * @buf: buffer containing the device capability records 691 * @cap_count: the number of capabilities 692 * 693 * Helper device to parse device (0x000B) capabilities list. For 694 * capabilities shared between device and function, this relies on 695 * ixgbe_parse_e610_caps. 696 * 697 * Loop through the list of provided capabilities and extract the relevant 698 * data into the device capabilities structured. 
699 */ 700 static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw, 701 struct ixgbe_hw_dev_caps *dev_p, 702 void *buf, u32 cap_count) 703 { 704 struct ixgbe_aci_cmd_list_caps_elem *cap_resp; 705 u32 i; 706 707 cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf; 708 709 memset(dev_p, 0, sizeof(*dev_p)); 710 711 for (i = 0; i < cap_count; i++) { 712 u16 cap = le16_to_cpu(cap_resp[i].cap); 713 714 ixgbe_parse_e610_caps(hw, &dev_p->common_cap, &cap_resp[i], 715 "dev caps"); 716 717 switch (cap) { 718 case IXGBE_ACI_CAPS_VALID_FUNCTIONS: 719 ixgbe_parse_valid_functions_cap(hw, dev_p, 720 &cap_resp[i]); 721 break; 722 case IXGBE_ACI_CAPS_VF: 723 ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 724 break; 725 case IXGBE_ACI_CAPS_VSI: 726 ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 727 break; 728 case IXGBE_ACI_CAPS_FD: 729 ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 730 break; 731 default: 732 /* Don't list common capabilities as unknown */ 733 break; 734 } 735 } 736 } 737 738 /** 739 * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps 740 * @hw: pointer to the HW struct 741 * @func_p: pointer to function capabilities structure 742 * @cap: pointer to the capability element to parse 743 * 744 * Extract function capabilities for IXGBE_ACI_CAPS_VF. 745 */ 746 static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw, 747 struct ixgbe_hw_func_caps *func_p, 748 struct ixgbe_aci_cmd_list_caps_elem *cap) 749 { 750 func_p->num_allocd_vfs = le32_to_cpu(cap->number); 751 func_p->vf_base_id = le32_to_cpu(cap->logical_id); 752 } 753 754 /** 755 * ixgbe_get_num_per_func - determine number of resources per PF 756 * @hw: pointer to the HW structure 757 * @max: value to be evenly split between each PF 758 * 759 * Determine the number of valid functions by going through the bitmap returned 760 * from parsing capabilities and use this to calculate the number of resources 761 * per PF based on the max value passed in. 
762 * 763 * Return: the number of resources per PF or 0, if no PH are available. 764 */ 765 static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max) 766 { 767 #define IXGBE_CAPS_VALID_FUNCS_M GENMASK(7, 0) 768 u8 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 769 IXGBE_CAPS_VALID_FUNCS_M); 770 771 return funcs ? (max / funcs) : 0; 772 } 773 774 /** 775 * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps 776 * @hw: pointer to the HW struct 777 * @func_p: pointer to function capabilities structure 778 * @cap: pointer to the capability element to parse 779 * 780 * Extract function capabilities for IXGBE_ACI_CAPS_VSI. 781 */ 782 static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw, 783 struct ixgbe_hw_func_caps *func_p, 784 struct ixgbe_aci_cmd_list_caps_elem *cap) 785 { 786 func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI); 787 } 788 789 /** 790 * ixgbe_parse_func_caps - Parse function capabilities 791 * @hw: pointer to the HW struct 792 * @func_p: pointer to function capabilities structure 793 * @buf: buffer containing the function capability records 794 * @cap_count: the number of capabilities 795 * 796 * Helper function to parse function (0x000A) capabilities list. For 797 * capabilities shared between device and function, this relies on 798 * ixgbe_parse_e610_caps. 799 * 800 * Loop through the list of provided capabilities and extract the relevant 801 * data into the function capabilities structured. 
802 */ 803 static void ixgbe_parse_func_caps(struct ixgbe_hw *hw, 804 struct ixgbe_hw_func_caps *func_p, 805 void *buf, u32 cap_count) 806 { 807 struct ixgbe_aci_cmd_list_caps_elem *cap_resp; 808 u32 i; 809 810 cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf; 811 812 memset(func_p, 0, sizeof(*func_p)); 813 814 for (i = 0; i < cap_count; i++) { 815 u16 cap = le16_to_cpu(cap_resp[i].cap); 816 817 ixgbe_parse_e610_caps(hw, &func_p->common_cap, 818 &cap_resp[i], "func caps"); 819 820 switch (cap) { 821 case IXGBE_ACI_CAPS_VF: 822 ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 823 break; 824 case IXGBE_ACI_CAPS_VSI: 825 ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 826 break; 827 default: 828 /* Don't list common capabilities as unknown */ 829 break; 830 } 831 } 832 } 833 834 /** 835 * ixgbe_aci_list_caps - query function/device capabilities 836 * @hw: pointer to the HW struct 837 * @buf: a buffer to hold the capabilities 838 * @buf_size: size of the buffer 839 * @cap_count: if not NULL, set to the number of capabilities reported 840 * @opc: capabilities type to discover, device or function 841 * 842 * Get the function (0x000A) or device (0x000B) capabilities description from 843 * firmware and store it in the buffer. 844 * 845 * If the cap_count pointer is not NULL, then it is set to the number of 846 * capabilities firmware will report. Note that if the buffer size is too 847 * small, it is possible the command will return -ENOMEM. The 848 * cap_count will still be updated in this case. It is recommended that the 849 * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible 850 * buffer that firmware could return) to avoid this. 851 * 852 * Return: the exit code of the operation. 853 * Exit code of -ENOMEM means the buffer size is too small. 
854 */ 855 int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size, 856 u32 *cap_count, enum ixgbe_aci_opc opc) 857 { 858 struct ixgbe_aci_cmd_list_caps *cmd; 859 struct ixgbe_aci_desc desc; 860 int err; 861 862 cmd = &desc.params.get_cap; 863 864 if (opc != ixgbe_aci_opc_list_func_caps && 865 opc != ixgbe_aci_opc_list_dev_caps) 866 return -EINVAL; 867 868 ixgbe_fill_dflt_direct_cmd_desc(&desc, opc); 869 err = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size); 870 871 if (cap_count) 872 *cap_count = le32_to_cpu(cmd->count); 873 874 return err; 875 } 876 877 /** 878 * ixgbe_discover_dev_caps - Read and extract device capabilities 879 * @hw: pointer to the hardware structure 880 * @dev_caps: pointer to device capabilities structure 881 * 882 * Read the device capabilities and extract them into the dev_caps structure 883 * for later use. 884 * 885 * Return: the exit code of the operation. 886 */ 887 int ixgbe_discover_dev_caps(struct ixgbe_hw *hw, 888 struct ixgbe_hw_dev_caps *dev_caps) 889 { 890 u32 cap_count; 891 u8 *cbuf; 892 int err; 893 894 cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL); 895 if (!cbuf) 896 return -ENOMEM; 897 898 /* Although the driver doesn't know the number of capabilities the 899 * device will return, we can simply send a 4KB buffer, the maximum 900 * possible size that firmware can return. 901 */ 902 cap_count = IXGBE_ACI_MAX_BUFFER_SIZE / 903 sizeof(struct ixgbe_aci_cmd_list_caps_elem); 904 905 err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE, 906 &cap_count, 907 ixgbe_aci_opc_list_dev_caps); 908 if (!err) 909 ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 910 911 kfree(cbuf); 912 913 return 0; 914 } 915 916 /** 917 * ixgbe_discover_func_caps - Read and extract function capabilities 918 * @hw: pointer to the hardware structure 919 * @func_caps: pointer to function capabilities structure 920 * 921 * Read the function capabilities and extract them into the func_caps structure 922 * for later use. 
923 * 924 * Return: the exit code of the operation. 925 */ 926 int ixgbe_discover_func_caps(struct ixgbe_hw *hw, 927 struct ixgbe_hw_func_caps *func_caps) 928 { 929 u32 cap_count; 930 u8 *cbuf; 931 int err; 932 933 cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL); 934 if (!cbuf) 935 return -ENOMEM; 936 937 /* Although the driver doesn't know the number of capabilities the 938 * device will return, we can simply send a 4KB buffer, the maximum 939 * possible size that firmware can return. 940 */ 941 cap_count = IXGBE_ACI_MAX_BUFFER_SIZE / 942 sizeof(struct ixgbe_aci_cmd_list_caps_elem); 943 944 err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE, 945 &cap_count, 946 ixgbe_aci_opc_list_func_caps); 947 if (!err) 948 ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count); 949 950 kfree(cbuf); 951 952 return 0; 953 } 954 955 /** 956 * ixgbe_get_caps - get info about the HW 957 * @hw: pointer to the hardware structure 958 * 959 * Retrieve both device and function capabilities. 960 * 961 * Return: the exit code of the operation. 962 */ 963 int ixgbe_get_caps(struct ixgbe_hw *hw) 964 { 965 int err; 966 967 err = ixgbe_discover_dev_caps(hw, &hw->dev_caps); 968 if (err) 969 return err; 970 971 return ixgbe_discover_func_caps(hw, &hw->func_caps); 972 } 973 974 /** 975 * ixgbe_aci_disable_rxen - disable RX 976 * @hw: pointer to the HW struct 977 * 978 * Request a safe disable of Receive Enable using ACI command (0x000C). 979 * 980 * Return: the exit code of the operation. 
981 */ 982 int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw) 983 { 984 struct ixgbe_aci_cmd_disable_rxen *cmd; 985 struct ixgbe_aci_desc desc; 986 987 cmd = &desc.params.disable_rxen; 988 989 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen); 990 991 cmd->lport_num = hw->bus.func; 992 993 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); 994 } 995 996 /** 997 * ixgbe_aci_get_phy_caps - returns PHY capabilities 998 * @hw: pointer to the HW struct 999 * @qual_mods: report qualified modules 1000 * @report_mode: report mode capabilities 1001 * @pcaps: structure for PHY capabilities to be filled 1002 * 1003 * Returns the various PHY capabilities supported on the Port 1004 * using ACI command (0x0600). 1005 * 1006 * Return: the exit code of the operation. 1007 */ 1008 int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode, 1009 struct ixgbe_aci_cmd_get_phy_caps_data *pcaps) 1010 { 1011 struct ixgbe_aci_cmd_get_phy_caps *cmd; 1012 u16 pcaps_size = sizeof(*pcaps); 1013 struct ixgbe_aci_desc desc; 1014 int err; 1015 1016 cmd = &desc.params.get_phy; 1017 1018 if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M)) 1019 return -EINVAL; 1020 1021 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps); 1022 1023 if (qual_mods) 1024 cmd->param0 |= cpu_to_le16(IXGBE_ACI_GET_PHY_RQM); 1025 1026 cmd->param0 |= cpu_to_le16(report_mode); 1027 err = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size); 1028 if (!err && report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) { 1029 hw->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); 1030 hw->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); 1031 memcpy(hw->link.link_info.module_type, &pcaps->module_type, 1032 sizeof(hw->link.link_info.module_type)); 1033 } 1034 1035 return err; 1036 } 1037 1038 /** 1039 * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data 1040 * @caps: PHY ability structure to copy data from 1041 * @cfg: PHY configuration structure to copy data to 1042 
 *
 * Helper function to copy data from PHY capabilities data structure
 * to PHY configuration data structure
 */
void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
				struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
{
	if (!caps || !cfg)
		return;

	/* Start from a zeroed config; fields not copied below stay zero. */
	memset(cfg, 0, sizeof(*cfg));
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}

/**
 * ixgbe_aci_set_phy_cfg - set PHY configuration
 * @hw: pointer to the HW struct
 * @cfg: structure with PHY configuration data to be set
 *
 * Set the various PHY configuration parameters supported on the Port
 * using ACI command (0x0601).
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
			  struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
{
	struct ixgbe_aci_desc desc;
	int err;

	if (!cfg)
		return -EINVAL;

	/* Ensure that only valid bits of cfg->caps can be turned on. */
	cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = hw->bus.func;
	/* RD flag: the buffer contents are sent to FW (written out via the
	 * HIBA registers in ixgbe_aci_send_cmd_execute()).
	 */
	desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);

	err = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
	if (!err)
		/* Remember the last configuration successfully applied. */
		hw->phy.curr_user_phy_cfg = *cfg;

	return err;
}

/**
 * ixgbe_aci_set_link_restart_an - set up link and restart AN
 * @hw: pointer to the HW struct
 * @ena_link: if true: enable link, if false: disable link
 *
 * Function sets up the link and restarts the Auto-Negotiation over the link.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
{
	struct ixgbe_aci_cmd_restart_an *cmd;
	struct ixgbe_aci_desc desc;

	cmd = &desc.params.restart_an;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);

	cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
	cmd->lport_num = hw->bus.func;
	/* The ENABLE bit selects between bringing the link up or down
	 * while AN is restarted.
	 */
	if (ena_link)
		cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;

	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
}

/**
 * ixgbe_is_media_cage_present - check if media cage is present
 * @hw: pointer to the HW struct
 *
 * Identify presence of media cage using the ACI command (0x06E0).
 *
 * Return: true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_link_topo *cmd;
	struct ixgbe_aci_desc desc;

	cmd = &desc.params.get_link_topo;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);

	/* Look the node up in the port context. */
	cmd->addr.topo_params.node_type_ctx =
		FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_CTX_M,
			   IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT);

	/* Set node type. */
	cmd->addr.topo_params.node_type_ctx |=
		FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M,
			   IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);

	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
}

/**
 * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
 * @hw: pointer to the HW struct
 *
 * Try to identify the media type based on the phy type.
 * If more than one media type, the ixgbe_media_type_unknown is returned.
 * First, phy_type_low is checked, then phy_type_high.
 * If none are identified, the ixgbe_media_type_unknown is returned
 *
 * Return: type of a media based on phy type in form of enum.
 */
static enum ixgbe_media_type
ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
{
	struct ixgbe_link_status *hw_link_info;

	if (!hw)
		return ixgbe_media_type_unknown;

	hw_link_info = &hw->link.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ixgbe_media_type_unknown;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ixgbe_media_type_da;

		switch (hw_link_info->phy_type_low) {
		case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
		case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
		case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
		case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_SR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_LR:
			return ixgbe_media_type_fiber;
		/* AOC/ACC cable types also map to fiber. */
		case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
			return ixgbe_media_type_fiber;
		case IXGBE_PHY_TYPE_LOW_100BASE_TX:
		case IXGBE_PHY_TYPE_LOW_1000BASE_T:
		case IXGBE_PHY_TYPE_LOW_2500BASE_T:
		case IXGBE_PHY_TYPE_LOW_5GBASE_T:
		case IXGBE_PHY_TYPE_LOW_10GBASE_T:
		case IXGBE_PHY_TYPE_LOW_25GBASE_T:
			return ixgbe_media_type_copper;
		case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
		case IXGBE_PHY_TYPE_LOW_25GBASE_CR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_CR_S:
		case IXGBE_PHY_TYPE_LOW_25GBASE_CR1:
			return ixgbe_media_type_da;

		/* 25G AUI is reported as AUI only when a media cage is
		 * present; otherwise it is treated as backplane below.
		 */
		case IXGBE_PHY_TYPE_LOW_25G_AUI_C2C:
			if (ixgbe_is_media_cage_present(hw))
				return ixgbe_media_type_aui;
			fallthrough;
		case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
		case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
		case IXGBE_PHY_TYPE_LOW_2500BASE_X:
		case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
		case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
		case IXGBE_PHY_TYPE_LOW_25GBASE_KR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_KR1:
		case IXGBE_PHY_TYPE_LOW_25GBASE_KR_S:
			return ixgbe_media_type_backplane;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case IXGBE_PHY_TYPE_HIGH_10BASE_T:
			return ixgbe_media_type_copper;
		}
	}
	return ixgbe_media_type_unknown;
}

/**
 * ixgbe_update_link_info - update status of the HW network link
 * @hw: pointer to the HW struct
 *
 * Update the status of the HW network link.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_update_link_info(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
	struct ixgbe_link_status *li;
	int err;

	if (!hw)
		return -EINVAL;

	li = &hw->link.link_info;

	err = ixgbe_aci_get_link_info(hw, true, NULL);
	if (err)
		return err;

	/* Module type data is only refreshed when media is available. */
	if (!(li->link_info & IXGBE_ACI_MEDIA_AVAILABLE))
		return 0;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
				     pcaps);

	if (!err)
		memcpy(li->module_type, &pcaps->module_type,
		       sizeof(li->module_type));

	kfree(pcaps);

	return err;
}

/**
 * ixgbe_get_link_status - get status of the HW network link
 * @hw: pointer to the HW struct
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non zero. As a
 * result of this call, link status reporting becomes enabled
 *
 * Return: the exit code of the operation.
 */
int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
{
	if (!hw || !link_up)
		return -EINVAL;

	/* Query FW only when fresh link info was requested; otherwise the
	 * cached link_info in the hw context is used.
	 */
	if (hw->link.get_link_info) {
		int err = ixgbe_update_link_info(hw);

		if (err)
			return err;
	}

	*link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;

	return 0;
}

/**
 * ixgbe_aci_get_link_info - get the link status
 * @hw: pointer to the HW struct
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 *
 * Get the current Link Status using ACI command (0x607).
 * The current link can be optionally provided to update
 * the status.
 *
 * Return: the link status of the adapter.
 */
int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
			    struct ixgbe_link_status *link)
{
	struct ixgbe_aci_cmd_get_link_status_data link_data = {};
	struct ixgbe_aci_cmd_get_link_status *resp;
	struct ixgbe_link_status *li_old, *li;
	struct ixgbe_fc_info *hw_fc_info;
	struct ixgbe_aci_desc desc;
	bool tx_pause, rx_pause;
	u8 cmd_flags;
	int err;

	if (!hw)
		return -EINVAL;

	li_old = &hw->link.link_info_old;
	li = &hw->link.link_info;
	hw_fc_info = &hw->fc;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
	cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = hw->bus.func;

	err = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
	if (err)
		return err;

	/* Save off old link status information. */
	*li_old = *li;

	/* Update current link status information. */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
				      IXGBE_ACI_CFG_PACING_TYPE_M);

	/* Update fc info. */
	tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ixgbe_fc_full;
	else if (tx_pause)
		hw_fc_info->current_mode = ixgbe_fc_tx_pause;
	else if (rx_pause)
		hw_fc_info->current_mode = ixgbe_fc_rx_pause;
	else
		hw_fc_info->current_mode = ixgbe_fc_none;

	/* NOTE(review): resp->cmd_flags is read back from the descriptor
	 * after command completion - presumably updated by FW to report
	 * whether LSE is enabled.
	 */
	li->lse_ena = !!(le16_to_cpu(resp->cmd_flags) &
			 IXGBE_ACI_LSE_IS_ENABLED);

	/* Save link status information. */
	if (link)
		*link = *li;

	/* Flag cleared so calling functions don't call AQ again. */
	hw->link.get_link_info = false;

	return 0;
}

/**
 * ixgbe_aci_set_event_mask - set event mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 *
 * Set the event mask using ACI command (0x0613).
 *
 * Return: the exit code of the operation.
1408 */ 1409 int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask) 1410 { 1411 struct ixgbe_aci_cmd_set_event_mask *cmd; 1412 struct ixgbe_aci_desc desc; 1413 1414 cmd = &desc.params.set_event_mask; 1415 1416 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask); 1417 1418 cmd->lport_num = port_num; 1419 1420 cmd->event_mask = cpu_to_le16(mask); 1421 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); 1422 } 1423 1424 /** 1425 * ixgbe_configure_lse - enable/disable link status events 1426 * @hw: pointer to the HW struct 1427 * @activate: true for enable lse, false otherwise 1428 * @mask: event mask to be set; a set bit means deactivation of the 1429 * corresponding event 1430 * 1431 * Set the event mask and then enable or disable link status events 1432 * 1433 * Return: the exit code of the operation. 1434 */ 1435 int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask) 1436 { 1437 int err; 1438 1439 err = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask); 1440 if (err) 1441 return err; 1442 1443 /* Enabling link status events generation by fw. */ 1444 return ixgbe_aci_get_link_info(hw, activate, NULL); 1445 } 1446 1447 /** 1448 * ixgbe_start_hw_e610 - Prepare hardware for Tx/Rx 1449 * @hw: pointer to hardware structure 1450 * 1451 * Get firmware version and start the hardware using the generic 1452 * start_hw() and ixgbe_start_hw_gen2() functions. 1453 * 1454 * Return: the exit code of the operation. 
 */
static int ixgbe_start_hw_e610(struct ixgbe_hw *hw)
{
	int err;

	/* FW version must be retrievable before the HW is started. */
	err = ixgbe_aci_get_fw_ver(hw);
	if (err)
		return err;

	err = ixgbe_start_hw_generic(hw);
	if (err)
		return err;

	ixgbe_start_hw_gen2(hw);

	return 0;
}

/**
 * ixgbe_get_media_type_e610 - Gets media type
 * @hw: pointer to the HW struct
 *
 * In order to get the media type, the function gets PHY
 * capabilities and later on use them to identify the PHY type
 * checking phy_type_high and phy_type_low.
 *
 * Return: the type of media in form of ixgbe_media_type enum
 * or ixgbe_media_type_unknown in case of an error.
 */
enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
	int rc;

	rc = ixgbe_update_link_info(hw);
	if (rc)
		return ixgbe_media_type_unknown;

	/* If there is no link but PHY (dongle) is available SW should use
	 * Get PHY Caps admin command instead of Get Link Status, find most
	 * significant bit that is set in PHY types reported by the command
	 * and use it to discover media type.
	 */
	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
	    (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
		int highest_bit;

		/* Get PHY Capabilities */
		rc = ixgbe_aci_get_phy_caps(hw, false,
					    IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
					    &pcaps);
		if (rc)
			return ixgbe_media_type_unknown;

		/* phy_type_high is searched first; phy_type_low is only
		 * consulted when no high bit is set.
		 */
		highest_bit = fls64(le64_to_cpu(pcaps.phy_type_high));
		if (highest_bit) {
			hw->link.link_info.phy_type_high =
				BIT_ULL(highest_bit - 1);
			hw->link.link_info.phy_type_low = 0;
		} else {
			highest_bit = fls64(le64_to_cpu(pcaps.phy_type_low));
			if (highest_bit) {
				hw->link.link_info.phy_type_low =
					BIT_ULL(highest_bit - 1);
				hw->link.link_info.phy_type_high = 0;
			}
		}
	}

	/* Based on link status or search above try to discover media type. */
	hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);

	return hw->phy.media_type;
}

/**
 * ixgbe_setup_link_e610 - Set up link
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait: true when waiting for completion is needed
 *
 * Set up the link with the specified speed.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
			  bool autoneg_wait)
{
	/* Simply request FW to perform proper PHY setup */
	return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
}

/**
 * ixgbe_check_link_e610 - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true when link is up
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Determine if the link is up and the current link speed
 * using ACI command (0x0607).
 *
 * Return: the exit code of the operation.
 */
int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
			  bool *link_up, bool link_up_wait_to_complete)
{
	int err;
	u32 i;

	if (!speed || !link_up)
		return -EINVAL;

	/* Set get_link_info flag to ensure that fresh
	 * link information will be obtained from FW
	 * by sending Get Link Status admin command.
	 */
	hw->link.get_link_info = true;

	/* Update link information in adapter context. */
	err = ixgbe_get_link_status(hw, link_up);
	if (err)
		return err;

	/* Wait for link up if it was requested. Poll in 100 ms steps up to
	 * hw->mac.max_link_up_time attempts.
	 */
	if (link_up_wait_to_complete && !(*link_up)) {
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			msleep(100);
			hw->link.get_link_info = true;
			err = ixgbe_get_link_status(hw, link_up);
			if (err)
				return err;
			if (*link_up)
				break;
		}
	}

	/* Use link information in adapter context updated by the call
	 * to ixgbe_get_link_status() to determine current link speed.
	 * Link speed information is valid only when link up was
	 * reported by FW.
	 */
	if (*link_up) {
		switch (hw->link.link_info.link_speed) {
		case IXGBE_ACI_LINK_SPEED_10MB:
			*speed = IXGBE_LINK_SPEED_10_FULL;
			break;
		case IXGBE_ACI_LINK_SPEED_100MB:
			*speed = IXGBE_LINK_SPEED_100_FULL;
			break;
		case IXGBE_ACI_LINK_SPEED_1000MB:
			*speed = IXGBE_LINK_SPEED_1GB_FULL;
			break;
		case IXGBE_ACI_LINK_SPEED_2500MB:
			*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
			break;
		case IXGBE_ACI_LINK_SPEED_5GB:
			*speed = IXGBE_LINK_SPEED_5GB_FULL;
			break;
		case IXGBE_ACI_LINK_SPEED_10GB:
			*speed = IXGBE_LINK_SPEED_10GB_FULL;
			break;
		default:
			*speed = IXGBE_LINK_SPEED_UNKNOWN;
			break;
		}
	} else {
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return 0;
}

/**
 * ixgbe_get_link_capabilities_e610 - Determine link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: true when autoneg or autotry is enabled
 *
 * Determine speed and AN parameters of a link.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *autoneg)
{
	if (!speed || !autoneg)
		return -EINVAL;

	/* Autoneg is always reported as enabled here. */
	*autoneg = true;
	*speed = hw->phy.speeds_supported;

	return 0;
}

/**
 * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
 * @hw: pointer to hardware structure
 * @cfg: PHY configuration data to set FC mode
 * @req_mode: FC mode to configure
 *
 * Configures PHY Flow Control according to the provided configuration.
 *
 * Return: the exit code of the operation.
1660 */ 1661 int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw, 1662 struct ixgbe_aci_cmd_set_phy_cfg_data *cfg, 1663 enum ixgbe_fc_mode req_mode) 1664 { 1665 u8 pause_mask = 0x0; 1666 1667 if (!cfg) 1668 return -EINVAL; 1669 1670 switch (req_mode) { 1671 case ixgbe_fc_full: 1672 pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE; 1673 pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE; 1674 break; 1675 case ixgbe_fc_rx_pause: 1676 pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE; 1677 break; 1678 case ixgbe_fc_tx_pause: 1679 pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE; 1680 break; 1681 default: 1682 break; 1683 } 1684 1685 /* Clear the old pause settings. */ 1686 cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE | 1687 IXGBE_ACI_PHY_EN_RX_LINK_PAUSE); 1688 1689 /* Set the new capabilities. */ 1690 cfg->caps |= pause_mask; 1691 1692 return 0; 1693 } 1694 1695 /** 1696 * ixgbe_setup_fc_e610 - Set up flow control 1697 * @hw: pointer to hardware structure 1698 * 1699 * Set up flow control. This has to be done during init time. 1700 * 1701 * Return: the exit code of the operation. 
 */
int ixgbe_setup_fc_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {};
	struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {};
	int err;

	/* Get the current PHY config */
	err = ixgbe_aci_get_phy_caps(hw, false,
				     IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
	if (err)
		return err;

	ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);

	/* Configure the set PHY data */
	err = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
	if (err)
		return err;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps.caps) {
		cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;

		err = ixgbe_aci_set_phy_cfg(hw, &cfg);
		if (err)
			return err;
	}

	return err;
}

/**
 * ixgbe_fc_autoneg_e610 - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Configure Flow Control.
 */
void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw)
{
	int err;

	/* Get current link err.
	 * Current FC mode will be stored in the hw context.
	 */
	err = ixgbe_aci_get_link_info(hw, false, NULL);
	if (err)
		goto no_autoneg;

	/* Check if the link is up */
	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP))
		goto no_autoneg;

	/* Check if auto-negotiation has completed */
	if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED))
		goto no_autoneg;

	hw->fc.fc_was_autonegged = true;
	return;

no_autoneg:
	/* AN result not usable - fall back to the requested FC mode. */
	hw->fc.fc_was_autonegged = false;
	hw->fc.current_mode = hw->fc.requested_mode;
}

/**
 * ixgbe_disable_rx_e610 - Disable RX unit
 * @hw: pointer to hardware structure
 *
 * Disable RX DMA unit on E610 with use of ACI command (0x000C).
 *
 * Return: the exit code of the operation.
 */
void ixgbe_disable_rx_e610(struct ixgbe_hw *hw)
{
	u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	u32 pfdtxgswc;
	int err;

	/* Nothing to do if RX is already disabled. */
	if (!(rxctrl & IXGBE_RXCTRL_RXEN))
		return;

	/* Clear the VT loopback enable bit, recording in set_lben whether
	 * it was set beforehand.
	 */
	pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
	if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
		pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
		hw->mac.set_lben = true;
	} else {
		hw->mac.set_lben = false;
	}

	err = ixgbe_aci_disable_rxen(hw);

	/* If we fail - disable RX using register write */
	if (err) {
		rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
		if (rxctrl & IXGBE_RXCTRL_RXEN) {
			rxctrl &= ~IXGBE_RXCTRL_RXEN;
			IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
		}
	}
}

/**
 * ixgbe_init_phy_ops_e610 - PHY specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY type was not known.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;

	/* PHY power control is only provided for copper media. */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
		phy->ops.set_phy_power = ixgbe_set_phy_power_e610;
	else
		phy->ops.set_phy_power = NULL;

	/* Identify the PHY */
	return phy->ops.identify(hw);
}

/**
 * ixgbe_identify_phy_e610 - Identify PHY
 * @hw: pointer to hardware structure
 *
 * Determine PHY type, supported speeds and PHY ID.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
	u64 phy_type_low, phy_type_high;
	int err;

	/* Set PHY type */
	hw->phy.type = ixgbe_phy_fw;

	err = ixgbe_aci_get_phy_caps(hw, false,
				     IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
	if (err)
		return err;

	if (!(pcaps.module_compliance_enforcement &
	      IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
		/* Handle lenient mode */
		err = ixgbe_aci_get_phy_caps(hw, false,
					     IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
					     &pcaps);
		if (err)
			return err;
	}

	/* Determine supported speeds by testing each reported PHY type
	 * bit against the speed it implements.
	 */
	hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
	phy_type_high = le64_to_cpu(pcaps.phy_type_high);
	phy_type_low = le64_to_cpu(pcaps.phy_type_low);

	if (phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
	if (phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
	if (phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
	if (phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;

	/* 2.5 and 5 Gbps link speeds must be excluded from the
	 * auto-negotiation set used during driver initialization due to
	 * compatibility issues with certain switches. Those issues do not
	 * exist in case of E610 2.5G SKU device (0x57b1).
	 */
	if (!hw->phy.autoneg_advertised &&
	    hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
		hw->phy.autoneg_advertised = hw->phy.speeds_supported;

	if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;

	/* For the 2.5G SKU, the advertised default is captured after the
	 * 2.5G speeds are folded in (but before 5G) - see the note above.
	 */
	if (!hw->phy.autoneg_advertised &&
	    hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
		hw->phy.autoneg_advertised = hw->phy.speeds_supported;

	if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;

	/* Set PHY ID */
	memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));

	hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
				       IXGBE_LINK_SPEED_100_FULL |
				       IXGBE_LINK_SPEED_1GB_FULL;
	hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;

	return 0;
}

/**
 * ixgbe_identify_module_e610 - Identify SFP module type
 * @hw: pointer to hardware structure
 *
 * Identify the SFP module type.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_identify_module_e610(struct ixgbe_hw *hw)
{
	bool media_available;
	u8 module_type;
	int err;

	err = ixgbe_update_link_info(hw);
	if (err)
		return err;

	media_available =
		(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE);

	if (media_available) {
		hw->phy.sfp_type = ixgbe_sfp_type_unknown;

		/* Get module type from hw context updated by
		 * ixgbe_update_link_info()
		 */
		module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];

		/* Map the module-type identification byte onto the driver's
		 * SFP type enum; unmatched bits leave sfp_type_unknown.
		 */
		if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
		    (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
		} else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
			hw->phy.sfp_type = ixgbe_sfp_type_sr;
		} else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
			   (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
			hw->phy.sfp_type = ixgbe_sfp_type_lr;
		}
	} else {
		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
		return -ENOENT;
	}

	return 0;
}

/**
 * ixgbe_setup_phy_link_e610 - Sets up firmware-controlled PHYs
 * @hw: pointer to hardware structure
 *
 * Set the parameters for the firmware-controlled PHYs.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
	struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
	u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
	u64 sup_phy_type_low, sup_phy_type_high;
	u64 phy_type_low = 0, phy_type_high = 0;
	int err;

	err = ixgbe_aci_get_link_info(hw, false, NULL);
	if (err)
		return err;

	/* If media is not available get default config. */
	if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
		rmode = IXGBE_ACI_REPORT_DFLT_CFG;

	err = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
	if (err)
		return err;

	sup_phy_type_low = le64_to_cpu(pcaps.phy_type_low);
	sup_phy_type_high = le64_to_cpu(pcaps.phy_type_high);

	/* Get Active configuration to avoid unintended changes. */
	err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
				     &pcaps);
	if (err)
		return err;

	ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);

	/* Translate each advertised link speed into the set of PHY types
	 * that can realize it.
	 */
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
	}

	/* Mask the set values to avoid requesting unsupported link types. */
	phy_type_low &= sup_phy_type_low;
	pcfg.phy_type_low = cpu_to_le64(phy_type_low);
	phy_type_high &= sup_phy_type_high;
	pcfg.phy_type_high = cpu_to_le64(phy_type_high);

	/* Only push a new config to FW when something actually changed
	 * relative to the active configuration.
	 */
	if (pcfg.phy_type_high != pcaps.phy_type_high ||
	    pcfg.phy_type_low != pcaps.phy_type_low ||
	    pcfg.caps != pcaps.caps) {
		pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
		pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;

		err = ixgbe_aci_set_phy_cfg(hw, &pcfg);
		if (err)
			return err;
	}

	return 0;
}

/**
 * ixgbe_set_phy_power_e610 - Control power for copper PHY
 * @hw: pointer to hardware structure
 * @on: true for on, false for off
 *
 * Set the power on/off of the PHY
 * by getting its capabilities and setting the appropriate
 * configuration parameters.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on)
{
	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
	int err;

	err = ixgbe_aci_get_phy_caps(hw, false,
				     IXGBE_ACI_REPORT_ACTIVE_CFG,
				     &phy_caps);
	if (err)
		return err;

	/* Start from the active config so only the power bit changes. */
	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);

	if (on)
		phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
	else
		phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;

	/* PHY is already in requested power mode. */
	if (phy_caps.caps == phy_cfg.caps)
		return 0;

	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;

	return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
}

/**
 * ixgbe_enter_lplu_e610 - Transition to low power states
 * @hw: pointer to hardware structure
 *
 * Configures Low Power Link Up on transition to low power states
 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
 * X557 PHY immediately prior to entering LPLU.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
	int err;

	err = ixgbe_aci_get_phy_caps(hw, false,
				     IXGBE_ACI_REPORT_ACTIVE_CFG,
				     &phy_caps);
	if (err)
		return err;

	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);

	/* Only the D3cold LPLU autoneg bit is added on top of the active
	 * configuration.
	 */
	phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;

	return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
}

/**
 * ixgbe_init_eeprom_params_e610 - Initialize EEPROM params
 * @hw: pointer to hardware structure
 *
 * Initialize the EEPROM parameters ixgbe_eeprom_info within the ixgbe_hw
 * struct in order to set up EEPROM access.
 *
 * Return: the operation exit code.
 */
int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 gens_stat;
	u8 sr_size;

	/* Already initialized — nothing to do. */
	if (eeprom->type != ixgbe_eeprom_uninitialized)
		return 0;

	eeprom->type = ixgbe_flash;

	gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
	sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);

	/* Switching to words (sr_size contains power of 2). */
	eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;

	hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type,
	       eeprom->word_size);

	return 0;
}

/**
 * ixgbe_aci_get_netlist_node - get a node handle
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 *
 * Get the netlist node and assigns it to
 * the provided handle using ACI command (0x06E0).
 *
 * Return: the exit code of the operation.
 */
int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
			       struct ixgbe_aci_cmd_get_link_topo *cmd,
			       u8 *node_part_number, u16 *node_handle)
{
	struct ixgbe_aci_desc desc;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
		return -EOPNOTSUPP;

	/* Both outputs are optional; copy only what the caller asked for. */
	if (node_handle)
		*node_handle =
			le16_to_cpu(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return 0;
}

/**
 * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * Request NVM ownership.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
		      enum ixgbe_aci_res_access_type access)
{
	u32 fla;

	/* Skip if we are in blank NVM programming mode */
	fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
	if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
		return 0;

	return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
				 IXGBE_NVM_TIMEOUT);
}

/**
 * ixgbe_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * Release NVM ownership.
 */
void ixgbe_release_nvm(struct ixgbe_hw *hw)
{
	u32 fla;

	/* Skip if we are in blank NVM programming mode */
	fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
	if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
		return;

	ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
}

/**
 * ixgbe_aci_read_nvm - read NVM
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be read (in bytes from the offset)
 * @data: command buffer (size [bytes] = length)
 * @last_command: tells if this is the last command in a series
 * @read_shadow_ram: tell if this is a shadow RAM read
 *
 * Read the NVM using ACI command (0x0701).
 *
 * Return: the exit code of the operation.
 */
int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
		       u16 length, void *data, bool last_command,
		       bool read_shadow_ram)
{
	struct ixgbe_aci_cmd_nvm *cmd;
	struct ixgbe_aci_desc desc;

	/* The command encodes the offset in 24 bits (offset_low +
	 * offset_high below), so larger offsets cannot be requested.
	 */
	if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
		return -EINVAL;

	cmd = &desc.params.nvm;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);

	if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
		cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;

	/* If this is the last command in a series, set the proper flag. */
	if (last_command)
		cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
	cmd->module_typeid = cpu_to_le16(module_typeid);
	cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
	cmd->offset_high = (offset >> 16) & 0xFF;
	cmd->length = cpu_to_le16(length);

	return ixgbe_aci_send_cmd(hw, &desc, data, length);
}

/**
 * ixgbe_nvm_validate_checksum - validate checksum
 * @hw: pointer to the HW struct
 *
 * Verify NVM PFA checksum validity using ACI command (0x0706).
 * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
 * The function acquires and then releases the NVM ownership.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_nvm_checksum *cmd;
	struct ixgbe_aci_desc desc;
	int err;

	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
	if (err)
		return err;

	cmd = &desc.params.nvm_checksum;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
	cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;

	err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);

	/* Release ownership before inspecting the response descriptor. */
	ixgbe_release_nvm(hw);

	/* On success firmware writes the verification result back into the
	 * same descriptor; anything other than CHECKSUM_CORRECT is an error.
	 */
	if (!err && cmd->checksum !=
	    cpu_to_le16(IXGBE_ACI_NVM_CHECKSUM_CORRECT)) {
		struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
							     hw);

		err = -EIO;
		netdev_err(adapter->netdev, "Invalid Shadow Ram checksum");
	}

	return err;
}

/**
 * ixgbe_discover_flash_size - Discover the available flash size
 * @hw: pointer to the HW struct
 *
 * The device flash could be up to 16MB in size. However, it is possible that
 * the actual size is smaller. Use bisection to determine the accessible size
 * of flash memory.
 *
 * Return: the exit code of the operation.
2338 */ 2339 static int ixgbe_discover_flash_size(struct ixgbe_hw *hw) 2340 { 2341 u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1; 2342 int err; 2343 2344 err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ); 2345 if (err) 2346 return err; 2347 2348 while ((max_size - min_size) > 1) { 2349 u32 offset = (max_size + min_size) / 2; 2350 u32 len = 1; 2351 u8 data; 2352 2353 err = ixgbe_read_flat_nvm(hw, offset, &len, &data, false); 2354 if (err == -EIO && 2355 hw->aci.last_status == IXGBE_ACI_RC_EINVAL) { 2356 err = 0; 2357 max_size = offset; 2358 } else if (!err) { 2359 min_size = offset; 2360 } else { 2361 /* an unexpected error occurred */ 2362 goto err_read_flat_nvm; 2363 } 2364 } 2365 2366 hw->flash.flash_size = max_size; 2367 2368 err_read_flat_nvm: 2369 ixgbe_release_nvm(hw); 2370 2371 return err; 2372 } 2373 2374 /** 2375 * ixgbe_read_sr_base_address - Read the value of a Shadow RAM pointer word 2376 * @hw: pointer to the HW structure 2377 * @offset: the word offset of the Shadow RAM word to read 2378 * @pointer: pointer value read from Shadow RAM 2379 * 2380 * Read the given Shadow RAM word, and convert it to a pointer value specified 2381 * in bytes. This function assumes the specified offset is a valid pointer 2382 * word. 2383 * 2384 * Each pointer word specifies whether it is stored in word size or 4KB 2385 * sector size by using the highest bit. The reported pointer value will be in 2386 * bytes, intended for flat NVM reads. 2387 * 2388 * Return: the exit code of the operation. 
2389 */ 2390 static int ixgbe_read_sr_base_address(struct ixgbe_hw *hw, u16 offset, 2391 u32 *pointer) 2392 { 2393 u16 value; 2394 int err; 2395 2396 err = ixgbe_read_ee_aci_e610(hw, offset, &value); 2397 if (err) 2398 return err; 2399 2400 /* Determine if the pointer is in 4KB or word units */ 2401 if (value & IXGBE_SR_NVM_PTR_4KB_UNITS) 2402 *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * SZ_4K; 2403 else 2404 *pointer = value * sizeof(u16); 2405 2406 return 0; 2407 } 2408 2409 /** 2410 * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word 2411 * @hw: pointer to the HW structure 2412 * @offset: the word offset of the Shadow RAM to read 2413 * @size: size value read from the Shadow RAM 2414 * 2415 * Read the given Shadow RAM word, and convert it to an area size value 2416 * specified in bytes. This function assumes the specified offset is a valid 2417 * area size word. 2418 * 2419 * Each area size word is specified in 4KB sector units. This function reports 2420 * the size in bytes, intended for flat NVM reads. 2421 * 2422 * Return: the exit code of the operation. 2423 */ 2424 static int ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size) 2425 { 2426 u16 value; 2427 int err; 2428 2429 err = ixgbe_read_ee_aci_e610(hw, offset, &value); 2430 if (err) 2431 return err; 2432 2433 /* Area sizes are always specified in 4KB units */ 2434 *size = value * SZ_4K; 2435 2436 return 0; 2437 } 2438 2439 /** 2440 * ixgbe_determine_active_flash_banks - Discover active bank for each module 2441 * @hw: pointer to the HW struct 2442 * 2443 * Read the Shadow RAM control word and determine which banks are active for 2444 * the NVM, OROM, and Netlist modules. Also read and calculate the associated 2445 * pointer and size. These values are then cached into the ixgbe_flash_info 2446 * structure for later use in order to calculate the correct offset to read 2447 * from the active module. 2448 * 2449 * Return: the exit code of the operation. 
 */
static int ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
{
	struct ixgbe_bank_info *banks = &hw->flash.banks;
	u16 ctrl_word;
	int err;

	err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_NVM_CTRL_WORD,
				     &ctrl_word);
	if (err)
		return err;

	/* The control word must carry the expected validity pattern. */
	if (FIELD_GET(IXGBE_SR_CTRL_WORD_1_M, ctrl_word) !=
	    IXGBE_SR_CTRL_WORD_VALID)
		return -ENODATA;

	/* One control bit per module selects first vs second bank. */
	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
		banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
	else
		banks->nvm_bank = IXGBE_2ND_FLASH_BANK;

	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
		banks->orom_bank = IXGBE_1ST_FLASH_BANK;
	else
		banks->orom_bank = IXGBE_2ND_FLASH_BANK;

	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
		banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
	else
		banks->netlist_bank = IXGBE_2ND_FLASH_BANK;

	/* Cache the base pointer and size of each module for later offset
	 * calculations in ixgbe_get_flash_bank_offset().
	 */
	err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
					 &banks->nvm_ptr);
	if (err)
		return err;

	err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NVM_BANK_SIZE,
				      &banks->nvm_size);
	if (err)
		return err;

	err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
					 &banks->orom_ptr);
	if (err)
		return err;

	err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_OROM_BANK_SIZE,
				      &banks->orom_size);
	if (err)
		return err;

	err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_NETLIST_BANK_PTR,
					 &banks->netlist_ptr);
	if (err)
		return err;

	err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NETLIST_BANK_SIZE,
				      &banks->netlist_size);

	return err;
}

/**
 * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
 * @hw: pointer to the HW structure
 * @bank: whether to read from the active or inactive flash bank
 * @module: the module to read from
 *
 * Based on the module, lookup the module offset from the beginning of the
 * flash.
 *
 * Return: the flash offset. Note that a value of zero is invalid and must be
 * treated as an error.
 */
static int ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
				       enum ixgbe_bank_select bank,
				       u16 module)
{
	struct ixgbe_bank_info *banks = &hw->flash.banks;
	enum ixgbe_flash_bank active_bank;
	bool second_bank_active;
	u32 offset, size;

	switch (module) {
	case IXGBE_E610_SR_1ST_NVM_BANK_PTR:
		offset = banks->nvm_ptr;
		size = banks->nvm_size;
		active_bank = banks->nvm_bank;
		break;
	case IXGBE_E610_SR_1ST_OROM_BANK_PTR:
		offset = banks->orom_ptr;
		size = banks->orom_size;
		active_bank = banks->orom_bank;
		break;
	case IXGBE_E610_SR_NETLIST_BANK_PTR:
		offset = banks->netlist_ptr;
		size = banks->netlist_size;
		active_bank = banks->netlist_bank;
		break;
	default:
		/* Unknown module: zero signals "invalid" to the caller. */
		return 0;
	}

	switch (active_bank) {
	case IXGBE_1ST_FLASH_BANK:
		second_bank_active = false;
		break;
	case IXGBE_2ND_FLASH_BANK:
		second_bank_active = true;
		break;
	default:
		return 0;
	}

	/* The second flash bank is stored immediately following the first
	 * bank. Based on whether the 1st or 2nd bank is active, and whether
	 * we want the active or inactive bank, calculate the desired offset.
	 */
	switch (bank) {
	case IXGBE_ACTIVE_FLASH_BANK:
		return offset + (second_bank_active ? size : 0);
	case IXGBE_INACTIVE_FLASH_BANK:
		return offset + (second_bank_active ? 0 : size);
	}

	return 0;
}

/**
 * ixgbe_read_flash_module - Read a word from one of the main NVM modules
 * @hw: pointer to the HW structure
 * @bank: which bank of the module to read
 * @module: the module to read
 * @offset: the offset into the module in bytes
 * @data: storage for the word read from the flash
 * @length: bytes of data to read
 *
 * Read data from the specified flash module. The bank parameter indicates
 * whether or not to read from the active bank or the inactive bank of that
 * module.
 *
 * The word will be read using flat NVM access, and relies on the
 * hw->flash.banks data being setup by ixgbe_determine_active_flash_banks()
 * during initialization.
 *
 * Return: the exit code of the operation.
 */
static int ixgbe_read_flash_module(struct ixgbe_hw *hw,
				   enum ixgbe_bank_select bank,
				   u16 module, u32 offset, u8 *data, u32 length)
{
	u32 start;
	int err;

	/* Zero means the module/bank lookup failed. */
	start = ixgbe_get_flash_bank_offset(hw, bank, module);
	if (!start)
		return -EINVAL;

	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
	if (err)
		return err;

	err = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);

	ixgbe_release_nvm(hw);

	return err;
}

/**
 * ixgbe_read_nvm_module - Read from the active main NVM module
 * @hw: pointer to the HW structure
 * @bank: whether to read from active or inactive NVM module
 * @offset: offset into the NVM module to read, in words
 * @data: storage for returned word value
 *
 * Read the specified word from the active NVM module. This includes the CSS
 * header at the start of the NVM module.
 *
 * Return: the exit code of the operation.
 */
static int ixgbe_read_nvm_module(struct ixgbe_hw *hw,
				 enum ixgbe_bank_select bank,
				 u32 offset, u16 *data)
{
	__le16 data_local;
	int err;

	/* Flash stores little-endian words; convert after the flat read. */
	err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
				      offset * sizeof(data_local),
				      (u8 *)&data_local,
				      sizeof(data_local));
	if (!err)
		*data = le16_to_cpu(data_local);

	return err;
}

/**
 * ixgbe_read_netlist_module - Read data from the netlist module area
 * @hw: pointer to the HW structure
 * @bank: whether to read from the active or inactive module
 * @offset: offset into the netlist to read from
 * @data: storage for returned word value
 *
 * Read a word from the specified netlist bank.
 *
 * Return: the exit code of the operation.
 */
static int ixgbe_read_netlist_module(struct ixgbe_hw *hw,
				     enum ixgbe_bank_select bank,
				     u32 offset, u16 *data)
{
	__le16 data_local;
	int err;

	err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
				      offset * sizeof(data_local),
				      (u8 *)&data_local, sizeof(data_local));
	if (!err)
		*data = le16_to_cpu(data_local);

	return err;
}

/**
 * ixgbe_read_orom_module - Read from the active Option ROM module
 * @hw: pointer to the HW structure
 * @bank: whether to read from active or inactive OROM module
 * @offset: offset into the OROM module to read, in words
 * @data: storage for returned word value
 *
 * Read the specified word from the active Option ROM module of the flash.
 * Note that unlike the NVM module, the CSS data is stored at the end of the
 * module instead of at the beginning.
 *
 * Return: the exit code of the operation.
 */
static int ixgbe_read_orom_module(struct ixgbe_hw *hw,
				  enum ixgbe_bank_select bank,
				  u32 offset, u16 *data)
{
	__le16 data_local;
	int err;

	err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
				      offset * sizeof(data_local),
				      (u8 *)&data_local, sizeof(data_local));
	if (!err)
		*data = le16_to_cpu(data_local);

	return err;
}

/**
 * ixgbe_get_nvm_css_hdr_len - Read the CSS header length
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash bank
 * @hdr_len: storage for header length in words
 *
 * Read the CSS header length from the NVM CSS header and add the
 * Authentication header size, and then convert to words.
 *
 * Return: the exit code of the operation.
 */
static int ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
				     enum ixgbe_bank_select bank,
				     u32 *hdr_len)
{
	u16 hdr_len_l, hdr_len_h;
	u32 hdr_len_dword;
	int err;

	/* The 32-bit header length is split across two NVM words. */
	err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
				    &hdr_len_l);
	if (err)
		return err;

	err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
				    &hdr_len_h);
	if (err)
		return err;

	/* CSS header length is in DWORD, so convert to words and add
	 * authentication header size.
	 */
	hdr_len_dword = (hdr_len_h << 16) | hdr_len_l;
	*hdr_len = hdr_len_dword * 2 + IXGBE_NVM_AUTH_HEADER_LEN;

	return 0;
}

/**
 * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy
 * @hw: pointer to the HW structure
 * @bank: whether to read from the active or inactive NVM module
 * @offset: offset into the Shadow RAM copy to read, in words
 * @data: storage for returned word value
 *
 * Read the specified word from the copy of the Shadow RAM found in the
 * specified NVM module.
 *
 * Return: the exit code of the operation.
 */
static int ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
				  enum ixgbe_bank_select bank,
				  u32 offset, u16 *data)
{
	u32 hdr_len;
	int err;

	err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
	if (err)
		return err;

	/* The Shadow RAM copy starts after the (rounded-up) CSS header. */
	hdr_len = round_up(hdr_len, IXGBE_HDR_LEN_ROUNDUP);

	return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
}

/**
 * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash bank
 * @srev: storage for security revision
 *
 * Read the security revision out of the CSS header of the active NVM module
 * bank.
 *
 * Return: the exit code of the operation.
 */
static int ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
			      enum ixgbe_bank_select bank, u32 *srev)
{
	u16 srev_l, srev_h;
	int err;

	err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
	if (err)
		return err;

	err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
	if (err)
		return err;

	*srev = (srev_h << 16) | srev_l;

	return 0;
}

/**
 * ixgbe_get_orom_civd_data - Get the combo version information from Option ROM
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash module
 * @civd: storage for the Option ROM CIVD data.
 *
 * Searches through the Option ROM flash contents to locate the CIVD data for
 * the image.
 *
 * Return: the exit code of the operation.
 */
static int
ixgbe_get_orom_civd_data(struct ixgbe_hw *hw, enum ixgbe_bank_select bank,
			 struct ixgbe_orom_civd_info *civd)
{
	struct ixgbe_orom_civd_info tmp;
	u32 offset;
	int err;

	/* The CIVD section is located in the Option ROM aligned to 512 bytes.
	 * The first 4 bytes must contain the ASCII characters "$CIV".
	 * A simple modulo 256 sum of all of the bytes of the structure must
	 * equal 0.
	 */
	for (offset = 0; (offset + SZ_512) <= hw->flash.banks.orom_size;
	     offset += SZ_512) {
		u8 sum = 0;
		u32 i;

		err = ixgbe_read_flash_module(hw, bank,
					      IXGBE_E610_SR_1ST_OROM_BANK_PTR,
					      offset,
					      (u8 *)&tmp, sizeof(tmp));
		if (err)
			return err;

		/* Skip forward until we find a matching signature */
		if (memcmp(IXGBE_OROM_CIV_SIGNATURE, tmp.signature,
			   sizeof(tmp.signature)))
			continue;

		/* Verify that the simple checksum is zero */
		for (i = 0; i < sizeof(tmp); i++)
			sum += ((u8 *)&tmp)[i];

		if (sum)
			return -EDOM;

		*civd = tmp;
		return 0;
	}

	/* No CIVD section found anywhere in the Option ROM. */
	return -ENODATA;
}

/**
 * ixgbe_get_orom_srev - Read the security revision from the OROM CSS header
 * @hw: pointer to the HW struct
 * @bank: whether to read from active or inactive flash module
 * @srev: storage for security revision
 *
 * Read the security revision out of the CSS header of the active OROM module
 * bank.
 *
 * Return: the exit code of the operation.
 */
static int ixgbe_get_orom_srev(struct ixgbe_hw *hw,
			       enum ixgbe_bank_select bank,
			       u32 *srev)
{
	u32 orom_size_word = hw->flash.banks.orom_size / 2;
	u32 css_start, hdr_len;
	u16 srev_l, srev_h;
	int err;

	err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
	if (err)
		return err;

	/* The OROM must be large enough to contain the CSS header. */
	if (orom_size_word < hdr_len)
		return -EINVAL;

	/* Calculate how far into the Option ROM the CSS header starts. Note
	 * that ixgbe_read_orom_module takes a word offset.
	 */
	css_start = orom_size_word - hdr_len;
	err = ixgbe_read_orom_module(hw, bank,
				     css_start + IXGBE_NVM_CSS_SREV_L,
				     &srev_l);
	if (err)
		return err;

	err = ixgbe_read_orom_module(hw, bank,
				     css_start + IXGBE_NVM_CSS_SREV_H,
				     &srev_h);
	if (err)
		return err;

	*srev = srev_h << 16 | srev_l;

	return 0;
}

/**
 * ixgbe_get_orom_ver_info - Read Option ROM version information
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash module
 * @orom: pointer to Option ROM info structure
 *
 * Read Option ROM version and security revision from the Option ROM flash
 * section.
 *
 * Return: the exit code of the operation.
 */
static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw,
				   enum ixgbe_bank_select bank,
				   struct ixgbe_orom_info *orom)
{
	struct ixgbe_orom_civd_info civd;
	u32 combo_ver;
	int err;

	err = ixgbe_get_orom_civd_data(hw, bank, &civd);
	if (err)
		return err;

	combo_ver = le32_to_cpu(civd.combo_ver);

	/* Major/build/patch are packed into the 32-bit combo version. */
	orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver);
	orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver);
	orom->build = (u16)FIELD_GET(IXGBE_OROM_VER_BUILD_MASK, combo_ver);

	return ixgbe_get_orom_srev(hw, bank, &orom->srev);
}

/**
 * ixgbe_get_nvm_ver_info - Read NVM version information
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash bank
 * @nvm: pointer to NVM info structure
 *
 * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
 * in the nvm info structure.
 *
 * Return: the exit code of the operation.
 */
static int ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
				  enum ixgbe_bank_select bank,
				  struct ixgbe_nvm_info *nvm)
{
	u16 eetrack_lo, eetrack_hi, ver;
	int err;

	err = ixgbe_read_nvm_sr_copy(hw, bank,
				     IXGBE_E610_SR_NVM_DEV_STARTER_VER, &ver);
	if (err)
		return err;

	nvm->major = FIELD_GET(IXGBE_E610_NVM_VER_HI_MASK, ver);
	nvm->minor = FIELD_GET(IXGBE_E610_NVM_VER_LO_MASK, ver);

	err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_LO,
				     &eetrack_lo);
	if (err)
		return err;

	err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_HI,
				     &eetrack_hi);
	if (err)
		return err;

	nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;

	/* NOTE(review): the return value is deliberately(?) ignored, so
	 * nvm->srev may be left unset if the read fails — presumably the
	 * security revision is treated as optional; confirm against the
	 * firmware/NVM spec before changing this to propagate the error.
	 */
	ixgbe_get_nvm_srev(hw, bank, &nvm->srev);

	return 0;
}

/**
 * ixgbe_get_netlist_info - Read the netlist version information
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash bank
 * @netlist: pointer to netlist version info structure
 *
 * Get the netlist version information from the requested bank. Reads the Link
 * Topology section to find the Netlist ID block and extract the relevant
 * information into the netlist version structure.
 *
 * Return: the exit code of the operation.
 */
static int ixgbe_get_netlist_info(struct ixgbe_hw *hw,
				  enum ixgbe_bank_select bank,
				  struct ixgbe_netlist_info *netlist)
{
	u16 module_id, length, node_count, i;
	u16 *id_blk;
	int err;

	err = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
					&module_id);
	if (err)
		return err;

	/* Only the Link Topology module layout is understood here. */
	if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID)
		return -EIO;

	err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
					&length);
	if (err)
		return err;

	/* Sanity check that we have at least enough words to store the
	 * netlist ID block.
	 */
	if (length < IXGBE_NETLIST_ID_BLK_SIZE)
		return -EIO;

	err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
					&node_count);
	if (err)
		return err;

	node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;

	id_blk = kcalloc(IXGBE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
	if (!id_blk)
		return -ENOMEM;

	/* Read out the entire Netlist ID Block at once. */
	err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
				      IXGBE_NETLIST_ID_BLK_OFFSET(node_count) *
				      sizeof(*id_blk), (u8 *)id_blk,
				      IXGBE_NETLIST_ID_BLK_SIZE *
				      sizeof(*id_blk));
	if (err)
		goto free_id_blk;

	/* In-place conversion of the little-endian flash words. */
	for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
		id_blk[i] = le16_to_cpu(((__le16 *)id_blk)[i]);

	netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
			 id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
	netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
			 id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
	netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
			id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
	netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
		       id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
	netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
	/* Read the left most 4 bytes of SHA */
	netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
			id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];

free_id_blk:
	kfree(id_blk);
	return err;
}

/**
 * ixgbe_get_flash_data - get flash data
 * @hw: pointer to the HW struct
 *
 * Read and populate flash data such as Shadow RAM size,
 * max_timeout and blank_nvm_mode
 *
 * Return: the exit code of the operation.
 */
int ixgbe_get_flash_data(struct ixgbe_hw *hw)
{
	struct ixgbe_flash_info *flash = &hw->flash;
	u32 fla, gens_stat;
	u8 sr_size;
	int err;

	/* The SR size is stored regardless of the NVM programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
	sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);

	/* Switching to words (sr_size contains power of 2) */
	flash->sr_words = BIT(sr_size) * (SZ_1K / sizeof(u16));

	/* Check if we are in the normal or blank NVM programming mode */
	fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
	if (fla & IXGBE_GLNVM_FLA_LOCKED_M) {
		flash->blank_nvm_mode = false;
	} else {
		/* Blank programming mode: no further discovery is possible. */
		flash->blank_nvm_mode = true;
		return -EIO;
	}

	err = ixgbe_discover_flash_size(hw);
	if (err)
		return err;

	err = ixgbe_determine_active_flash_banks(hw);
	if (err)
		return err;

	err = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
				     &flash->nvm);
	if (err)
		return err;

	err = ixgbe_get_orom_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
				      &flash->orom);
	if (err)
		return err;

	err = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
				     &flash->netlist);
	return err;
}

/**
 * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
 *
 * Return: the exit code of the operation.
3124 */ 3125 int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data) 3126 { 3127 u32 bytes = sizeof(u16); 3128 u16 data_local; 3129 int err; 3130 3131 err = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes, 3132 (u8 *)&data_local, true); 3133 if (err) 3134 return err; 3135 3136 *data = data_local; 3137 return 0; 3138 } 3139 3140 /** 3141 * ixgbe_read_flat_nvm - Read portion of NVM by flat offset 3142 * @hw: pointer to the HW struct 3143 * @offset: offset from beginning of NVM 3144 * @length: (in) number of bytes to read; (out) number of bytes actually read 3145 * @data: buffer to return data in (sized to fit the specified length) 3146 * @read_shadow_ram: if true, read from shadow RAM instead of NVM 3147 * 3148 * Reads a portion of the NVM, as a flat memory space. This function correctly 3149 * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size 3150 * from being exceeded in case of Shadow RAM read requests and ensures that no 3151 * single read request exceeds the maximum 4KB read for a single admin command. 3152 * 3153 * Returns an error code on failure. Note that the data pointer may be 3154 * partially updated if some reads succeed before a failure. 3155 * 3156 * Return: the exit code of the operation. 3157 */ 3158 int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length, 3159 u8 *data, bool read_shadow_ram) 3160 { 3161 u32 inlen = *length; 3162 u32 bytes_read = 0; 3163 bool last_cmd; 3164 int err; 3165 3166 /* Verify the length of the read if this is for the Shadow RAM */ 3167 if (read_shadow_ram && ((offset + inlen) > 3168 (hw->eeprom.word_size * 2u))) 3169 return -EINVAL; 3170 3171 do { 3172 u32 read_size, sector_offset; 3173 3174 /* ixgbe_aci_read_nvm cannot read more than 4KB at a time. 3175 * Additionally, a read from the Shadow RAM may not cross over 3176 * a sector boundary. Conveniently, the sector size is also 4KB. 
		 */
		sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
		read_size = min_t(u32,
				  IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
				  inlen - bytes_read);

		/* This chunk is the last one when it brings the running total
		 * up to the requested length.
		 */
		last_cmd = !(bytes_read + read_size < inlen);

		/* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
		 * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
		 * maximum size guarantees that it will fit within the 2 bytes.
		 */
		err = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
					 offset, (u16)read_size,
					 data + bytes_read, last_cmd,
					 read_shadow_ram);
		if (err)
			break;

		bytes_read += read_size;
		offset += read_size;
	} while (!last_cmd);

	/* Report how much was actually read, even after a partial failure. */
	*length = bytes_read;
	return err;
}

/**
 * ixgbe_read_sr_buf_aci - Read Shadow RAM buffer via ACI
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM words to read (0x000000 - 0x001FFF)
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Read 16 bit words (data buf) from the Shadow RAM. The NVM ownership must
 * be acquired by the caller (see ixgbe_read_ee_aci_buffer_e610()).
 *
 * Return: the operation exit code.
 */
int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
			  u16 *data)
{
	u32 bytes = *words * 2;
	int err;

	err = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
	if (err)
		return err;

	*words = bytes / 2;

	/* Words arrive in flash (little-endian) order; convert them to CPU
	 * order in place.
	 */
	for (int i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

	return 0;
}

/**
 * ixgbe_read_ee_aci_e610 - Read EEPROM word using the admin command.
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the ACI.
 * If the EEPROM params are not initialized, the function
 * initialize them before proceeding with reading.
 * The function acquires and then releases the NVM ownership.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	int err;

	/* Lazily initialize the EEPROM parameters on first use. */
	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
		err = hw->eeprom.ops.init_params(hw);
		if (err)
			return err;
	}

	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
	if (err)
		return err;

	err = ixgbe_read_sr_word_aci(hw, offset, data);
	/* Release NVM ownership even if the read failed. */
	ixgbe_release_nvm(hw);

	return err;
}

/**
 * ixgbe_read_ee_aci_buffer_e610 - Read EEPROM words via ACI
 * @hw: pointer to hardware structure
 * @offset: offset of words in the EEPROM to read
 * @words: number of words to read
 * @data: words to read from the EEPROM
 *
 * Read 16 bit words from the EEPROM via the ACI. Initialize the EEPROM params
 * prior to the read. Acquire/release the NVM ownership.
 *
 * Return: the operation exit code.
 */
int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
				  u16 words, u16 *data)
{
	int err;

	/* Lazily initialize the EEPROM parameters on first use. */
	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
		err = hw->eeprom.ops.init_params(hw);
		if (err)
			return err;
	}

	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
	if (err)
		return err;

	err = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
	/* Release NVM ownership even if the read failed. */
	ixgbe_release_nvm(hw);

	return err;
}

/**
 * ixgbe_validate_eeprom_checksum_e610 - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum. If the
 * caller does not need checksum_val, the value can be NULL.
 * If the EEPROM params are not initialized, the function
 * initialize them before proceeding.
 * The function acquires and then releases the NVM ownership.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val)
{
	int err;

	/* Lazily initialize the EEPROM parameters on first use. */
	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
		err = hw->eeprom.ops.init_params(hw);
		if (err)
			return err;
	}

	err = ixgbe_nvm_validate_checksum(hw);
	if (err)
		return err;

	/* Optionally report the stored checksum word back to the caller. */
	if (checksum_val) {
		u16 tmp_checksum;

		err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
		if (err)
			return err;

		err = ixgbe_read_sr_word_aci(hw, IXGBE_E610_SR_SW_CHECKSUM_WORD,
					     &tmp_checksum);
		ixgbe_release_nvm(hw);

		if (!err)
			*checksum_val = tmp_checksum;
	}

	return err;
}

/**
 * ixgbe_reset_hw_e610 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, and performs a reset.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_reset_hw_e610(struct ixgbe_hw *hw)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	u32 ctrl, i;
	int err;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	err = hw->mac.ops.stop_adapter(hw);
	if (err)
		goto reset_hw_out;

	/* Flush pending Tx transactions.
*/ 3366 ixgbe_clear_tx_pending(hw); 3367 3368 hw->phy.ops.init(hw); 3369 mac_reset_top: 3370 err = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); 3371 if (err) 3372 return -EBUSY; 3373 ctrl = IXGBE_CTRL_RST; 3374 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); 3375 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); 3376 IXGBE_WRITE_FLUSH(hw); 3377 hw->mac.ops.release_swfw_sync(hw, swfw_mask); 3378 3379 /* Poll for reset bit to self-clear indicating reset is complete */ 3380 for (i = 0; i < 10; i++) { 3381 udelay(1); 3382 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 3383 if (!(ctrl & IXGBE_CTRL_RST_MASK)) 3384 break; 3385 } 3386 3387 if (ctrl & IXGBE_CTRL_RST_MASK) { 3388 struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter, 3389 hw); 3390 3391 err = -EIO; 3392 netdev_err(adapter->netdev, "Reset polling failed to complete."); 3393 } 3394 3395 /* Double resets are required for recovery from certain error 3396 * conditions. Between resets, it is necessary to stall to allow time 3397 * for any pending HW events to complete. 3398 */ 3399 msleep(100); 3400 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { 3401 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 3402 goto mac_reset_top; 3403 } 3404 3405 /* Set the Rx packet buffer size. */ 3406 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), GENMASK(18, 17)); 3407 3408 /* Store the permanent mac address */ 3409 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); 3410 3411 /* Maximum number of Receive Address Registers. */ 3412 #define IXGBE_MAX_NUM_RAR 128 3413 3414 /* Store MAC address from RAR0, clear receive address registers, and 3415 * clear the multicast table. Also reset num_rar_entries to the 3416 * maximum number of Receive Address Registers, since we modify this 3417 * value when programming the SAN MAC address. 
	 */
	hw->mac.num_rar_entries = IXGBE_MAX_NUM_RAR;
	hw->mac.ops.init_rx_addrs(hw);

	/* Initialize bus function number */
	hw->mac.ops.set_lan_id(hw);

reset_hw_out:
	return err;
}

/**
 * ixgbe_get_pfa_module_tlv - Read sub module TLV from NVM PFA
 * @hw: pointer to hardware structure
 * @module_tlv: pointer to module TLV to return
 * @module_tlv_len: pointer to module TLV length to return
 * @module_type: module type requested
 *
 * Find the requested sub module TLV type from the Preserved Field
 * Area (PFA) and returns the TLV pointer and length. The caller can
 * use these to read the variable length TLV value.
 *
 * Return: the exit code of the operation.
 */
static int ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
				    u16 *module_tlv_len, u16 module_type)
{
	u16 pfa_len, pfa_ptr, pfa_end_ptr;
	u16 next_tlv;
	int err;

	/* Locate the PFA and read its total length (in words). */
	err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_PFA_PTR, &pfa_ptr);
	if (err)
		return err;

	err = ixgbe_read_ee_aci_e610(hw, pfa_ptr, &pfa_len);
	if (err)
		return err;

	/* Starting with first TLV after PFA length, iterate through the list
	 * of TLVs to find the requested one.
	 */
	next_tlv = pfa_ptr + 1;
	pfa_end_ptr = pfa_ptr + pfa_len;
	while (next_tlv < pfa_end_ptr) {
		u16 tlv_sub_module_type, tlv_len;

		/* Read TLV type */
		err = ixgbe_read_ee_aci_e610(hw, next_tlv,
					     &tlv_sub_module_type);
		if (err)
			break;

		/* Read TLV length */
		err = ixgbe_read_ee_aci_e610(hw, next_tlv + 1, &tlv_len);
		if (err)
			break;

		if (tlv_sub_module_type == module_type) {
			if (tlv_len) {
				*module_tlv = next_tlv;
				*module_tlv_len = tlv_len;
				return 0;
			}
			/* A matching TLV with zero length is malformed. */
			return -EIO;
		}
		/* Check next TLV, i.e. current TLV pointer + length + 2 words
		 * (for current TLV's type and length).
		 */
		next_tlv = next_tlv + tlv_len + 2;
	}
	/* Module does not exist.
	 * NOTE(review): a read error inside the loop also lands here, so the
	 * original error code is masked as -ENODATA - confirm intended.
	 */
	return -ENODATA;
}

/**
 * ixgbe_read_pba_string_e610 - Read PBA string from NVM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the NVM
 * @pba_num_size: part number string buffer length
 *
 * Read the part number string from the NVM.
 *
 * Return: the exit code of the operation.
 */
static int ixgbe_read_pba_string_e610(struct ixgbe_hw *hw, u8 *pba_num,
				      u32 pba_num_size)
{
	u16 pba_tlv, pba_tlv_len;
	u16 pba_word, pba_size;
	int err;

	/* Return an empty string on any early failure. */
	*pba_num = '\0';

	err = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
				       IXGBE_E610_SR_PBA_BLOCK_PTR);
	if (err)
		return err;

	/* pba_size is the next word */
	err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2), &pba_size);
	if (err)
		return err;

	if (pba_tlv_len < pba_size)
		return -EINVAL;

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size).
3527 */ 3528 pba_size--; 3529 3530 if (pba_num_size < (((u32)pba_size * 2) + 1)) 3531 return -EINVAL; 3532 3533 for (u16 i = 0; i < pba_size; i++) { 3534 err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2 + 1) + i, 3535 &pba_word); 3536 if (err) 3537 return err; 3538 3539 pba_num[(i * 2)] = FIELD_GET(IXGBE_E610_SR_PBA_BLOCK_MASK, 3540 pba_word); 3541 pba_num[(i * 2) + 1] = pba_word & 0xFF; 3542 } 3543 3544 pba_num[(pba_size * 2)] = '\0'; 3545 3546 return err; 3547 } 3548 3549 static const struct ixgbe_mac_operations mac_ops_e610 = { 3550 .init_hw = ixgbe_init_hw_generic, 3551 .start_hw = ixgbe_start_hw_e610, 3552 .clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic, 3553 .enable_rx_dma = ixgbe_enable_rx_dma_generic, 3554 .get_mac_addr = ixgbe_get_mac_addr_generic, 3555 .get_device_caps = ixgbe_get_device_caps_generic, 3556 .stop_adapter = ixgbe_stop_adapter_generic, 3557 .set_lan_id = ixgbe_set_lan_id_multi_port_pcie, 3558 .set_rxpba = ixgbe_set_rxpba_generic, 3559 .check_link = ixgbe_check_link_e610, 3560 .blink_led_start = ixgbe_blink_led_start_X540, 3561 .blink_led_stop = ixgbe_blink_led_stop_X540, 3562 .set_rar = ixgbe_set_rar_generic, 3563 .clear_rar = ixgbe_clear_rar_generic, 3564 .set_vmdq = ixgbe_set_vmdq_generic, 3565 .set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic, 3566 .clear_vmdq = ixgbe_clear_vmdq_generic, 3567 .init_rx_addrs = ixgbe_init_rx_addrs_generic, 3568 .update_mc_addr_list = ixgbe_update_mc_addr_list_generic, 3569 .enable_mc = ixgbe_enable_mc_generic, 3570 .disable_mc = ixgbe_disable_mc_generic, 3571 .clear_vfta = ixgbe_clear_vfta_generic, 3572 .set_vfta = ixgbe_set_vfta_generic, 3573 .fc_enable = ixgbe_fc_enable_generic, 3574 .set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550, 3575 .init_uta_tables = ixgbe_init_uta_tables_generic, 3576 .set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing, 3577 .set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing, 3578 .set_source_address_pruning = 3579 ixgbe_set_source_address_pruning_x550, 3580 
.set_ethertype_anti_spoofing = 3581 ixgbe_set_ethertype_anti_spoofing_x550, 3582 .disable_rx_buff = ixgbe_disable_rx_buff_generic, 3583 .enable_rx_buff = ixgbe_enable_rx_buff_generic, 3584 .enable_rx = ixgbe_enable_rx_generic, 3585 .disable_rx = ixgbe_disable_rx_e610, 3586 .led_on = ixgbe_led_on_generic, 3587 .led_off = ixgbe_led_off_generic, 3588 .init_led_link_act = ixgbe_init_led_link_act_generic, 3589 .reset_hw = ixgbe_reset_hw_e610, 3590 .get_media_type = ixgbe_get_media_type_e610, 3591 .setup_link = ixgbe_setup_link_e610, 3592 .get_link_capabilities = ixgbe_get_link_capabilities_e610, 3593 .get_bus_info = ixgbe_get_bus_info_generic, 3594 .acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540, 3595 .release_swfw_sync = ixgbe_release_swfw_sync_X540, 3596 .init_swfw_sync = ixgbe_init_swfw_sync_X540, 3597 .prot_autoc_read = prot_autoc_read_generic, 3598 .prot_autoc_write = prot_autoc_write_generic, 3599 .setup_fc = ixgbe_setup_fc_e610, 3600 .fc_autoneg = ixgbe_fc_autoneg_e610, 3601 }; 3602 3603 static const struct ixgbe_phy_operations phy_ops_e610 = { 3604 .init = ixgbe_init_phy_ops_e610, 3605 .identify = ixgbe_identify_phy_e610, 3606 .identify_sfp = ixgbe_identify_module_e610, 3607 .setup_link_speed = ixgbe_setup_phy_link_speed_generic, 3608 .setup_link = ixgbe_setup_phy_link_e610, 3609 .enter_lplu = ixgbe_enter_lplu_e610, 3610 }; 3611 3612 static const struct ixgbe_eeprom_operations eeprom_ops_e610 = { 3613 .read = ixgbe_read_ee_aci_e610, 3614 .read_buffer = ixgbe_read_ee_aci_buffer_e610, 3615 .validate_checksum = ixgbe_validate_eeprom_checksum_e610, 3616 .read_pba_string = ixgbe_read_pba_string_e610, 3617 .init_params = ixgbe_init_eeprom_params_e610, 3618 }; 3619 3620 const struct ixgbe_info ixgbe_e610_info = { 3621 .mac = ixgbe_mac_e610, 3622 .get_invariants = ixgbe_get_invariants_X540, 3623 .mac_ops = &mac_ops_e610, 3624 .eeprom_ops = &eeprom_ops_e610, 3625 .phy_ops = &phy_ops_e610, 3626 .mbx_ops = &mbx_ops_generic, 3627 .mvals = ixgbe_mvals_x550em_a, 3628 }; 3629