/******************************************************************************

  Copyright (c) 2001-2012, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82598.c,v 1.13 2012/07/05 20:51:44 jfv Exp $*/

#include "ixgbe_type.h"
#include "ixgbe_82598.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
                                             ixgbe_link_speed *speed,
                                             bool *autoneg);
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
                                      bool autoneg_wait_to_complete);
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed *speed, bool *link_up,
                                      bool link_up_wait_to_complete);
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed speed,
                                      bool autoneg,
                                      bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                         ixgbe_link_speed speed,
                                         bool autoneg,
                                         bool autoneg_wait_to_complete);
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
                                  u32 headroom, int strategy);

/**
 *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
 *  @hw: pointer to the HW structure
 *
 *  The defaults for 82598 should be in the range of 50us to 50ms,
 *  however the hardware default for these parts is 500us to 1ms which is less
 *  than the 10ms recommended by the pci-e spec.  To address this we need to
 *  increase the value to either 10ms to 250ms for capability version 1 config,
 *  or 16ms to 55ms for version 2.
 **/
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
        u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
        u16 pcie_devctl2;

        /* only take action if timeout value is defaulted to 0 */
        if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
                goto out;

        /*
         * if capabilities version is type 1 we can write the
         * timeout of 10ms to 250ms through the GCR register
         */
        if (!(gcr & IXGBE_GCR_CAP_VER2)) {
                gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
                goto out;
        }

        /*
         * for version 2 capabilities we need to write the config space
         * directly in order to set the completion timeout value for
         * 16ms to 55ms
         */
        pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
        pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
        IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
        /* disable completion timeout resend */
        gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
        IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}

/**
 *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
 *  @hw: pointer to hardware structure
 *
 *  Initialize the function pointers and assign the MAC type for 82598.
 *  Does not touch the hardware.
 **/
s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
{
        struct ixgbe_mac_info *mac = &hw->mac;
        struct ixgbe_phy_info *phy = &hw->phy;
        s32 ret_val;

        DEBUGFUNC("ixgbe_init_ops_82598");

        ret_val = ixgbe_init_phy_ops_generic(hw);
        ret_val = ixgbe_init_ops_generic(hw);

        /* PHY */
        phy->ops.init = &ixgbe_init_phy_ops_82598;

        /* MAC */
        mac->ops.start_hw = &ixgbe_start_hw_82598;
        mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
        mac->ops.reset_hw = &ixgbe_reset_hw_82598;
        mac->ops.get_media_type = &ixgbe_get_media_type_82598;
        mac->ops.get_supported_physical_layer =
                                &ixgbe_get_supported_physical_layer_82598;
        mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
        mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
        mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;

        /* RAR, Multicast, VLAN */
        mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
        mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
        mac->ops.set_vfta = &ixgbe_set_vfta_82598;
        mac->ops.set_vlvf = NULL;
        mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;

        /* Flow Control */
        mac->ops.fc_enable = &ixgbe_fc_enable_82598;

        mac->mcft_size = 128;
        mac->vft_size = 128;
        mac->num_rar_entries = 16;
        mac->rx_pb_size = 512;
        mac->max_tx_queues = 32;
        mac->max_rx_queues = 64;
        mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

        /* SFP+ Module */
        phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;

        /* Link */
        mac->ops.check_link = &ixgbe_check_mac_link_82598;
        mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
        mac->ops.flap_tx_laser = NULL;
        mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
        mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;

        /* Manageability interface */
        mac->ops.set_fw_drv_ver = NULL;

        return ret_val;
}

/**
 *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during init_shared_code because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
 *
 **/
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
        struct ixgbe_mac_info *mac = &hw->mac;
        struct ixgbe_phy_info *phy = &hw->phy;
        s32 ret_val = IXGBE_SUCCESS;
        u16 list_offset, data_offset;

        DEBUGFUNC("ixgbe_init_phy_ops_82598");

        /* Identify the PHY */
        phy->ops.identify(hw);

        /* Overwrite the link function pointers if copper PHY */
        if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
                mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
                mac->ops.get_link_capabilities =
                                &ixgbe_get_copper_link_capabilities_generic;
        }

        switch (hw->phy.type) {
        case ixgbe_phy_tn:
                phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
                phy->ops.check_link = &ixgbe_check_phy_link_tnx;
                phy->ops.get_firmware_version =
                                        &ixgbe_get_phy_firmware_version_tnx;
                break;
        case ixgbe_phy_nl:
                phy->ops.reset = &ixgbe_reset_phy_nl;

                /* Call SFP+ identify routine to get the SFP+ module type */
                ret_val = phy->ops.identify_sfp(hw);
                if (ret_val != IXGBE_SUCCESS)
                        goto out;
                else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
                        ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
                        goto out;
                }

                /* Check to see if SFP+ module is supported */
                ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
                                                              &list_offset,
                                                              &data_offset);
                if (ret_val != IXGBE_SUCCESS) {
                        ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
                        goto out;
                }
                break;
        default:
                break;
        }

out:
        return ret_val;
}

/**
 *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware using the generic start_hw function.
 *  Disables relaxed ordering, then sets the PCIe completion timeout.
 **/
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
        u32 regval;
        u32 i;
        s32 ret_val = IXGBE_SUCCESS;

        DEBUGFUNC("ixgbe_start_hw_82598");

        ret_val = ixgbe_start_hw_generic(hw);

        /* Disable relaxed ordering */
        for (i = 0; ((i < hw->mac.max_tx_queues) &&
             (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
                regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
                regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
        }

        for (i = 0; ((i < hw->mac.max_rx_queues) &&
             (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
                regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
                regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
                            IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
                IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
        }

        /* set the completion timeout for interface */
        if (ret_val == IXGBE_SUCCESS)
                ixgbe_set_pcie_completion_timeout(hw);

        return ret_val;
}

/**
 *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @autoneg: boolean auto-negotiation value
 *
 *  Determines the link capabilities by reading the AUTOC register.
 **/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
                                             ixgbe_link_speed *speed,
                                             bool *autoneg)
{
        s32 status = IXGBE_SUCCESS;
        u32 autoc = 0;

        DEBUGFUNC("ixgbe_get_link_capabilities_82598");

        /*
         * Determine link capabilities based on the stored value of AUTOC,
         * which represents EEPROM defaults.  If AUTOC value has not been
         * stored, use the current register value.
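         *
         * As an illustration of the decode below: with LMS set to KX4_AN (or
         * KX4_AN_1G_AN) and both KX4_SUPP and KX_SUPP set in AUTOC, *speed is
         * reported as 10GB|1GB and *autoneg as TRUE, while the fixed 1G/10G
         * link modes report a single speed with *autoneg FALSE.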
         */
        if (hw->mac.orig_link_settings_stored)
                autoc = hw->mac.orig_autoc;
        else
                autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

        switch (autoc & IXGBE_AUTOC_LMS_MASK) {
        case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
                *autoneg = FALSE;
                break;

        case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
                *speed = IXGBE_LINK_SPEED_10GB_FULL;
                *autoneg = FALSE;
                break;

        case IXGBE_AUTOC_LMS_1G_AN:
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
                *autoneg = TRUE;
                break;

        case IXGBE_AUTOC_LMS_KX4_AN:
        case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
                *speed = IXGBE_LINK_SPEED_UNKNOWN;
                if (autoc & IXGBE_AUTOC_KX4_SUPP)
                        *speed |= IXGBE_LINK_SPEED_10GB_FULL;
                if (autoc & IXGBE_AUTOC_KX_SUPP)
                        *speed |= IXGBE_LINK_SPEED_1GB_FULL;
                *autoneg = TRUE;
                break;

        default:
                status = IXGBE_ERR_LINK_SETUP;
                break;
        }

        return status;
}

/**
 *  ixgbe_get_media_type_82598 - Determines media type
 *  @hw: pointer to hardware structure
 *
 *  Returns the media type (fiber, copper, backplane)
 **/
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
        enum ixgbe_media_type media_type;

        DEBUGFUNC("ixgbe_get_media_type_82598");

        /* Detect if there is a copper PHY attached. */
        switch (hw->phy.type) {
        case ixgbe_phy_cu_unknown:
        case ixgbe_phy_tn:
                media_type = ixgbe_media_type_copper;
                goto out;
        default:
                break;
        }

        /* Media type for I82598 is based on device ID */
        switch (hw->device_id) {
        case IXGBE_DEV_ID_82598:
        case IXGBE_DEV_ID_82598_BX:
                /* Default device ID is mezzanine card KX/KX4 */
                media_type = ixgbe_media_type_backplane;
                break;
        case IXGBE_DEV_ID_82598AF_DUAL_PORT:
        case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
        case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
        case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
        case IXGBE_DEV_ID_82598EB_XF_LR:
        case IXGBE_DEV_ID_82598EB_SFP_LOM:
                media_type = ixgbe_media_type_fiber;
                break;
        case IXGBE_DEV_ID_82598EB_CX4:
        case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
                media_type = ixgbe_media_type_cx4;
                break;
        case IXGBE_DEV_ID_82598AT:
        case IXGBE_DEV_ID_82598AT2:
                media_type = ixgbe_media_type_copper;
                break;
        default:
                media_type = ixgbe_media_type_unknown;
                break;
        }
out:
        return media_type;
}

/**
 *  ixgbe_fc_enable_82598 - Enable flow control
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
        s32 ret_val = IXGBE_SUCCESS;
        u32 fctrl_reg;
        u32 rmcs_reg;
        u32 reg;
        u32 fcrtl, fcrth;
        u32 link_speed = 0;
        int i;
        bool link_up;

        DEBUGFUNC("ixgbe_fc_enable_82598");

        /* Validate the water mark configuration */
        if (!hw->fc.pause_time) {
                ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                goto out;
        }

        /* Low water mark of zero causes XOFF floods */
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
                    hw->fc.high_water[i]) {
                        if (!hw->fc.low_water[i] ||
                            hw->fc.low_water[i] >= hw->fc.high_water[i]) {
                                DEBUGOUT("Invalid water mark configuration\n");
                                ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                                goto out;
                        }
                }
        }

        /*
         * On 82598 having Rx FC on causes resets while doing 1G
         * so if it's on, turn it off once we know link_speed. For
         * more details see 82598 Specification update.
         */
        hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
        if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
                switch (hw->fc.requested_mode) {
                case ixgbe_fc_full:
                        hw->fc.requested_mode = ixgbe_fc_tx_pause;
                        break;
                case ixgbe_fc_rx_pause:
                        hw->fc.requested_mode = ixgbe_fc_none;
                        break;
                default:
                        /* no change */
                        break;
                }
        }

        /* Negotiate the fc mode to use */
        ixgbe_fc_autoneg(hw);

        /* Disable any previous flow control settings */
        fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

        rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
        rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

        /*
         * The possible values of fc.current_mode are:
         * 0: Flow control is completely disabled
         * 1: Rx flow control is enabled (we can receive pause frames,
         *    but not send pause frames).
         * 2: Tx flow control is enabled (we can send pause frames but
         *    we do not support receiving pause frames).
         * 3: Both Rx and Tx flow control (symmetric) are enabled.
         * other: Invalid.
         */
        switch (hw->fc.current_mode) {
        case ixgbe_fc_none:
                /*
                 * Flow control is disabled by software override or autoneg.
                 * The code below will actually disable it in the HW.
                 */
                break;
        case ixgbe_fc_rx_pause:
                /*
                 * Rx Flow control is enabled and Tx Flow control is
                 * disabled by software override. Since there really
                 * isn't a way to advertise that we are capable of RX
                 * Pause ONLY, we will advertise that we support both
                 * symmetric and asymmetric Rx PAUSE.  Later, we will
                 * disable the adapter's ability to send PAUSE frames.
                 */
                fctrl_reg |= IXGBE_FCTRL_RFCE;
                break;
        case ixgbe_fc_tx_pause:
                /*
                 * Tx Flow control is enabled, and Rx Flow control is
                 * disabled by software override.
                 */
                rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
                break;
        case ixgbe_fc_full:
                /* Flow control (both Rx and Tx) is enabled by SW override. */
                fctrl_reg |= IXGBE_FCTRL_RFCE;
                rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
                break;
        default:
                DEBUGOUT("Flow control param set incorrectly\n");
                ret_val = IXGBE_ERR_CONFIG;
                goto out;
        }

        /* Set 802.3x based flow control settings. */
        fctrl_reg |= IXGBE_FCTRL_DPF;
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
        IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

        /* Set up and enable Rx high/low water mark thresholds, enable XON. */
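        /*
         * Note that the water marks kept in hw->fc are expressed here in
         * kilobyte units; the << 10 in the loop below scales them to the
         * byte counts that the FCRTL/FCRTH registers take.
         */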
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
                    hw->fc.high_water[i]) {
                        fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
                        fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
                } else {
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
                }

        }

        /* Configure pause time (2 TCs per register) */
        reg = hw->fc.pause_time * 0x00010001;
        for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
                IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

        /* Configure flow control refresh threshold value */
        IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
        return ret_val;
}

/**
 *  ixgbe_start_mac_link_82598 - Configures MAC link settings
 *  @hw: pointer to hardware structure
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 **/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
                                      bool autoneg_wait_to_complete)
{
        u32 autoc_reg;
        u32 links_reg;
        u32 i;
        s32 status = IXGBE_SUCCESS;

        DEBUGFUNC("ixgbe_start_mac_link_82598");

        /* Restart link */
        autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        autoc_reg |= IXGBE_AUTOC_AN_RESTART;
        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

        /* Only poll for autoneg to complete if specified to do so */
        if (autoneg_wait_to_complete) {
                if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
                    IXGBE_AUTOC_LMS_KX4_AN ||
                    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
                    IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
                        links_reg = 0; /* Just in case Autoneg time = 0 */
                        for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
                                links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
                                if (links_reg & IXGBE_LINKS_KX_AN_COMP)
                                        break;
                                msec_delay(100);
                        }
                        if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
                                status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
                                DEBUGOUT("Autonegotiation did not complete.\n");
                        }
                }
        }

        /* Add delay to filter out noise during initial link setup */
        msec_delay(50);

        return status;
}

/**
 *  ixgbe_validate_link_ready - Function looks for phy link
 *  @hw: pointer to hardware structure
 *
 *  Function indicates success when phy link is available. If phy is not ready
 *  within 5 seconds of MAC indicating link, the function returns error.
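 *  (The 5 second figure comes from the loop below, which polls the PHY
 *  auto-negotiation status every 100 ms for up to
 *  IXGBE_VALIDATE_LINK_READY_TIMEOUT tries.)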
 **/
static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
        u32 timeout;
        u16 an_reg;

        if (hw->device_id != IXGBE_DEV_ID_82598AT2)
                return IXGBE_SUCCESS;

        for (timeout = 0;
             timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
                hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);

                if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
                    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
                        break;

                msec_delay(100);
        }

        if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
                DEBUGOUT("Link was indicated but link is down\n");
                return IXGBE_ERR_LINK_SETUP;
        }

        return IXGBE_SUCCESS;
}

/**
 *  ixgbe_check_mac_link_82598 - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: TRUE if link is up, FALSE otherwise
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed *speed, bool *link_up,
                                      bool link_up_wait_to_complete)
{
        u32 links_reg;
        u32 i;
        u16 link_reg, adapt_comp_reg;

        DEBUGFUNC("ixgbe_check_mac_link_82598");

        /*
         * SERDES PHY requires us to read link status from undocumented
         * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
         * indicates link down.  0xC00C is read to check that the XAUI lanes
         * are active.  Bit 0 clear indicates active; set indicates inactive.
         */
        if (hw->phy.type == ixgbe_phy_nl) {
                hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
                hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
                hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
                                     &adapt_comp_reg);
                if (link_up_wait_to_complete) {
                        for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
                                if ((link_reg & 1) &&
                                    ((adapt_comp_reg & 1) == 0)) {
                                        *link_up = TRUE;
                                        break;
                                } else {
                                        *link_up = FALSE;
                                }
                                msec_delay(100);
                                hw->phy.ops.read_reg(hw, 0xC79F,
                                                     IXGBE_TWINAX_DEV,
                                                     &link_reg);
                                hw->phy.ops.read_reg(hw, 0xC00C,
                                                     IXGBE_TWINAX_DEV,
                                                     &adapt_comp_reg);
                        }
                } else {
                        if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
                                *link_up = TRUE;
                        else
                                *link_up = FALSE;
                }

                if (*link_up == FALSE)
                        goto out;
        }

        links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
        if (link_up_wait_to_complete) {
                for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
                        if (links_reg & IXGBE_LINKS_UP) {
                                *link_up = TRUE;
                                break;
                        } else {
                                *link_up = FALSE;
                        }
                        msec_delay(100);
                        links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
                }
        } else {
                if (links_reg & IXGBE_LINKS_UP)
                        *link_up = TRUE;
                else
                        *link_up = FALSE;
        }

        if (links_reg & IXGBE_LINKS_SPEED)
                *speed = IXGBE_LINK_SPEED_10GB_FULL;
        else
                *speed = IXGBE_LINK_SPEED_1GB_FULL;

        if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
            (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
                *link_up = FALSE;

out:
        return IXGBE_SUCCESS;
}

/**
 *  ixgbe_setup_mac_link_82598 - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: TRUE if autonegotiation enabled
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Set the link speed in the AUTOC register and restarts link.
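 *  For example, a request for IXGBE_LINK_SPEED_10GB_FULL on a KX4/KX link
 *  mode leaves only KX4_SUPP set in AUTOC before the restart; a speed the
 *  part cannot do falls out of the capability mask and the function returns
 *  IXGBE_ERR_LINK_SETUP without touching the link.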
 **/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed speed, bool autoneg,
                                      bool autoneg_wait_to_complete)
{
        s32 status = IXGBE_SUCCESS;
        ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
        u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        u32 autoc = curr_autoc;
        u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;

        DEBUGFUNC("ixgbe_setup_mac_link_82598");

        /* Check to see if speed passed in is supported. */
        ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
        speed &= link_capabilities;

        if (speed == IXGBE_LINK_SPEED_UNKNOWN)
                status = IXGBE_ERR_LINK_SETUP;

        /* Set KX4/KX support according to speed requested */
        else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
                 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
                autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
                if (speed & IXGBE_LINK_SPEED_10GB_FULL)
                        autoc |= IXGBE_AUTOC_KX4_SUPP;
                if (speed & IXGBE_LINK_SPEED_1GB_FULL)
                        autoc |= IXGBE_AUTOC_KX_SUPP;
                if (autoc != curr_autoc)
                        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
        }

        if (status == IXGBE_SUCCESS) {
                /*
                 * Setup and restart the link based on the new values in
                 * ixgbe_hw.  This will write the AUTOC register based on
                 * the new stored values.
                 */
                status = ixgbe_start_mac_link_82598(hw,
                                                    autoneg_wait_to_complete);
        }

        return status;
}


/**
 *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: TRUE if autonegotiation enabled
 *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 *  Sets the link speed in the AUTOC register in the MAC and restarts link.
 **/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                         ixgbe_link_speed speed,
                                         bool autoneg,
                                         bool autoneg_wait_to_complete)
{
        s32 status;

        DEBUGFUNC("ixgbe_setup_copper_link_82598");

        /* Setup the PHY according to input speed */
        status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
                                              autoneg_wait_to_complete);
        /* Set up MAC */
        ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);

        return status;
}

/**
 *  ixgbe_reset_hw_82598 - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks and
 *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
 *  reset.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
        s32 status = IXGBE_SUCCESS;
        s32 phy_status = IXGBE_SUCCESS;
        u32 ctrl;
        u32 gheccr;
        u32 i;
        u32 autoc;
        u8  analog_val;

        DEBUGFUNC("ixgbe_reset_hw_82598");

        /* Call adapter stop to disable tx/rx and clear interrupts */
        status = hw->mac.ops.stop_adapter(hw);
        if (status != IXGBE_SUCCESS)
                goto reset_hw_out;

        /*
         * Power up the Atlas Tx lanes if they are currently powered down.
         * Atlas Tx lanes are powered down for MAC loopback tests, but
         * they are not automatically restored on reset.
         */
        hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
        if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
                /* Enable Tx Atlas so packets can be transmitted again */
                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
                                             &analog_val);
                analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
                                              analog_val);

                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
                                             &analog_val);
                analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
                                              analog_val);

                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
                                             &analog_val);
                analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
                                              analog_val);

                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
                                             &analog_val);
                analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
                                              analog_val);
        }

        /* Reset PHY */
        if (hw->phy.reset_disable == FALSE) {
                /* PHY ops must be identified and initialized prior to reset */

                /* Init PHY and function pointers, perform SFP setup */
                phy_status = hw->phy.ops.init(hw);
                if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
                        goto reset_hw_out;
                if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
                        goto mac_reset_top;

                hw->phy.ops.reset(hw);
        }

mac_reset_top:
        /*
         * Issue global reset to the MAC.  This needs to be a SW reset.
         * If link reset is used, it might reset the MAC when mng is using it
         */
        ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
        IXGBE_WRITE_FLUSH(hw);

        /* Poll for reset bit to self-clear indicating reset is complete */
        for (i = 0; i < 10; i++) {
                usec_delay(1);
                ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
                if (!(ctrl & IXGBE_CTRL_RST))
                        break;
        }
        if (ctrl & IXGBE_CTRL_RST) {
                status = IXGBE_ERR_RESET_FAILED;
                DEBUGOUT("Reset polling failed to complete.\n");
        }

        msec_delay(50);

        /*
         * Double resets are required for recovery from certain error
         * conditions.  Between resets, it is necessary to stall to allow time
         * for any pending HW events to complete.
         */
        if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
                hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
                goto mac_reset_top;
        }

        gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
        gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
        IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

        /*
         * Store the original AUTOC value if it has not been
         * stored off yet.  Otherwise restore the stored original
         * AUTOC value since the reset operation sets back to defaults.
         */
        autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        if (hw->mac.orig_link_settings_stored == FALSE) {
                hw->mac.orig_autoc = autoc;
                hw->mac.orig_link_settings_stored = TRUE;
        } else if (autoc != hw->mac.orig_autoc) {
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
        }

        /* Store the permanent mac address */
        hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

        /*
         * Store MAC address from RAR0, clear receive address registers, and
         * clear the multicast table
         */
        hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
        if (phy_status != IXGBE_SUCCESS)
                status = phy_status;

        return status;
}

/**
 *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq set index
 **/
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
        u32 rar_high;
        u32 rar_entries = hw->mac.num_rar_entries;

        DEBUGFUNC("ixgbe_set_vmdq_82598");

        /* Make sure we are using a valid rar index range */
        if (rar >= rar_entries) {
                DEBUGOUT1("RAR index %d is out of range.\n", rar);
                return IXGBE_ERR_INVALID_ARGUMENT;
        }

        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
        rar_high &= ~IXGBE_RAH_VIND_MASK;
        rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
        IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
        return IXGBE_SUCCESS;
}

/**
 *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
        u32 rar_high;
        u32 rar_entries = hw->mac.num_rar_entries;

        UNREFERENCED_1PARAMETER(vmdq);

        /* Make sure we are using a valid rar index range */
        if (rar >= rar_entries) {
                DEBUGOUT1("RAR index %d is out of range.\n", rar);
                return IXGBE_ERR_INVALID_ARGUMENT;
        }

        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
        if (rar_high & IXGBE_RAH_VIND_MASK) {
                rar_high &= ~IXGBE_RAH_VIND_MASK;
                IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
        }

        return IXGBE_SUCCESS;
}

/**
 *  ixgbe_set_vfta_82598 - Set VLAN filter table
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vind: VMDq output index that maps queue to VLAN id in VFTA
 *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
 *
 *  Turn on/off specified VLAN in the VLAN filter table.
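 *  As a worked example of the indexing below, VLAN id 100 maps to VFTA[3]
 *  bit 4 (100 >> 5 = 3, 100 & 0x1F = 4), and its VMDq nibble lives in
 *  VFTAVIND byte array 0 at bit offset 16 ((100 >> 3) & 0x3 = 0,
 *  (100 & 0x7) << 2 = 16).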
 **/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                         bool vlan_on)
{
        u32 regindex;
        u32 bitindex;
        u32 bits;
        u32 vftabyte;

        DEBUGFUNC("ixgbe_set_vfta_82598");

        if (vlan > 4095)
                return IXGBE_ERR_PARAM;

        /* Determine 32-bit word position in array */
        regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */

        /* Determine the location of the (VMD) queue index */
        vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
        bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */

        /* Set the nibble for VMD queue index */
        bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
        bits &= (~(0x0F << bitindex));
        bits |= (vind << bitindex);
        IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

        /* Determine the location of the bit for this VLAN id */
        bitindex = vlan & 0x1F;   /* lower five bits */

        bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
        if (vlan_on)
                /* Turn on this VLAN id */
                bits |= (1 << bitindex);
        else
                /* Turn off this VLAN id */
                bits &= ~(1 << bitindex);
        IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

        return IXGBE_SUCCESS;
}

/**
 *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
 *  @hw: pointer to hardware structure
 *
 *  Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
        u32 offset;
        u32 vlanbyte;

        DEBUGFUNC("ixgbe_clear_vfta_82598");

        for (offset = 0; offset < hw->mac.vft_size; offset++)
                IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

        for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
                for (offset = 0; offset < hw->mac.vft_size; offset++)
                        IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
                                        0);

        return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: analog register to read
 *  @val: read value
 *
 *  Performs read operation to Atlas analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
        u32 atlas_ctl;

        DEBUGFUNC("ixgbe_read_analog_reg8_82598");

        IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
                        IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
        IXGBE_WRITE_FLUSH(hw);
        usec_delay(10);
        atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
        *val = (u8)atlas_ctl;

        return IXGBE_SUCCESS;
}

/**
 *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: atlas register to write
 *  @val: value to write
 *
 *  Performs write operation to Atlas analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
        u32 atlas_ctl;

        DEBUGFUNC("ixgbe_write_analog_reg8_82598");

        atlas_ctl = (reg << 8) | val;
        IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
        IXGBE_WRITE_FLUSH(hw);
        usec_delay(10);

        return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: EEPROM byte offset to read
 *  @eeprom_data: value read
 *
 *  Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
 **/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                                u8 *eeprom_data)
{
        s32 status = IXGBE_SUCCESS;
        u16 sfp_addr = 0;
        u16 sfp_data = 0;
        u16 sfp_stat = 0;
        u32 i;

        DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");

        if (hw->phy.type == ixgbe_phy_nl) {
                /*
                 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
                 * 0xC30D.  These registers are used to talk to the SFP+
                 * module's EEPROM through the SDA/SCL (I2C) interface.
                 */
                sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
                sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
                hw->phy.ops.write_reg(hw,
                                      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
                                      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
                                      sfp_addr);

                /* Poll status */
                for (i = 0; i < 100; i++) {
                        hw->phy.ops.read_reg(hw,
                                             IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
                                             IXGBE_MDIO_PMA_PMD_DEV_TYPE,
                                             &sfp_stat);
                        sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
                        if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
                                break;
                        msec_delay(10);
                }

                if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
                        DEBUGOUT("EEPROM read did not pass.\n");
                        status = IXGBE_ERR_SFP_NOT_PRESENT;
                        goto out;
                }

                /* Read data */
                hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);

                *eeprom_data = (u8)(sfp_data >> 8);
        } else {
                status = IXGBE_ERR_PHY;
                goto out;
        }

out:
        return status;
}

/**
 *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 *  @hw: pointer to hardware structure
 *
 *  Determines physical layer capabilities of the current configuration.
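 *  The checks are ordered so that an external copper PHY wins over the AUTOC
 *  decode, and the trailing device-id switch pins down fixed-media boards;
 *  an 82598EB XF LR, for instance, always ends up reporting 10GBASE-LR.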
 **/
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
        u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
        u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
        u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
        u16 ext_ability = 0;

        DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");

        hw->phy.ops.identify(hw);

        /* Copper PHY must be checked before AUTOC LMS to determine correct
         * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
        switch (hw->phy.type) {
        case ixgbe_phy_tn:
        case ixgbe_phy_cu_unknown:
                hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
                if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
                if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
                if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
                goto out;
        default:
                break;
        }

        switch (autoc & IXGBE_AUTOC_LMS_MASK) {
        case IXGBE_AUTOC_LMS_1G_AN:
        case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
                if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
                        physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
                else
                        physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
                break;
        case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
                if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
                        physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
                else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
                        physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
                else /* XAUI */
                        physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
                break;
        case IXGBE_AUTOC_LMS_KX4_AN:
        case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
                if (autoc & IXGBE_AUTOC_KX_SUPP)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
                if (autoc & IXGBE_AUTOC_KX4_SUPP)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
                break;
        default:
                break;
        }

        if (hw->phy.type == ixgbe_phy_nl) {
                hw->phy.ops.identify_sfp(hw);

                switch (hw->phy.sfp_type) {
                case ixgbe_sfp_type_da_cu:
                        physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
                        break;
                case ixgbe_sfp_type_sr:
                        physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
                        break;
                case ixgbe_sfp_type_lr:
                        physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
                        break;
                default:
                        physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
                        break;
                }
        }

        switch (hw->device_id) {
        case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
                physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
                break;
        case IXGBE_DEV_ID_82598AF_DUAL_PORT:
        case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
        case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
                physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
                break;
        case IXGBE_DEV_ID_82598EB_XF_LR:
                physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
                break;
        default:
                break;
        }

out:
        return physical_layer;
}

/**
 *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
 *  port devices.
 *  @hw: pointer to the HW structure
 *
 *  Calls common function and corrects issue with some single port devices
 *  that enable LAN1 but not LAN0.
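 *  The correction reads the PCIe control word 2 from the EEPROM; when it
 *  marks LAN0 as fully disabled (disable bit set, select and dummy bits
 *  clear), the PCI function number is forced back to 0.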
 **/
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
        struct ixgbe_bus_info *bus = &hw->bus;
        u16 pci_gen = 0;
        u16 pci_ctrl2 = 0;

        DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");

        ixgbe_set_lan_id_multi_port_pcie(hw);

        /* check if LAN0 is disabled */
        hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
        if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {

                hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

                /* if LAN0 is completely disabled force function to 0 */
                if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
                    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
                    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {

                        bus->func = 0;
                }
        }
}

/**
 *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
 *  @hw: pointer to hardware structure
 *
 **/
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
{
        u32 regval;
        u32 i;

        DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");

        /* Enable relaxed ordering */
        for (i = 0; ((i < hw->mac.max_tx_queues) &&
             (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
                regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
                regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
        }

        for (i = 0; ((i < hw->mac.max_rx_queues) &&
             (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
                regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
                regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
                          IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
        }

}

/**
 *  ixgbe_set_rxpba_82598 - Initialize RX packet buffer
 *  @hw: pointer to hardware structure
 *  @num_pb: number of packet buffers to allocate
 *  @headroom: reserve n KB of headroom
 *  @strategy: packet buffer allocation strategy
 **/
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
                                  u32 headroom, int strategy)
{
        u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
        u8 i = 0;
        UNREFERENCED_1PARAMETER(headroom);

        if (!num_pb)
                return;

        /* Setup Rx packet buffer sizes */
        switch (strategy) {
        case PBA_STRATEGY_WEIGHTED:
                /* Setup the first four at 80KB */
                rxpktsize = IXGBE_RXPBSIZE_80KB;
                for (; i < 4; i++)
                        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
                /* Setup the last four at 48KB...don't re-init i */
                rxpktsize = IXGBE_RXPBSIZE_48KB;
                /* Fall Through */
        case PBA_STRATEGY_EQUAL:
        default:
                /* Divide the remaining Rx packet buffer evenly among the TCs */
                for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
                        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
                break;
        }

        /* Setup Tx packet buffer sizes */
        for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
                IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);

        return;
}